query: string (length 9 – 3.4k)
document: string (length 9 – 87.4k)
metadata: dict
negatives: sequence (length 4 – 101)
negative_scores: sequence (length 4 – 101)
document_score: string (length 3 – 10)
document_rank: string (102 distinct values)
Softmax loss function, vectorized version. Inputs and outputs are the same as softmax_loss_naive.
def softmax_loss_vectorized(W, X, y, reg):
    # Initialize the loss and gradient to zero.
    loss = 0.0
    dW = np.zeros_like(W)
    #############################################################################
    # TODO: Compute the softmax loss and its gradient using no explicit loops.  #
    # Store the loss in loss and the gradient in dW. If you are not careful     #
    # here, it is easy to run into numeric instability. Don't forget the        #
    # regularization!                                                           #
    #############################################################################
    N = X.shape[0]
    f = np.dot(X, W)
    f -= np.amax(f, axis=1, keepdims=True)             # shift scores for numerical stability
    exp_f = np.exp(f)
    exp_fyi = exp_f[range(N), y].reshape((N, 1))       # exponentiated correct-class scores
    sum_exp_f = np.sum(exp_f, axis=1, keepdims=True)
    losses = -np.log(exp_fyi / sum_exp_f)              # per-example cross-entropy losses
    loss = np.sum(losses) / N + reg * np.sum(W * W)

    P = exp_f / sum_exp_f                              # softmax probabilities, shape (N, C)
    y_one_hot = np.zeros_like(P)
    y_one_hot[range(len(y)), y] = 1
    df = (P - y_one_hot) / N                           # gradient of the data loss w.r.t. scores
    dW = np.dot(X.T, df)
    dW += 2 * reg * W                                  # gradient of the L2 regularization term
    #############################################################################
    #                              END OF YOUR CODE                             #
    #############################################################################
    return loss, dW
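A minimal usage sketch for the function above, assuming NumPy is imported as np and CS231n-style shapes (X: (N, D), W: (D, C), y: (N,) integer labels in [0, C)); the toy sizes, seed, and regularization strength below are illustrative, not taken from the dataset.

import numpy as np

np.random.seed(0)
N, D, C = 5, 4, 3                       # tiny toy problem
X = np.random.randn(N, D)
W = 0.01 * np.random.randn(D, C)
y = np.random.randint(C, size=N)

loss, dW = softmax_loss_vectorized(W, X, y, reg=0.1)
print(loss, dW.shape)                   # scalar loss, gradient with W's shape

# With small random weights and reg=0 the loss should be close to log(C),
# since each class gets roughly probability 1/C.
loss0, _ = softmax_loss_vectorized(W, X, y, reg=0.0)
print(loss0, np.log(C))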
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n num_train = X.shape[1]\n num_classes = W.shape[0]\n #############################################################################\n # Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n for i in range(num_train): # for each image\n # compute the score\n scores = W.dot(X[:, i])\n\n # shift the values of f so that the highest number is 0:\n scores -= np.max(scores)\n\n # compute the loss\n loss += -np.log(np.exp(scores[y[i]]) / np.sum(np.exp(scores)))\n\n # gradient(https://github.com/seyedamo/cs231n/blob/master/assignment1/cs231n/classifiers/softmax.py)\n scores = np.exp(scores)\n scores /= np.sum(scores)\n for j in range(num_classes): # for each class\n dW[j, :] += scores[j] * X[:, i].T\n\n # dW wrt correct class scores w_yi\n dW[y[i], :] += -X[:, i].T\n\n # Average the loss \n loss /= num_train\n\n # Add regularization to the loss.\n loss += 0.5 * reg * np.sum(W * W)\n\n # average of the gradient\n dW /= num_train\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return loss, dW", "def softmax_loss(x, y):\n probs = np.exp(x - np.max(x, axis=1, keepdims=True))\n probs /= np.sum(probs, axis=1, keepdims=True)\n N = x.shape[0]\n loss = -np.sum(np.log(probs[np.arange(N), y])) / N\n dx = probs.copy()\n dx[np.arange(N), y] -= 1\n dx /= N\n return loss, dx", "def softmax_loss_naive(W, X, y, reg):\r\n # Initialize the loss and gradient to zero.\r\n loss = 0.0\r\n dW = np.zeros_like(W)\r\n num_train = X.shape[1] # d*n\r\n num_class = W.shape[0]\r\n\r\n #############################################################################\r\n # Compute the softmax loss and its gradient using explicit loops. #\r\n # Store the loss in loss and the gradient in dW. If you are not careful #\r\n # here, it is easy to run into numeric instability. Don't forget the #\r\n # regularization! #\r\n #############################################################################\r\n loss = 0.0\r\n for i in range(num_train):\r\n X_i = X[:,i] # D*1\r\n score_i = W.dot(X_i)\r\n score_i -= np.max(score_i) #C*1 but keepdims = false so it becomes 1*C\r\n exp_score_i = np.exp(score_i)\r\n probs_i = exp_score_i/np.sum(exp_score_i) #1*C\r\n correct_logprobs_i = -np.log(probs_i[y[i]])\r\n loss += correct_logprobs_i\r\n \r\n dscore_i = probs_i.reshape(num_class,-1)#c*1\r\n dscore_i[y[i]] -= 1 #C*1\r\n X_i = X_i.reshape(1,-1)# 1*D\r\n dW += dscore_i.dot(X_i)\r\n \r\n loss /= num_train\r\n loss += 0.5*reg*np.sum(W*W)\r\n\r\n dW /= num_train\r\n dW += reg*W\r\n \r\n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n #############################################################################\n # START OF YOUR CODE #\n #############################################################################\n # construct a one-hot vector for y\n onehot_y = np.zeros((y.size, W.shape[1]))\n onehot_y[np.arange(y.size), y] = 1\n dW = dW.T\n for i in range(y.shape[0]):\n f = np.dot(X[i], W)\n \n for j in range(W.shape[1]):\n e_f = np.exp(f - np.max(f))\n softmax = e_f / e_f.sum()\n loss -= onehot_y[i][j] * np.log(softmax[j])\n dW[j] -= X[i] * (onehot_y[i][j] - softmax[j])\n \n loss = loss / y.shape[0] + reg * np.linalg.norm(W)\n dW = dW.T / y.shape[0] + 2 * reg * W\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n \n\n return loss, dW", "def softmax_loss(x, y):\n N, C = x.shape\n loss, dx = 0, np.zeros(x.shape) \n for i in range(N):\n loss += -np.log(np.exp(x[i,y[i]])/np.sum(np.exp(x[i,:])))\n dx[i,:] = np.exp(x[i,:])/np.sum(np.exp(x[i,:]))\n dx[i,y[i]] += (-1)\n \n loss /= N\n dx /= N\n return loss, dx", "def softmax_loss(x, y):\n #raise NotImplementedError\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################\n N=x.shape[0]\n\n \n x-=np.max(x,axis=1,keepdims=True)\n temp=np.exp(x)\n dr_vec=np.sum(temp,axis=1,keepdims=True)\n\n nr=(x[np.arange(N),y]).reshape([N,1])\n loss=np.sum(-(nr)+np.log(dr_vec))\n \n loss=(loss/N)\n temp/=dr_vec\n temp[np.arange(N),y] -= 1\n \n dx = temp/N\n \n return loss, dx", "def softmax_loss(x, y):\n loss, dx = None, None\n ###########################################################################\n # TODO: Implement the loss and gradient for softmax classification. This #\n # will be similar to the softmax loss vectorized implementation in #\n # cs231n/classifiers/softmax.py. #\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n num_train = x.shape[0]\n\n x = np.exp(x)\n temp_sum = np.sum(x, axis = 1, keepdims = True)\n x = x / temp_sum\n softmax_result = x\n trans_y = np.zeros((x.shape[0],x.shape[1]))\n trans_y[np.arange(x.shape[0]), y] += 1\n x = - np.log(x)\n x = x * trans_y\n x_sum = np.sum(x)\n loss = x_sum / num_train\n loss = loss + \n\n dx = softmax_result - trans_y\n dx = dx / num_train\n\n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return loss, dx", "def softmax_loss(x, y):\n def softmax(x):\n exps = np.exp(x)\n return exps / np.sum(exps, axis=1)[:,None]\n\n N = y.shape[0]\n p = softmax(x)\n log_likelihood = -np.log(p[range(N),y])\n loss = np.sum(log_likelihood) / N\n\n dx = p.copy()\n dx[range(N),y] -= 1\n dx = dx/N\n\n return loss, dx", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. 
If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n # softmax P(Y=k|X=x_i) = e^{s_k}/∑e^{s_j} softmax loss = -log(softmax)\n num_train = X.shape[0]\n num_class = W.shape[1]\n for i in range(num_train):\n scores = X[i].dot(W) # get scores\n max_score = np.max(scores)\n scores -= max_score # 考虑数值计算稳定性 softmax = (e^s_c - max)/∑(e^s_j - max)\n correct_score = scores[y[i]] # score_correct\n P_ic = np.exp(correct_score)/np.sum(np.exp(scores))\n loss += -np.log(P_ic)\n for j in range(num_class):\n if j == y[i]:\n dW[:, j] += (P_ic - 1) * X[i].T\n else:\n P_ij = np.exp(scores[j])/np.sum(np.exp(scores))\n dW[:, j] += P_ij * X[i].T\n \n \n loss /= num_train\n loss += reg*np.sum(W*W)\n dW /= num_train\n dW += 2 * reg * W\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n pass\n num_tran = X.shape[0]\n num_classes = W.shape[1]\n loss_par =np.zeros(num_tran)\n\n Score = np.dot(X,W)\n expS = np.exp(Score)\n # for i in num_tran:\n sumS = np.sum(expS,axis=1)\n sumS = sumS.reshape(sumS.shape[0],1)\n normalize = np.divide(expS,sumS)\n softmax = -np.log(normalize)\n\n for i in np.arange(num_tran):\n loss_par[i]=softmax[i, y[i]]\n for j in np.arange(num_classes) :\n if j!=y[i]:\n # dW[:,j]+=1/normalize[i,y[i]]*expS[i,y[i]]*expS[i,j]/np.power(sumS[i],2) *X[i,:]\n dW[:,j]+=expS[i,j]/sumS[i] *X[i,:]\n else:\n # dW[:,y[i]]+=-1/normalize[i,y[i]]*expS[i,y[i]]*(sumS[i]-expS[i,y[i]])/np.power(sumS[i],2) *X[i,:]\n dW[:,y[i]]+=-(sumS[i]-expS[i,y[i]])/sumS[i] *X[i,:]\n\n dW /=num_tran\n\n loss = np.sum(loss_par) / num_tran\n # print num_tran,loss\n\n dW+=reg*W\n loss+=0.5*reg*np.sum(W*W)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n num_train = X.shape[0]\n num_class = W.shape[1]\n #scores = np.zeros(num_train,num_class)\n scores = X.dot(W)\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n for i in range(num_train):\n # compute Li\n fmax= np.max(scores[i])\n scores[i] -= fmax\n correct_class_score = scores[i,y[i]]\n M = np.exp(correct_class_score)/np.sum(np.exp(scores[i]))\n loss += -np.log(M)\n for j in range(num_class):\n N = np.exp(scores[i,j])/np.sum(np.exp(scores[i]))\n if j ==y[i]:\n dW[:,y[i]]+= (M-1)*X[i].T\n else:\n dW[:,j] += N*X[i].T \n loss /= num_train\n loss += reg*np.sum(W*W)\n dW /= num_train \n dW += 2*reg*W \n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n (num_class, D), (D, num_train) = (W.shape, X.shape)\n class_scores = np.dot(W, X)\n \n # Subtract maximum unnormalized score from each set of class scores\n for i in range(num_train):\n max_class_score = np.max(class_scores[:, i])\n for j in range(num_class):\n class_scores[j, i] -= max_class_score\n \n # Compute softmax and update gradient\n for i in range(num_train):\n normalization_term = sum(np.exp(class_score) for class_score in class_scores[:, i])\n for j in range(num_class):\n class_scores[j, i] = np.exp(class_scores[j, i]) / normalization_term\n # Thanks again to MyHumbleSelf for making me examine this further and discover a bug in my derivation of the softmax gradient!\n dW[j] += (class_scores[j, i] - (j==y[i])) * X[:, i]\n \n # Compute cross-entropy errors and total loss from that\n losses = [np.log(class_scores[y[i], i]) for i in range(num_train)]\n loss = -sum(losses) / num_train\n\n # Add regularization to loss and normalize dW\n loss += 0.5 * reg * np.sum(W * W)\n dW /= num_train\n dW += reg * W\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax(x):\n # (n_samples, n_classes)\n if len(x.shape) == 2:\n row_max = np.max(x, axis=1)\n x -= row_max.reshape((x.shape[0], 1))\n x = np.exp(x)\n row_sum = np.sum(x, axis=1)\n x /= row_sum.reshape((x.shape[0], 1))\n # (n_samples, n_tasks, n_classes)\n elif len(x.shape) == 3:\n row_max = np.max(x, axis=2)\n x -= row_max.reshape(x.shape[:2] + (1,))\n x = np.exp(x)\n row_sum = np.sum(x, axis=2)\n x /= row_sum.reshape(x.shape[:2] + (1,))\n return x", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n for i in range(X.shape[0]):\n scores = X[i].dot(W)\n \n idx_max = np.argmax(scores)\n s_max = scores[idx_max]\n scores -= s_max # shift for numerical stability\n \n temp = np.exp(scores)\n summation = np.sum(temp)\n loss += (- scores[y[i]] + np.log(summation))\n \n # computing gradients\n # (1) an explicit version:\n# for j in range(W.shape[1]):\n# if j == y[i]:\n# dW[:, j] -= X[i]\n# dW[:, idx_max] -= (-X[i])\n \n# dW[:, j] += (1 / summation) * temp[j] * X[i]\n# dW[:, idx_max] += (1 / summation) * temp[j] * (-X[i])\n# elif j == idx_max:\n# dW[:, j] += 0 # X[i] + (-X[i]) = 0\n# else:\n# dW[:, j] += (1 / summation) * temp[j] * X[i]\n# dW[:, idx_max] += (1 / summation) * temp[j] * (-X[i])\n \n # (2) a more concise version:\n softmax_scores = temp / summation\n for j in range(W.shape[1]):\n if j == y[i]:\n dW[:, j] += (-1 + softmax_scores[j]) * X[i]\n else:\n dW[:, j] += softmax_scores[j] * X[i]\n \n loss /= X.shape[0]\n dW /= X.shape[0]\n \n loss += reg * np.sum(W * W)\n dW += 2 * reg * W\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n num_train = X.shape[0]\n num_classes = W.shape[1]\n for i in xrange(num_train):\n scores = X[i, :].dot(W)\n scores -= np.max(scores)\n correct_scores = scores[y[i]]\n score_sum = np.sum(np.exp(scores))\n h = np.exp(correct_scores) / score_sum\n loss += -np.log(h)\n for j in xrange(num_classes):\n if j == y[i]:\n dW[:, y[i]] += (np.exp(scores[j]) / score_sum - 1) * X[i, :]\n else:\n dW[:, j] += (np.exp(scores[j]) / score_sum) * X[i, :]\n \n \n loss /= num_train + ( reg * np.sum(W * W))\n dW /= num_train\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n num_train=X.shape[0]\n num_class=W.shape[1]\n num_feature=X.shape[1]\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n for i in range(num_train):\n #W*Xi C*1\n x=np.exp(np.dot(W.T,X[i,:]))\n denominator=np.sum(x)\n numerator=x[y[i]]\n loss-=np.log(numerator/denominator)\n #numerator and denominator\n #for j in range(num_class):\n normalize_score=x/denominator\n nm=np.reshape(normalize_score, (num_class, 1))\n \n #CxD\n dscore=nm.dot(np.reshape(X[i,:],(1,num_feature)))\n #print(dscore.shape)\n\n dscore[y[i],:]-=X[i,:]\n dW+=dscore.T\n\n loss/=num_train\n dW = dW/num_train + reg*W\n #\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax(x):\n shape = x.shape\n probs = np.exp(x - np.max(x, axis=len(shape) - 1, keepdims=True))\n probs /= np.sum(probs, axis=len(shape) - 1, keepdims=True)\n return probs", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n num_classes = W.shape[1]\n num_train = X.shape[0]\n\n for i in xrange(num_train):\n scores = X[i].dot(W)\n\n # Normalization trick to resolve numerical instability\n # when dealing with the large exponential terms.\n scores -= np.max(scores)\n\n # Cache some terms that are used repeatedly.\n exp_scores = np.exp(scores)\n sum_exp_scores = np.sum(exp_scores)\n correct_class_score = scores[y[i]]\n \n # Update the loss \n loss -= correct_class_score\n loss += np.log(sum_exp_scores)\n\n # Update the gradient\n dW[:,y[i]] -= X[i,:].T\n for j in xrange(num_classes):\n dW[:,j] += ((X[i,:].T * exp_scores[j]) / sum_exp_scores)\n\n \n # Right now the loss is a sum over all training examples, but we want it\n # to be an average instead so we divide by num_train.\n loss /= num_train\n\n dW /= num_train\n\n # Add regularization to the loss.\n loss += 0.5 * reg * np.sum(W * W)\n\n dW += reg*W\n \n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss(x, y):\n shifted_logits = x - np.max(x, axis=1, keepdims=True)\n Z = np.sum(np.exp(shifted_logits), axis=1, keepdims=True)\n log_probs = shifted_logits - np.log(Z)\n probs = np.exp(log_probs)\n N = x.shape[0]\n loss = -np.sum(log_probs[np.arange(N), y]) / N\n dx = probs.copy()\n dx[np.arange(N), y] -= 1\n dx /= N\n return loss, dx", "def softmax(x):\n #pass # TODO: Compute and return softmax(x)\n return np.exp(x) / np.sum(np.exp(x), axis=0)", "def softmax_loss(x, y):\n # softmax\n num = np.exp(x)\n den = np.sum(num, axis=1)\n softmax = num/den[:, None]\n N = x.shape[0]\n\n # compute the los per class\n loss = softmax[np.arange(N), y]\n loss = -np.log(loss)\n\n # sum all the losses and divide by number of class\n # Also add the regularization loss term\n loss = np.sum(loss)/N \n \n dscores = softmax\n dscores[np.arange(N), y] -= 1\n dscores /= N\n\n return loss, dscores", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and 
gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n # needed for calculations\n num_train = X.shape[1]\n\n for i in xrange(num_train):\n # calculate the scores for the current training example with the current weights\n scores = W.dot(X[:, i])\n # scale by the max for numerical stability\n scores -= np.max(scores)\n # calculate the loss\n loss += -scores[y[i]] + np.log(np.sum(np.exp(scores)))\n\n ## L' = -1_y + 1/(\\sum_{}^{} e^f) * e^f\n # e^f\n scores = np.exp(scores)\n # 1/(\\sum_{}^{} e^f)\n scores /= np.sum(scores)\n # -1_y\n scores[y[i]] -= 1\n\n # now scale it by the data\n # we need to use [:, np.newaxis] because when you make a X by 1 dimension slices in numpy the 1 dimension is null\n dW += scores[:, np.newaxis].dot(X[:, i][:, np.newaxis].T)\n\n\n # get the average loss\n loss /= num_train\n # get the average gradient\n dW /= num_train\n\n # regularize the loss function\n loss += 0.5 * reg * np.sum(W * W)\n\n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n num_train = X.shape[0]\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n scores = X.dot(W)\n scores_exp = np.exp(scores-np.max(scores, axis=1, keepdims=True))\n\n sum = np.sum(scores_exp, axis=1, keepdims=True)\n probability = scores_exp/sum\n #list containing the correct classification\n indices = [range(num_train), y]\n correct_class_score = probability[indices]\n\n #calculate -log(prob_y) and take the sum across all training examples\n loss = np.sum(-np.log(correct_class_score))\n loss /= num_train\n loss += 0.5 * reg * np.sum(W * W)\n\n #Compute Gradient\n probability[indices] -=1\n dW = X.T.dot(probability)\n dW /= num_train\n dW += .5 * reg * W\n\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n num_train = X.shape[0]\n num_classe = W.shape[1]\n loss = 0.0\n\n for i in range(num_train): #pour chaque image de l'ensemble d'entrainement\n scores = X[i].dot(W)\n scores -= max(scores)\n\n correct_class_score = scores[y[i]] #y[i]=c\n e_syi = np.exp(correct_class_score)\n e_sj = np.sum(np.exp(scores))\n\n loss -= np.log(e_syi/e_sj)\n\n for k in range(num_classe): #pour chaque classe\n dW[:, k] += ((np.exp(scores[k])/e_sj) - (k == y[i])) * X[i].T\n\n # Right now the loss is a sum over all training examples, but we want it\n # to be an average instead so we divide by num_train.\n loss /= num_train\n dW/= num_train\n\n # Add regularization to the loss.\n loss += reg * np.sum(W * W)\n dW += 2 * reg * W\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n for i in range(X.shape[0]):\n# c = np.matmul(X[i],W)\n# c -= np.amax(c)\n# e_c = np.exp(c)\n# denom = np.sum(e_c)\n# #Nice fact: we know that the largest element in c will also be the largest softmax value, so we only\n# # need to transform that one value. \n# sm_c = e_c/denom\n# \n# loss1 += -np.log(sm_c[y[i]])\n\n # Need to make this whole dang thing more numerically stable. \n c = np.matmul(X[i],W)\n c -= np.amax(c)\n e_c = np.exp(c)\n denom = np.sum(e_c)\n sm_c = e_c/denom\n\n loss += np.log(denom) - c[y[i]]\n# print(-np.log(sm_c[y[i]]) - (np.log(denom)-c[y[i]]))\n\n \"\"\"They are basically the same value\"\"\"\n\n # now computing some gradients\n dL_ds = sm_c\n dL_ds[y[i]] -= 1\n #note that sm_c is modified now!\n \"\"\" #ah, something fundamentally different is happening with numpy. When an array element\n is changed, it's really changed for good. And it changes for all pointers pointing to same object.\n yikes. Actually it's the same with python lists. Anything pointing to And underlying object can\n change that underlying object for all things that point to it. 
Alas.\"\"\"\n# import pdb; pdb.set_trace()\n \"\"\"Okay I just coudln't bear the for loops...\"\"\"\n dW_update = np.matmul(X[i].reshape(1,X.shape[1]).T,dL_ds[np.newaxis,:])\n dW+=dW_update\n # for n in range(W.shape[0]):\n# for m in range(W.shape[1]):\n# if m == y[i]:\n# dW[n,m] += X[i,n]*(sm_c[m]-e_c[m])\n# else:\n# dW[n,m] += X[i,n]*sm_c[m]\n\n # should be numerically unstable I think.\n\n loss /= X.shape[0]\n loss += reg*np.sum(W*W)\n\n dW /= X.shape[0]\n dW += reg*2*W\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n return loss, dW", "def softmax(x):\r\n e_x = np.exp(x - np.expand_dims(np.max(x, axis=-1), axis=-1))\r\n return e_x / np.expand_dims(e_x.sum(axis=-1), axis=-1) # only difference\r", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n num_classes = W.shape[1]\n num_train = X.shape[0]\n for i in range(num_train):\n score = X[i].dot(W)\n exp_score = np.exp(score)\n probability = exp_score[y[i]] / exp_score.sum()\n loss += -np.log(probability)\n dp = -1 / probability\n for j in range(num_classes):\n ds = np.exp(score[j])\n if j == y[i]:\n des = (exp_score.sum() - exp_score[y[i]]) / np.square(exp_score.sum())\n else:\n des = -(exp_score[y[i]]) / np.square(exp_score.sum())\n dW[:, j] += X[i].T * ds * des * dp # chain rule\n\n loss /= num_train\n dW /= num_train\n\n loss += 0.5 * reg * np.sum(W * W)\n dW += reg * W\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n num_train = X.shape[0]\n num_classes = W.shape[1]\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n scores = np.dot(X,W)\n scores = (scores.T - np.max(scores,1)).T\n for i in xrange(num_train):\n nominator = np.exp(scores[i,:])\n denominator = np.sum(np.exp(scores[i,:]))\n loss -= np.log(nominator[y[i]]/denominator)\n for j in xrange(num_classes):\n dW[:,j] += (nominator[j]/denominator)*X[i,:]\n dW[:,y[i]] -= X[i,:]\n\n loss /= num_train\n dW /= num_train\n loss += 0.5*reg*np.sum(W*W)\n dW += reg*W\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax(x):\n pass # TODO: Compute and return softmax(x)\n\n exp_x = np.exp(x)\n sum_x = np.sum(exp_x, axis=0)\n softmax = exp_x/sum_x\n \n return softmax", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n num_train = X.shape[0]\n num_classes = W.shape[1]\n\n # Calculate loss for each example\n f = np.zeros((num_train, num_classes))\n f_max = np.zeros((num_train, 1))\n for i in xrange(num_train):\n for j in xrange(num_classes):\n f[i, j] = np.dot(X[i, :], W[:, j])\n if f[i, j] > f_max[i]:\n f_max[i] = f[i, j]\n\n exp_f = np.zeros_like(f)\n sum_exp_f = np.zeros((num_train, 1))\n for i in xrange(num_train):\n for j in xrange(num_classes):\n f[i, j] -= f_max[i]\n exp_f[i, j] = math.exp(f[i, j])\n sum_exp_f[i] += exp_f[i, j]\n\n for i in xrange(num_train):\n loss += -math.log(exp_f[i, y[i]] / sum_exp_f[i])\n\n loss /= num_train\n\n # Calculate regularization term\n reg_term = 0.0\n for i in xrange(W.shape[0]):\n for j in xrange(W.shape[1]):\n reg_term += W[i, j]**2\n\n loss += reg * reg_term\n\n # Calculate gradient\n P = np.zeros((num_train, num_classes))\n for i in xrange(num_train):\n for j in xrange(num_classes):\n P[i, j] = exp_f[i, j] / sum_exp_f[i]\n P[i, y[i]] -= 1\n\n for i in xrange(dW.shape[0]):\n for j in xrange(dW.shape[1]):\n dW[i, j] = 1 / num_train * np.dot(X[:, i].T, P[:, j])\n \n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax(x):\n scoreMatExp = np.exp(np.asarray(x))\n return scoreMatExp / scoreMatExp.sum(0)", "def softmax(x):\n return np.exp(x) / np.sum(np.exp(x), axis=1, keepdims=True)", "def _softmax(x):\n e = K.exp(x - K.max(x, axis=-1, keepdims=True))\n s = K.sum(e, axis=-1, keepdims=True)\n return e / s", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n num_train = X.shape[0]\n num_class = W.shape[1]\n l = np.zeros([num_train,1])\n for i in range(num_train):\n scores = np.dot(X[i], W)\n f_yi = scores[y[i]]\n exp_num = np.exp(f_yi)\n exp = np.exp(scores)\n exp_deno = np.sum(exp)\n for j in range(num_class):\n if (j == y[i]):\n dW[:,j] -= X[i,:].transpose()\n dW[:,j] += (np.exp(scores[j]) / exp_deno) * X[i,:].transpose()\n l[i] = -np.log(exp_num/exp_deno)\n\n loss = np.sum(l)/num_train\n loss += reg * np.sum(W*W)\n dW /= num_train \n dW += 2 * reg * W\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n return loss, dW", "def softmax(x):\r\n output = np.exp(x)\r\n return output / np.sum(output, axis=1, keepdims=True)", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n num_cases = X.shape[0]\n num_class = W.shape[1]\n y_label = np.zeros((num_cases,num_class))\n for i in range(num_cases):\n h1 = np.exp(X[i].dot(W))\n h = h1/np.sum(h1)\n y_label[i] = (np.arange(h.shape[0]) == y[i]) + 0\n loss -= (np.sum(y_label[i] * np.log(h) + (1 - y_label[i]) * np.log(1 - h)))\n delta = np.zeros(W.shape)\n for j in range(num_class):\n delta[:,j] += X[i]\n delta[:,j] *= h1[j]\n delta[:,j] *= (np.sum(h1) - h1[j])/(np.sum(h1) ** 2)\n delta[:,j] = y_label[i][j] / h[j] * delta[:,j] - (1 - y_label[i][j]) / (1 - h[j]) * delta[:,j]\n dW -= delta\n loss /= num_cases\n loss += reg * np.sum(W * W)\n dW /= num_cases\n dW += 2 * reg * W\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax(x):\n # https://stackoverflow.com/questions/34968722/softmax-function-python\n return np.exp(x) / np.sum(np.exp(x), axis=0)", "def softmax_loss1(x, y):\n # tmp = np.max(x, axis=1, keepdims=True)\n shifted_logits = x - np.max(x, axis=1, keepdims=True)\n Z = np.sum(np.exp(shifted_logits), axis=1, keepdims=True)\n log_probs = shifted_logits - np.log(Z)\n probs = np.exp(log_probs)\n N = x.shape[0]\n # tmp2 = np.arange(N)\n tmp3 = log_probs[np.arange(N), y]\n # tmp4 = log_probs[[0,1,2],[2,5,0]]\n loss = -np.sum(log_probs[np.arange(N), y]) / N\n dx = probs.copy()\n dx[np.arange(N), y] -= 1\n dx /= N\n return loss, dx", "def softmax(x):\n return np.exp(x) / np.sum(np.exp(x), axis=1, keepdims=True)", "def softmax_loss(x, y):\n\n eps = 1e-5\n \n N,C = x.shape\n p = softmax(x)\n llikelihood = -np.log(p[range(N),y] + eps)\n# print(llikelihood)\n loss = np.sum(llikelihood) / N\n\n dx = p\n dx[range(N),y] -= 1\n dx = dx/N\n \n return loss, dx", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. 
If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n num_classes = W.shape[1]\n #print('num_classes = ', num_classes)\n num_train = X.shape[0]\n #print('num_train = ', num_train)\n \n min_score = 0.0\n shifted_scores = np.zeros(W.shape[1])\n #max_score = np.zeros(W.shape[1])\n max_score = 0.0\n \n loss_array = np.zeros(y.shape[0])\n for i in range(num_train):\n scores = X[i].dot(W)\n #print('scores dimensions = ', scores.shape)\n #print('scores = ', scores)\n #print('i =', i, 'y = ', y[i])\n min_score = np.min(scores)\n max_score = np.max(scores)\n #print(min_score,max_score)\n shifted_scores = np.multiply(-1,scores + abs(min_score))\n #print(scores)\n #print(shifted_scores)\n exp_scores = np.exp(shifted_scores)\n norm = np.amax(exp_scores)\n norm_scores = np.divide(exp_scores,norm)\n loss_array[i] = np.multiply(-1,np.log(norm_scores[y[i]]/(np.sum(norm_scores)-norm_scores[y[i]])))\n #print(loss_array)\n for j in range(num_classes): \n\t\n if j == y[i]: \n dW[:,j] = np.multiply(norm_scores[y[i]],1-norm_scores[y[i]])\n else:\n dW[:,j] = np.multiply(-1,np.multiply(norm_scores[y[i]],norm_scores[y[j]]))\n\t\t\t\n\t\t\t\n loss = np.amax(loss_array)\n\n # Add regularization to the loss.\n loss = 0.5 * reg * np.sum(W * W) + loss\n \n \n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss(x, y):\n ############################################################################\n # TODO: You can use the previous softmax loss function here. # \n # Hint: Be careful on overflow problem #\n ############################################################################\n ############################################################################\n # START OF YOUR CODE #\n ############################################################################\n N = len(x)\n # We want to get the real y\n log_C = -np.max(x,axis=1,keepdims = True)\n # Get numerator\n e_all = np.exp(x+log_C)\n # Get the final prob\n prob = e_all/e_all.sum(axis=1,keepdims=True)\n # Find final loss\n loss = np.sum(-np.log(prob)[np.arange(N),y])/N\n # Get dx\n dx = prob\n dx[np.arange(N),y] -= 1\n dx /= N\n \n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n return loss, dx", "def softmax_loss_vectorized(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n num_train = X.shape[0]\n dim = dW.shape[0]\n num_classe = W.shape[1]\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using no explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n S = X.dot(W)\n # ajouter le - max a la fin\n indexes=np.arange(num_train)\n #c = correct class score\n c = S[indexes, y]\n\n e_syi = np.exp(c)\n e_sj = np.sum(np.exp(S), axis=1)\n Li = - np.log(e_syi/e_sj)\n loss = np.sum(Li) / num_train + reg * np.sum(W * W)\n\n\n M = np.exp(S)/(np.repeat(e_sj, num_classe).reshape(num_train, num_classe)) #(500,10)\n M[indexes, y] -= 1 #bonnes classes\n dW = X.T.dot(M)\n\n dW = dW/num_train + 2 * reg * W\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax(x): \n return np.exp(x) / np.sum(np.exp(x), axis=0)", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n # print \"dW's shape\", dW.shape\n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax.ipynb loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n # For every training image\n for train_image in xrange(num_train):\n # Multiply the weights by the image to get the scores\n scores = X[train_image].dot(W)\n # print(scores)\n # And then get the correct score\n correct_label = y[train_image]\n correct_score = scores[correct_label]\n # TODO: Right up to here\n # And then get the score of every other classifier\n all_scores = np.sum(scores)\n # Add a normalizing factor for numeric stability\n normalizing_constant = np.max(scores)\n scores -= normalizing_constant\n correct_score -= normalizing_constant\n #Calculating the softmax values\n softmax = np.exp(correct_score)/np.sum(np.exp(scores))\n\n # print(\"Correct score softmax\",softmax)\n\n # And calculating the loss\n loss += -1*np.log(softmax)\n # print loss\n #TODO: Loss computation is also correct\n\n # And calculating the gradient\n\n # First, update the Weight matrix with the correct example's derivative\n dW[:,correct_label] += (softmax-1)*np.transpose(X[train_image])\n\n # Then do the same for the wrong cases\n incorrect_labels = [x for x in xrange(num_classes) if x != correct_label]\n # Now, update the weights\n for label_index in incorrect_labels:\n #Calculating the softmax for a wrong label\n incorrect_label_softmax = np.exp(scores[label_index])/(np.sum(np.exp(scores)))\n # Calculating the derivative\n necessary_weight = incorrect_label_softmax*np.transpose(X[train_image])\n # Updating the weights\n dW[:,label_index] += necessary_weight\n\n\n # Divide the loss\n loss /= num_train\n dW /= num_train\n\n # Now, do regularization\n loss += 0.5*reg*np.sum(W*W)# Penalize big weights\n dW += reg*W\n\n\n\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax(x):\n npX = np.array(x)\n expX = np.exp(x)\n\n return expX/sum(expX)", "def softmax(x: npt.NDArray) -> npt.NDArray:\n row_wise_max = np.max(x, axis=1).reshape(-1, 1)\n exp_x = np.exp(x - row_wise_max)\n return exp_x / np.sum(exp_x, 
axis=1).reshape(-1, 1)", "def temporal_softmax_loss(x, y, mask, verbose=False):\n\n N, T, V = x.shape\n\n x_flat = x.reshape(N * T, V)\n y_flat = y.reshape(N * T)\n mask_flat = mask.reshape(N * T)\n\n # dividing by max doesn't hurt.. rather it makes the operand with in the exponential\n # more well behaved -- no very huge numbers.. Low numbers will be in -... which will also\n # become more well behaved.. low in magnitude say close to 0 will loose precision I guess or\n # say 0.000001 and 0..000005 after divisionn by a huge nummber willl be almost 0.\n # in any case, since the numerator and denominator will be divided by the same number\n # it doesn't make a diffference.. so softmax is np.exp(p)/sum(np.exp(p))\n # np.exp(p-p_max)/sum(np.exp(p-p_max)) is the same thing -- but perhaps numerical\n probs = np.exp(x_flat - np.max(x_flat, axis=1, keepdims=True))\n probs /= np.sum(probs, axis=1, keepdims=True)\n loss = -np.sum(mask_flat * np.log(probs[np.arange(N * T), y_flat])) / N\n\n # this gives the derivative\n dx_flat = probs.copy()\n dx_flat[np.arange(N * T), y_flat] -= 1\n dx_flat /= N\n dx_flat *= mask_flat[:, None]\n\n if verbose: print('dx_flat: ', dx_flat.shape)\n\n dx = dx_flat.reshape(N, T, V)\n\n return loss, dx", "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n num_train = X.shape[0]\n # print(\"num_train:\", num_train)\n num_classes = W.shape[1]\n # print(\"num_classes:\", num_classes)\n \n for i in range(num_train):\n scores = X[i].dot(W) # scores is 1 * C\n correct_class = y[i]\n \n # LOSS DUE TO TRAINING SAMPLE = -log(exp^correct_score / sum(exp^all_other_scores))\n log_c = np.max(scores)\n scores -= log_c\n correct_class_score = scores[correct_class]\n exp_scores = np.exp(scores)\n sum_exp_scores = np.sum(np.exp(scores))\n proportion = np.exp(correct_class_score) / sum_exp_scores\n loss -= np.log(proportion)\n # print(proportion)\n \n # ALTERNATIVELY: (we split the log)\n# loss -= scores[y[i]]\n# loss += np.log(np.sum(np.exp(X[i].dot(W))))\n \n # UPDATE GRADIENT\n for j in range(num_classes):\n p = np.exp(scores[j]) / sum_exp_scores # \"probability\" of class j\n dW[:,j] += (p - (j == y[i])) * X[i,:]\n # dW is D by C\n\n loss /= num_train\n loss += reg * np.sum(W * W) \n dW /= num_train\n dW += reg * 2 * W\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n \n return loss, dW", "def softmax_loss_vectorized(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n num_train, num_dim = X.shape\n num_classes = W.shape[1]\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using no explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n scores = np.dot(X,W)\n # scores = scores.T - np.max(scores,1)\n # f = np.exp(scores.T) \n # correct_scores = f[range(num_train),y] #1*N\n # col_sum = np.sum(f,1)\n # loss = np.sum(-np.log(correct_scores/col_sum))\n\n # mat = f.T/col_sum #\n # mat = mat.T\n # y_pred = np.zeros(mat.shape)\n # y_pred[range(num_train),y] = 1\n # dW = np.dot(X.T,mat-y_pred)\n\n # loss/=num_train\n # loss += 0.5*reg*np.sum(W*W)\n # dW /= num_train\n # dW += reg*W\n f = scores.T - np.max(scores,1)\n f = f.T\n f_correct = scores[range(num_train),y]\n \n sum_col = np.log(np.sum(np.exp(scores),1)) # N*1\n \n loss = sum_col - f_correct # N*1\n loss = np.sum(loss)/num_train + 0.5*reg*np.sum(W*W)\n\n prob = np.exp(f).T / np.sum(np.exp(f),1)\n prob = prob.T\n y_pred = np.zeros(scores.shape)\n y_pred[range(num_train),y] = 1\n dW = X.T.dot(prob - y_pred)\n dW = dW/float(num_train) + reg*W\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax(x):\n return np.exp(x) / np.sum(np.exp(x), axis=0)", "def softmax(x):\n return np.exp(x) / np.sum(np.exp(x), axis=0)", "def softmax(x):\n return np.exp(x) / np.sum(np.exp(x), axis=0)", "def softmax(x):\n return np.exp(x) / np.sum(np.exp(x), axis=0)", "def softmax(x):\n return np.exp(x) / np.sum(np.exp(x), axis=0)", "def softmax(x):\n return np.exp(x) / np.sum(np.exp(x), axis=0)", "def softmax(x):\n return np.exp(x) / np.sum(np.exp(x), axis=0)", "def softmax(x):\n return np.exp(x) / np.sum(np.exp(x), axis=0)", "def softmax(x):\r\n e_x = np.exp(x - np.max(x))\r\n return e_x / e_x.sum(axis=0) # only difference\r", "def softmax(x):\r\n sum_c = np.sum(np.exp(x), axis=1)\r\n sum_c = np.expand_dims(sum_c, axis=1)\r\n pred_x = np.divide(np.exp(x), sum_c)\r\n return pred_x", "def my_softmax(x):\n x = x - np.max(x)\n exp_x = np.exp(x)\n softmax_x = exp_x / np.sum(exp_x)\n return softmax_x", "def softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum(axis=0) # only difference", "def softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum(axis=0) # only difference", "def softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum(axis=0) # only difference", "def softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum(axis=0) # only difference", "def softmax(x):\n return np.exp(x) / np.sum(np.exp(x), axis=1)", "def softmax(x):\n orig_shape = x.shape\n\n if len(x.shape) > 1:\n # Matrix\n tmp = np.max(x, axis=1)\n x -= tmp.reshape((x.shape[0], 1))\n x = np.exp(x)\n tmp = np.sum(x, axis=1)\n x /= tmp.reshape((x.shape[0], 1))\n else:\n # Vector\n tmp = np.max(x)\n x -= tmp\n x = np.exp(x)\n tmp = np.sum(x)\n x /= tmp\n\n assert x.shape == orig_shape\n return x", "def softmax_loss_vectorized(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using no explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n scores = X@W # 500,10\n# print(scores.shape)\n max_scores = np.max(scores, axis=1).reshape(-1,1) # 500, numeric instablity\n# print(max_scores.shape)\n scores -= max_scores # numeric instablity\n# print(scores.shape)\n correct_scores = scores[np.arange(scores.shape[0]), y] # 500,\n P_ic = np.exp(correct_scores)/np.sum(np.exp(scores), axis=1)\n# print(P)\n loss += np.sum(-np.log(P_ic))/scores.shape[0] # L = ∑L_i/N\n loss += reg * np.sum(W * W) # regularization\n # 向量化梯度:用scores构建一个P [500, 10],首先取exp(scores)得到每一个位置的exp,然后对每个位置除以这一行的exp和\n # 上面的操作会得到500,10的矩阵,每个位置都是softmax之后的结果\n # !重点:对于[i,y[i]]位置,根据P_ic - 1, 要减1 \n P = np.exp(scores) # 正确分类的梯度, 位于梯度矩阵所有c的行\n P /= np.sum(np.exp(scores),axis=1).reshape(-1, 1)\n P[np.arange(scores.shape[0]), y] -= 1 # 将 i, y[i] -= 1\n \n # 得到这个矩阵之后,与X.T相乘即可得到dL/dW P(500,10) X(500,3073) X.T (3073, 500) W(3073, 10)\n dW += X.T@P\n dW /= scores.shape[0] # *1/N\n dW += 2*reg*W # 正则化梯度\n \n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n return loss, dW", "def softmax(x):\n e_x = np.exp(x - np.max(x, axis=1).reshape(-1, 1))\n return e_x / np.sum(e_x, axis=1).reshape(-1, 1)", "def softmax_loss_vectorized(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n num_train = X.shape[1]\n num_classes = W.shape[0]\n #############################################################################\n # Compute the softmax loss and its gradient using no explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n \n # compute scores\n scores = W.dot(X)\n scores -= np.max(scores)\n\n # softmax function\n softmax = np.exp(scores) / np.sum(np.exp(scores), 0) # 10 x 49000 | C x D\n \n # cross entropy loss\n loss = -np.log(softmax[y, range(num_train)]) # 49000\n loss = np.sum(loss) / num_train\n\n # regularisation\n loss += 0.5 * reg * np.sum(W*W)\n\n # gradient (source:https://github.com/MyHumbleSelf/cs231n/blob/master/assignment1/cs231n/classifiers/softmax.py)\n ind = np.zeros(softmax.shape)\n ind[y, range(num_train)] = 1\n dW = np.dot((softmax-ind), X.T)\n dW /= num_train\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_loss_vectorized(W, X, y, reg):\n\n #############################################################################\n # TODO: Compute the softmax.ipynb loss and its gradient using no explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! 
#\n #############################################################################\n\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n train_images = X.shape[0]\n # Store all the scores in a matrix\n all_scores = np.dot(X,W)\n #First, calculate the normalizing constant for numeric stability\n constant = np.max(all_scores,axis=1)\n normalized_scores = np.transpose(np.subtract(np.transpose(all_scores),constant))\n\n #Then, calculate softmax for the correct scores\n exp_scores = np.exp(all_scores)\n # First, keep track of the sum of values per row\n exp_sum = np.sum(exp_scores,axis=1)\n\n # Finally, calculate the softmax score for every entry\n softmax_scores = np.transpose(exp_scores)/exp_sum # useful when computing gradient\n softmax_scores = np.transpose(softmax_scores)\n # And then, compute the loss\n loss_score = softmax_scores[range(train_images),y]\n loss_score = -1 * np.log(loss_score) #taking the logarithm\n loss += np.sum(loss_score)\n\n #Normalize and regularize the loss\n loss /= train_images\n loss += 0.5*reg*np.sum(W*W)\n\n #Finally, calculate a vectorized gradient\n\n # Calculate the derivative at the correct label\n softmax_scores[range(train_images),y] -= 1\n # Then, make a matrix containing all the gradient values\n gradient_values = np.dot(np.transpose(X),softmax_scores)\n gradient_values = gradient_values\n\n #FINALLY, update the gradient\n dW+= gradient_values\n #And normalize and regularize it\n dW /= train_images\n dW += reg*W\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax(self, x):\n\n out = np.zeros(x.shape)\n for i in range(x.shape[0]):\n max_x = x[i] - np.max(x[i])\n out[i] = np.exp(max_x) / np.sum(np.exp(max_x), axis=0)\n\n return out", "def softmax(x):\n return np.exp(x) / np.sum(np.exp(x), axis=0)\n # return ( x / np.sum(x, axis=0) )", "def softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum(axis=0) # only difference", "def softmax_classifier(W, input, label, lamda):\n\n ############################################################################\n # TODO: Put your code here\n\n loss = 0.0\n num_train = input.shape[0]\n num_classes = W.shape[1]\n\n score = np.dot(input, W) # (N,C)\n prediction = np.argmax(score, axis=1)\n score -= np.max(score, axis=1, keepdims=True)\n\n # # cross entropy loss\n # # take exponent of the score and normalized with sum of all exponents.\n probs = np.exp(score) # (N,C)\n e_y = np.sum(np.multiply(probs,label), axis=1) # (N,) probability for correct class\n e_sum = np.sum(probs, axis=1) # (N,) sum of probability over all classes\n\n # implementation of loss equivalent l_i = -f_y_i + log sum_j(e^(f_j))\n # loss = np.sum(-np.log(e_y/e_sum)) # sum of -log across all samples.\n # loss /= num_train # average loss\n loss = np.sum(-1 * e_y) + np.sum(np.log(e_sum))\n loss /= num_train\n\n loss += lamda * np.sum(W * W) # regularization \n\n # Gradient\n delta_score = probs / e_sum.reshape(num_train,1) # (N,C)\n delta_score -= label # (NxC)\n gradient = np.dot(input.T, delta_score)\n gradient /= num_train\n gradient += lamda * 2 * W\n\n ############################################################################\n\n return loss, gradient, prediction", "def softmax(x): \n e_x = np.exp(x - np.max(x)) \n return e_x / e_x.sum()", "def softmax(x):\n e_x = np.exp(x - np.max(x))\n x = e_x / e_x.sum()\n # Your code should be fast, so use 
a vectorized implementation using numpy,\n # don't use any loops.\n # With a vectorized implementation, the code should be no more than 2 lines.\n #\n # For numeric stability, use the identify you proved in Ex 2 Q1.\n return x", "def softmax_loss_vectorized(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n\n #############################################################################\n # TODO: Compute the softmax loss and its gradient using no explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n num_train = X.shape[0]\n # print(\"num_train:\", num_train)\n num_classes = W.shape[1]\n # print(\"num_classes:\", num_classes)\n \n scores = X.dot(W) # scores is N*D x D*C -> N*C \n log_c = np.max(scores, axis=1).T\n scores -= log_c[:,None]\n correct_class_score = scores[np.arange(num_train),y]\n exp_scores = np.exp(scores)\n sum_exp_scores = np.sum(np.exp(scores), axis=1)\n proportion = np.exp(correct_class_score) / sum_exp_scores\n loss -= np.sum(np.log(proportion))\n \n # calculating dW = (p - (c = correct c ? 1 : 0)) * x\n correct_class_one_hot = np.zeros_like(scores)\n correct_class_one_hot[np.arange(num_train),y] += 1\n p = np.exp(scores) / sum_exp_scores[:,None] - correct_class_one_hot # N*C / N:1 -> N*C\n dW += X.T.dot(p) # D*N x N*C -> D*C\n\n loss /= num_train\n loss += 0.5 * reg * np.sum(W * W) \n dW /= num_train\n dW += reg * W\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n return loss, dW", "def softmax(x):\n exps = np.exp(x - np.max(x, axis=0))\n return exps / exps.sum(axis=0)", "def softmax(X):\n _X = X - np.max(X, axis=1).reshape(-1, 1)\n ep = np.exp(_X)\n return ep / np.sum(ep, axis=1).reshape(-1, 1)", "def softmax_loss_vectorized(W, X, y, reg):\r\n # Initialize the loss and gradient to zero.\r\n loss = 0.0\r\n dW = np.zeros_like(W)\r\n\r\n #############################################################################\r\n # Compute the softmax loss and its gradient using no explicit loops. #\r\n # Store the loss in loss and the gradient in dW. If you are not careful #\r\n # here, it is easy to run into numeric instability. Don't forget the #\r\n # regularization! 
#\r\n #############################################################################\r\n m = X.shape[1]\r\n f = W.dot(X)\r\n f -= np.max(f,axis = 0)\r\n exp_score = np.exp(f)\r\n probs = exp_score/np.sum(exp_score,axis = 0)\r\n corect_logprobs = -np.log(probs[y,range(m)])\r\n loss = np.sum(corect_logprobs)/m\r\n \r\n loss += 0.5*reg*np.sum(W*W)\r\n\r\n dscore = probs\r\n dscore[y,range(m)] -= 1 #C*N\r\n\r\n dW = np.dot(dscore,X.T)/m #x.T:n*d\r\n dW+= reg*W\r\n\r\n return loss, dW", "def softmax(x):\n\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum()", "def softmax(input, dim, inplace=False):\n return FunctionLib.apply(\n 'Softmax', input.device, [input],\n outputs=[input if inplace else None], axis=dim)", "def softmax(x):\n e_x = np.exp((x.transpose()-x.max(axis=1)).transpose())\n return e_x / np.sum(e_x,axis=1)[:,None]", "def softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum(axis=0)", "def softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum(axis=0)", "def softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum(axis=0)", "def softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum(axis=0)", "def softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum(axis=0)", "def _softmax(self, x):\n return np.exp(x - np.max(x)) / np.sum(np.exp(x - np.max(x)))", "def softmax(x):\n xx = x\n x = x.reshape((-1, x.shape[-1]))\n e_x = np.exp(x - np.max(x, 1).reshape(-1, 1))\n res = e_x / e_x.sum(axis=1).reshape(-1, 1)\n return res.reshape(xx.shape)", "def softmax(x):\n return np.exp(x)/np.sum(np.exp(x),axis=0)", "def softmax(x):\n return np.exp(x)/np.sum(np.exp(x),axis=0)", "def softmax_loss_vectorized(W, X, y, reg):\n num_train = X.shape[0]\n num_class = W.shape[1]\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n scores = X.dot(W)\n temp_matrix = np.zeros(scores.shape)\n \n max_each_row = np.max(scores,axis=1).reshape(-1,1)\n scores -= max_each_row\n summation = np.sum(np.exp(scores),axis=1).reshape(-1,1)\n scores = np.exp(scores)\n scores = np.divide(scores,summation)\n temp_matrix[range(num_train),list(y)] =-1\n scores += temp_matrix\n dW = X.T.dot(scores) / num_train + 2*reg*W \n log_summation = np.log(summation)\n vector = scores[range(num_train),list(y)].reshape(-1,1) \n L = -vector+ log_summation \n loss = np.sum(L)/num_train + reg*np.sum(W*W)\n \n #############################################################################\n # TODO: Compute the softmax loss and its gradient using no explicit loops. #\n # Store the loss in loss and the gradient in dW. If you are not careful #\n # here, it is easy to run into numeric instability. Don't forget the #\n # regularization! #\n #############################################################################\n \n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum(axis=1)", "def softmax(x):\r\n e_x = np.exp(x - np.max(x))\r\n return e_x / e_x.sum()", "def softmax(x):\r\n e_x = np.exp(x - np.max(x))\r\n return e_x / e_x.sum()", "def softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum()", "def softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum()", "def softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum()", "def softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum()" ]
[ "0.76837397", "0.75555336", "0.755513", "0.75321347", "0.7522562", "0.7514906", "0.7509924", "0.74732995", "0.747066", "0.74670047", "0.7465988", "0.7429649", "0.7429498", "0.74292433", "0.74264807", "0.74254495", "0.7410896", "0.7408939", "0.7406985", "0.738524", "0.7385114", "0.7373018", "0.73678154", "0.73654824", "0.7359422", "0.7358886", "0.7337897", "0.7337429", "0.73363215", "0.732422", "0.7312579", "0.7310264", "0.7306886", "0.73021233", "0.72969854", "0.729446", "0.7293986", "0.7288037", "0.72835076", "0.7283115", "0.72773975", "0.7266634", "0.7253976", "0.72536325", "0.7240069", "0.7228559", "0.7221276", "0.72203195", "0.7214842", "0.72115564", "0.7201238", "0.7201238", "0.7201238", "0.7201238", "0.7201238", "0.7201238", "0.7201238", "0.7201238", "0.71970946", "0.7192801", "0.7191997", "0.7189091", "0.7189091", "0.7189091", "0.7189091", "0.71824104", "0.7180031", "0.71779555", "0.7173017", "0.717289", "0.7166203", "0.71619934", "0.7160366", "0.71599674", "0.7159205", "0.7158985", "0.71581006", "0.71512604", "0.7134689", "0.712987", "0.7126593", "0.71233684", "0.71232677", "0.7121511", "0.7119023", "0.7119023", "0.7119023", "0.7119023", "0.7119023", "0.7118867", "0.7118194", "0.71159786", "0.71159786", "0.71151686", "0.71135163", "0.7109449", "0.7109449", "0.7098417", "0.7098417", "0.7098417", "0.7098417" ]
0.0
-1
Initialize the axis ranges from the provided Plot or renderer.
def initialize_axis_ranges(self, plot, transform=None): if transform is None: def transform(x): return x elif isinstance(transform, int): ndigits = transform def transform(x): return round(x, ndigits) # Avoid UI polluting with non-sensical digits self.x_axis_range_low = transform(plot.x_axis.mapper.range.low) self.auto_x_axis_range_low = self.x_axis_range_low self.x_axis_range_high = transform(plot.x_axis.mapper.range.high) self.auto_x_axis_range_high = self.x_axis_range_high self.y_axis_range_low = transform(plot.y_axis.mapper.range.low) self.auto_y_axis_range_low = self.y_axis_range_low self.y_axis_range_high = transform(plot.y_axis.mapper.range.high) self.auto_y_axis_range_high = self.y_axis_range_high
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_axes(self):\n mini, maxi = self._get_extremes()\n self.y_axis.min = mini\n self.y_axis.max = maxi\n self.y_axis._max_min()\n\n if not None in [s.xvalues for s in self]:\n mini, maxi = self._get_extremes('xvalues')\n self.x_axis.min = mini\n self.x_axis.max = maxi\n self.x_axis._max_min()", "def initialize_plot(self, ranges=None):\n raise NotImplementedError", "def initialize_axes(self):\r\n self.x_lim = np.array([self.vals[:, 0].min(), self.vals[:, 0].max()])\r\n self.y_lim = np.array([self.vals[:, 1].min(), self.vals[:, 1].max()])\r\n self.z_lim = np.array([self.vals[:, 2].min(), self.vals[:, 2].max()])", "def _axes_domain(self, *args, **kwargs):\n # See _add_gridline_label for detials\n lon_0 = self.axes.projection.proj4_params.get('lon_0', 0)\n x_range, y_range = type(self)._axes_domain(self, *args, **kwargs)\n x_range = np.asarray(x_range) + lon_0\n return x_range, y_range", "def initAxisValues(self, axis):\n \n if (axis != None):\n if self.isTime:\n self.axisValues = [repr(t.tocomponent())\n for t in axis.asRelativeTime()]\n else:\n self.axisValues = axis.getValue()\n else:\n raise TypeError(\"Error: axis is not defined\")\n\n self.axisIndices = range(len(self.axisValues))\n self.updateMin(0)\n self.updateMax(len(self.axisValues) - 1)", "def _InitAxes( self ):\n self.ax = self.fig.add_subplot( 111 )", "def __init__(self, fig, variables, ranges, n_ordinate_levels=6):\n angles = np.arange(0, 360, 360./len(variables))\n axes = [fig.add_axes([0.1, 0.1, 0.9, 0.9], polar=True,\n label=\"axes{}\".format(i)) for i in range(len(variables))]\n for ax in axes[1:]:\n ax.patch.set_visible(False)\n ax.grid(\"off\")\n ax.xaxis.set_visible(False)\n for i, ax in enumerate(axes):\n grid = np.linspace(*ranges[i], num=n_ordinate_levels)\n gridlabel = [\"{}\".format(round(x, 2)) for x in grid]\n if ranges[i][0] > ranges[i][1]:\n grid = grid[::-1] # hack to invert grid\n gridlabel[0] = \"\" # clean up origin\n set_rgrids(ax, grid, labels=gridlabel, angle=angles[i])\n ax.set_ylim(*ranges[i])\n # variables for plotting\n self.angle = np.deg2rad(np.r_[angles, angles[0]])\n self.ranges = ranges\n self.ax = axes[0]", "def _use_data_bounds_changed_for_axes(self):\n self.update_pipeline()", "def set_range(self, axis: int, range: Sequence[Union[int, float]]):\n if axis < 0:\n axis += self.ndim\n if axis < 0:\n raise ValueError(\n f'axis is negative, expected positive, got {axis}'\n )\n if self.range[axis] != range:\n self._range[axis] = range\n self.events.range(axis=axis)", "def set_range(\n self,\n axis: Union[int, Sequence[int]],\n _range: Union[\n Sequence[Union[int, float]], Sequence[Sequence[Union[int, float]]]\n ],\n ):\n if isinstance(axis, Integral):\n axis = assert_axis_in_bounds(axis, self.ndim) # type: ignore\n if self.range[axis] != _range:\n full_range = list(self.range)\n full_range[axis] = _range\n self.range = full_range\n else:\n full_range = list(self.range)\n # cast range to list for list comparison below\n _range = list(_range) # type: ignore\n axis = tuple(axis) # type: ignore\n if len(axis) != len(_range):\n raise ValueError(\n trans._(\"axis and _range sequences must have equal length\")\n )\n if _range != full_range:\n for ax, r in zip(axis, _range):\n ax = assert_axis_in_bounds(int(ax), self.ndim)\n full_range[ax] = r\n self.range = full_range", "def __init__(self, axes=()):\n self._axes = []\n self._dimension = 0\n for axis in axes:\n self.add_axis(axis)", "def populate_plot_axis(self,plot,ax='x'):\n\n fig=plt.gcf()\n\n extra_ax=[]\n\n if ax=='x':\n\n 
ticks=plot.get_xticks()\n\n lim=plot.get_xlim()\n\n for i in range(len(self.names)):\n\n if i==0:\n\n axn=plot\n\n axn.spines['bottom'].set_position(('outward',10))\n\n axn.spines['bottom'].set_visible(True)\n\n else:\n\n dy_fig=0.08\n\n prev_ax_position=axn.get_position()\n\n extra_ax.append(fig.add_axes(\\\n (prev_ax_position.x0,\\\n prev_ax_position.y0-2*dy_fig,\\\n prev_ax_position.width,\\\n 0),'autoscalex_on',True))\n\n axn=extra_ax[i-1]\n\n axn.yaxis.set_visible(False)\n\n for side in axn.spines.keys():\n\n axn.spines[side].set_linewidth(1)\n\n axn.set_xticks(ticks)\n\n ticksnames=[float(str(x)) for x in self.values[i]]\n\n axn.set_xticklabels(\\\n [\"{:.2f}\".format(x).rstrip('0').rstrip('.') for x in ticksnames],\\\n rotation = 45)\n\n xlab=axn.set_xlabel(self.names[i])\n\n xlab.set_fontsize(10)\n\n axn.tick_params(axis='x',labelsize=10)\n\n axn.set_xlim(lim)\n\n\n\n elif ax=='y':\n\n ticks=plot.get_yticks()\n\n lim=plot.get_ylim()\n\n for i in range(len(self.names)):\n\n if i==0:\n\n axn=plot\n\n axn.spines['left'].set_position(('outward',10))\n\n axn.spines['left'].set_visible(True)\n\n else:\n\n dx_fig=0.08\n\n plot_position=plot.get_position()\n\n prev_ax_position=axn.get_position()\n\n extra_ax.append(fig.add_axes(\\\n (prev_ax_position.x0-2*dx_fig,\\\n prev_ax_position.y0,\\\n 0,\\\n prev_ax_position.height),'autoscalex_on',True))\n\n axn=extra_ax[i-1]\n\n axn.xaxis.set_visible(False) # hide the yaxis\n\n for side in axn.spines.keys(): # 'top', 'bottom', 'left', 'right'\n\n axn.spines[side].set_linewidth(1)\n\n axn.set_yticks(ticks)\n\n ticksnames=[float(str(x)) for x in self.values[i]]\n\n axn.set_yticklabels(\\\n [\"{:.2f}\".format(x).rstrip('0').rstrip('.') for x in ticksnames],\\\n rotation = 45)\n\n ylab=axn.set_ylabel(self.names[i])\n\n ylab.set_fontsize(10)\n\n axn.tick_params(axis='y',labelsize=10)\n\n axn.set_ylim(lim)\n\n else:\n\n raise ValueError(\"Axis can be 'x' or 'y'\")", "def set_range(self, **rangekwargs):\n\n if 'xrange' in rangekwargs.keys(): \n xrange = rangekwargs['xrange']\n else: \n xrange = [-50.0, 50.0] # (default)\n\n if 'yrange' in rangekwargs.keys(): \n yrange = rangekwargs['yrange']\n else: \n yrange = [0.0, 1.25 * self.hist_max]\n\n self.sub.set_xlim(xrange) \n self.sub.set_ylim(yrange) \n\n self.sub.set_xlabel(r\"$\\mathtt{d_{LOS}}$ (Mpc/h)\", fontsize=20)\n\n return None", "def _ps_init(self):\n\n self.ps_ax.set_xlim(-np.pi, np.pi)\n self.ps_ax.set_ylim(-10, 10)\n self.ps_ax.set_xlabel(\"degree [rad]\")\n self.ps_ax.set_ylabel(\"velocity [rad/s]\")\n for ap in self.ps_plots:\n ap.set_data([], [])\n return self.ps_plots", "def set_initial_dims(self, axis, insert=False):\n if insert:\n # Insert default values\n # Range value is (min, max, step) for the entire slider\n self._range.insert(axis, (0, 2, 1))\n # Point is the slider value if in point mode\n self._point.insert(axis, 0)\n # Interval value is the (min, max) of the slider selction\n # if in interval mode\n self._interval.insert(axis, (0, 1))\n self._mode.insert(axis, DimsMode.POINT)\n cur_order = [o if o < axis else o + 1 for o in self.order]\n self._order = [axis] + cur_order\n else:\n # Range value is (min, max, step) for the entire slider\n self._range[axis] = (0, 2, 1)\n # Point is the slider value if in point mode\n self._point[axis] = 0\n # Interval value is the (min, max) of the slider selction\n # if in interval mode\n self._interval[axis] = (0, 1)\n self._mode[axis] = DimsMode.POINT\n self._order[axis] = axis", "def update_plots_using_region(self):\n 
self.frequency_plot_graph.setXRange(\n *self.linear_region.getRegion(), padding=0)\n self.resistance_graph.setXRange(\n *self.linear_region.getRegion(), padding=0)\n self.temperature_plot_graph.setXRange(\n *self.linear_region.getRegion(), padding=0)\n self.pressure_plot_graph.setXRange(\n *self.linear_region.getRegion(), padding=0)\n self.humidity_plot_graph.setXRange(\n *self.linear_region.getRegion(), padding=0)", "def _handle_axes(self, drawable, option):\n # If we already have an axes object, ignore this one\n if self._axes_object is not None:\n return\n\n # Grab the histogram used for axes style/range manipulation\n if is_stack(drawable) or is_graph(drawable):\n axes_histogram = drawable.GetHistogram()\n else:\n axes_histogram = drawable\n\n # Grab the histogram used for title manipulation\n if is_stack(drawable):\n title_histogram = drawable.GetHists()[0]\n else:\n title_histogram = drawable\n\n # Set the plot title\n title_histogram.SetTitle(self._title)\n\n # Grab axes\n x_axis, y_axis = axes_histogram.GetXaxis(), axes_histogram.GetYaxis()\n\n # Grab titles from first histogram if not set explicitly\n if self._x_title is None:\n self._x_title = title_histogram.GetXaxis().GetTitle()\n if self._y_title is None:\n self._y_title = title_histogram.GetYaxis().GetTitle()\n\n # Style x-axis, or hide it if this plot has a ratio plot\n if self._x_range is not None:\n x_axis.SetRangeUser(*self._x_range)\n if self._ratio_plot:\n x_axis.SetLabelOffset(999)\n x_axis.SetTitleOffset(999)\n else:\n x_axis.SetTitle(self._x_title)\n x_axis.SetTitleSize(self.PLOT_X_AXIS_TITLE_SIZE)\n x_axis.SetTitleOffset(self.PLOT_X_AXIS_TITLE_OFFSET)\n x_axis.SetLabelSize(self.PLOT_X_AXIS_LABEL_SIZE)\n if self._x_integer_ticks:\n x_axis.SetNdivisions(11) # hack for integer ticks \n\n # Style y-axis\n y_axis.SetTitle(self._y_title)\n y_axis.SetLabelFont(self.PLOT_ATLAS_STAMP_TEXT_FONT)\n y_axis.SetTitleSize(\n (self.PLOT_Y_AXIS_TITLE_SIZE_WITH_RATIO\n if self._ratio_plot\n else self.PLOT_Y_AXIS_TITLE_SIZE)\n )\n y_axis.SetTitleOffset(\n (self.PLOT_Y_AXIS_TITLE_OFSET_WITH_RATIO\n if self._ratio_plot\n else self.PLOT_Y_AXIS_TITLE_OFFSET)\n )\n y_axis.SetNdivisions(5,5,0)\n \n # set axis text sizes \n if self._ratio_plot:\n y_axis.SetLabelSize(self.PLOT_Y_AXIS_LABEL_SIZE_WITH_RATIO)\n else:\n y_axis.SetLabelSize(self.PLOT_Y_AXIS_LABEL_SIZE) \n y_axis.SetTitleSize(self.PLOT_Y_AXIS_TITLE_SIZE)\n y_axis.SetTitleOffset(self.PLOT_RATIO_Y_AXIS_TITLE_OFFSET)\n\n # Redraw the drawable with the new style\n drawable.Draw(option)", "def set_visualization_range(self, start: int, end: int):\n self.__range = (start, end)", "def render_range_init():\n\n # Adding/Checking ftrack render range attribute\n defaultRenderGlobals = pm.PyNode(\"defaultRenderGlobals\")\n render_range_set = False\n if hasattr(defaultRenderGlobals, \"ftrackRenderRangeSet\"):\n attr = pm.Attribute(\"defaultRenderGlobals.ftrackRenderRangeSet\")\n render_range_set = attr.get()\n else:\n pm.addAttr(\n defaultRenderGlobals,\n longName=\"ftrackRenderRangeSet\",\n defaultValue=True,\n attributeType=\"bool\"\n )\n\n if not render_range_set:\n\n task = ftrack.Task(os.environ[\"FTRACK_TASKID\"])\n\n startFrame = float(task.getParent().get(\"fstart\"))\n endFrame = float(task.getParent().get(\"fend\"))\n\n handles = float(task.getParent().get(\"handles\"))\n\n mc.warning(\n \"Setting render range to {0} {1} \".format(startFrame, endFrame)\n )\n\n # Add handles to start and end frame\n hsf = startFrame - handles\n hef = endFrame + handles\n\n 
defaultRenderGlobals.animation.set(True)\n defaultRenderGlobals.animationRange.set(1)\n defaultRenderGlobals.startFrame.set(hsf)\n defaultRenderGlobals.endFrame.set(hef)\n\n # Vray specific resolution\n if pm.objExists(\"vraySettings\"):\n vray_settings = pm.PyNode(\"vraySettings\")\n vray_settings.animType.set(1)", "def _plot_init(self):\n pass", "def _plot_init(self):\n pass", "def py_apply_limits(self, plot):\n if any(x is not None for x in self.x_lim):\n if self.x_lim[0] is not None: # at least left?\n if self.x_lim[1] is not None: # left and right?\n plot.set_xlim(left=self.x_lim[0], right=self.x_lim[1])\n else:\n plot.set_xlim(left=self.x_lim[0])\n else: # just right\n plot.set_xlim(rigt=self.x_lim[1])\n if any(y is not None for y in self.y_lim):\n if self.y_lim[0] is not None: # at least bottom?\n if self.y_lim[1] is not None:\n plot.set_ylim(bottom=self.y_lim[0], top=self.y_lim[1])\n else:\n plot.set_ylim(bottom=self.y_lim[0])\n else:\n plot.set_ylim(top=self.y_lim[1])", "def init_axes(self):\n plt.switch_backend(\"cairo\")\n fig = plt.figure(figsize=(15,10))\n ax = fig.add_axes([0.05, 0.15, 0.9, 0.80,])\n return (fig, ax)", "def init_plot(self, num_axes):\r\n self.i = []\r\n self.val = []\r\n plt.ion()\r\n self.axes = plt.gca()\r\n self.lines =[]\r\n\r\n for i in range(num_axes):\r\n self.val.append([])\r\n self.lines.append([])\r\n self.lines[i], = self.axes.plot([], self.val[0], '-', c=[random.random() for _ in range(3)], linewidth=1.5, markersize=4)", "def setlimits(self, Xlim=[], Ylim=[]):\n self.data['Xmin'] = Xlim[0]\n self.data['Xmax'] = Xlim[1]\n self.data['Ymin'] = Ylim[0]\n self.data['Ymax'] = Ylim[1]", "def setAxisParts(lowx='all', lefty='all', upx='ticks', righty='ticks'):\n partdict = {'none':'NONE','lines':'LINE','ticks':'TICKS',\n 'labels':'LABELS', 'all':'NAME'} \n dislin.setgrf(partdict[lowx], partdict[lefty],\\\n partdict[upx], partdict[righty])", "def _init(self) -> List[PlotType]:\n self.plots[0].set_data([], [], 'bx', markersize=5)\n self.plots[1].set_data([], [], 'r.', markersize=15)\n return self.plots", "def __init__(self, axis1, axis2=None, bins=100, same_scale=False,\n axis1_values=None, axis2_values=None, **kwargs):\n self.same_scale = same_scale\n\n self.axis1 = axis1\n self.axis1_limits = None\n\n if isinstance(axis1_values, (float, int)):\n axis1_values = [axis1_values]\n self.axis1_values = axis1_values\n\n self.axis2 = axis2\n self.axis2_limits = None\n if isinstance(axis2_values, (float, int)):\n axis2_values = [axis2_values]\n self.axis2_values = axis2_values\n\n self.bins = bins\n\n self.plot_options = kwargs", "def setValues(self, values):\n if values is not None:\n self.scale_min, self.scale_max = values\n if self.scale_min is None:\n self.scale_min = self.start\n if self.scale_max is None:\n self.scale_max = self.end\n else:\n self.scale_min = self.start\n self.scale_max = self.end\n self.emitRange()\n self.updateDisplayValues()\n self.update()", "def setRange(self, x_range, y_range):\n self._visualiser._plt.setRange(xRange=x_range, yRange=y_range)", "def __init__(self, axes: int):\n self.axes = axes", "def _update_axes(self):\n data_shape = self.data.shape\n if len(self.axes) < self.data.ndim + 1:\n self._axes.append(Axis())\n for index in range(self.data.ndim):\n if len(self.axes[index].values) != data_shape[index]:\n self.axes[index].values = np.arange(data_shape[index],\n dtype=np.float64)", "def setup_axes():\n\taxes = visuals.subplots(1, 2, figsize = (14, 
7))\n\taxes[1].set_yscale(\"log\")\n\taxes[0].set_xlabel(\"[Fe/H]\")\n\taxes[0].set_ylabel(\"[Sr/Fe]\")\n\taxes[1].set_xlabel(\"[Sr/Fe]\")\n\taxes[1].set_ylabel(\"Stellar Probability Density\")\n\taxes[0].set_xlim([-2.2, 0.2])\n\taxes[0].set_ylim([-2.4, 0.4])\n\taxes[1].set_xlim([-1.4, 0.4])\n\taxes[1].set_ylim([0.05, 50])\n\treturn axes", "def set_up(self):\n self.h, = self.ax.plot(self.x, lw=2)\n self.ax.set_ylim(0,100)\n self.ax.set_xlim(0,100)\n self.ax.title.set_text(self.config[\"title\"])\n self.ax.set_xlabel(self.config[\"x_label\"])\n self.ax.set_ylabel(self.config[\"y_label\"])", "def autoHistogramRange(self):\n self.vb.enableAutoRange(self.vb.XAxis, True)\n self.vb.enableAutoRange(self.vb.YAxis, True)\n # self.range = None\n # self.updateRange()\n # self.vb.setMouseEnabled(False, False)\n\n # def updateRange(self):\n # self.vb.autoRange()\n # if self.range is not None:\n # self.vb.setYRange(*self.range)\n # vr = self.vb.viewRect()\n\n # self.region.setBounds([vr.top(), vr.bottom()])", "def setupPlotVariables(self):\n\n ### Borrowed from Thomas' plot routines\n self.plotLabels = [r'$m_1$', r'$m_2$', r'eccentricity', \\\n r'period (days)', \\\n r'inclination (rad)',r'$\\omega$ (rad)',r'$t_0$',r'$\\alpha$ (rad)']\n\n ### Change these to update the plot ranges for each\n ### parameter. \n angOut = np.pi+0.3\n self.plotLimsLo = [1.0, -1.0, -0.2, -1.0, -angOut, -angOut, -10,0]\n self.plotLimsHi = [2.2, 10.0, 1.2, 35.0, angOut, angOut, 10,1.2]\n\n ### We specify the method for the uniformly-spaced grid. If we\n ### want to make one of these logspace (say) we just change\n ### the method identified in the appropriate place in the\n ### list.\n nMeth = len(self.plotLimsLo)\n self.plotSpacerMethods = [np.linspace for i in range(nMeth)]\n\n self.plotNfine = 1000 ### number of fine points to use\n self.plotNcols = 3 ### number of columns in the plot\n\n self.plotNrows = int(np.ceil(nMeth/float(self.plotNcols)) )", "def _set_axes(self):\n self += helper.line(stroke=\"black\", x1=self.__dict__['x'], x2=self.__dict__['x'], y1=0, y2=self.__dict__['y']*2)\n self += helper.line(stroke=\"black\", x1=0, x2=self.__dict__['x']*2, y1=self.__dict__['y'], y2=self.__dict__['y'])", "def setAxisPageOrigin(x,y):\n dislin.axsorg(x,y)", "def setRange(self, x_range, y_range):\n pass", "def set_colormap_range(self):\n cmin = self.settingsWidget.ui.colormap_min\n cmax = self.settingsWidget.ui.colormap_max\n region = self.plot.getHistogramWidget().region\n\n if(self.sender() == region):\n cmin.setText(str(region.getRegion()[0]))\n cmax.setText(str(region.getRegion()[1]))\n return\n\n # Sometimes the values in the lineEdits are\n # not proper floats so we get ValueErrors\n try:\n # If necessary swap min and max\n if(float(cmin.text()) > float(cmax.text())):\n _tmp = cmin.text()\n cmin.setText(cmax.text())\n cmax.setText(_tmp)\n\n region = [float(cmin.text()), float(cmax.text())]\n self.plot.getHistogramWidget().region.setRegion(region)\n except ValueError:\n return", "def setup_mpl_visuals(self, axes=None) -> None:\n if axes is None:\n axes = self.subplot\n axes.patch.set_facecolor('white')\n axes.set_aspect('equal', 'box')\n axes.set_xlim(-10, 10, auto=True)\n axes.set_ylim(-10, 10, auto=True)\n # TODO: Make XYLim confort to window size/dimensions\n axes.set_xticks([])\n axes.set_yticks([])\n self.figure.subplots_adjust(bottom=0, top=1, left=0, right=1)\n axes.axis('off')", "def _set_min_max_values(self):\n\n p_1, p_2 = self.points[0], self.points[1]\n nb_dim = len(p_1.values)\n self._min_values = []\n 
self._max_values = []\n for d in range(nb_dim):\n d_min = min(p_1[d], p_2[d])\n d_max = max(p_2[d], p_2[d])\n self._min_values.append(d_min)\n self._max_values.append(d_max)", "def init_range_variables(self):\n self.range_start_vars_array = []\n self.range_end_vars_array = []\n\n for idx in range(len(self._pk_for_filter)):\n self.range_start_vars_array.append(\"@range_start_{}\".format(idx))\n self.range_end_vars_array.append(\"@range_end_{}\".format(idx))\n self.range_start_vars = \",\".join(self.range_start_vars_array)\n self.range_end_vars = \",\".join(self.range_end_vars_array)", "def axInit():\n ax.init()", "def __init__(self, xRange, yData):\n\n self.xRange = np.array(xRange)\n self.yData = np.array(yData)", "def setHistogramRange(self, mn, mx, padding=0.1):\n self.vb.enableAutoRange(self.vb.YAxis, False)\n if self.orientation == 'horizontal':\n self.vb.setXRange(mn, mx, padding)\n elif self.orientation == 'vertical':\n self.vb.setYrange(mn, mx, padding)\n # mn -= d*padding\n # mx += d*padding\n # self.range = [mn,mx]\n # self.updateRange()\n # self.vb.setMouseEnabled(False, True)\n # self.region.setBounds([mn,mx])", "def setX(ax1: Union[object, List], ax2: Union[object, List]):\n if type(ax1) is list:\n print(\"PlotHelpers: cannot use list as source to set Y axis\")\n return\n ax2 = _ax_tolist(ax2)\n # if type(ax2) is not list:\n # ax2 = [ax2]\n refx = ax1.get_xlim()\n for ax in ax2:\n ax.set_xlim(refx)", "def initPlotY(self):\n\n self.plotFineY = [np.array([]) for i in range(len(self.plotFineX))]", "def _set_axes_limits(ax, parameter, axis=\"x\"):\n\n lims = list(ax.get_xlim()) if axis == \"x\" else list(ax.get_ylim())\n\n if \"low\" in DEFAULT_BOUNDS[parameter]:\n low = DEFAULT_BOUNDS[parameter][\"low\"]\n if lims[0] < low:\n lims[0] = DEFAULT_BOUNDS[parameter][\"low\"]\n if \"high\" in DEFAULT_BOUNDS[parameter]:\n high = DEFAULT_BOUNDS[parameter][\"high\"]\n if lims[1] > high:\n lims[1] = DEFAULT_BOUNDS[parameter][\"high\"]\n\n if axis == \"x\":\n ax.set_xlim(lims)\n else:\n ax.set_ylim(lims)", "def update_xylims_extremes(xlims_extremes, ylims_extremes):\n xlims, ylims = plt.xlim(), plt.ylim()\n xlims_extremes = [\n min(xlims[0], xlims_extremes[0]),\n max(xlims[1], xlims_extremes[1])\n ]\n ylims_extremes = [\n min(ylims[0], ylims_extremes[0]),\n max(ylims[1], ylims_extremes[1])\n ]\n return xlims_extremes, ylims_extremes", "def setup_axes(self):\n fig = plt.figure(1)\n axs = fig.add_subplot(1, 1, 1)\n fig.clf()\n axs = plt.subplots(1, 2)\n ax1 : plt.axis = axs[0]\n ax2 : plt.axis = axs[1]\n fig.canvas.draw()\n \n line1_t, = ax1.plot([], label='train')\n line1_v, = ax1.plot([], label='val')\n\n ax1.set_title('Loss vs Iterations')\n ax1.set_xlabel('Iterations')\n ax1.set_ylabel('Loss')\n ax1.grid(True)\n ax1.autoscale()\n # ax1.legend()\n\n line2_t, = ax2.plot([], label='train')\n line2_v, = ax2.plot([], label='val')\n\n ax2.set_title('Accuracy vs Iterations')\n ax2.set_xlabel('Time')\n ax2.set_ylabel('Percent Accuracy')\n ax2.grid(True)\n ax2.autoscale()\n # ax2.legend()\n\n lines = [line1_t, line1_v, line2_t, line2_v]\n\n return fig, ax1, ax2, lines", "def set_rrng(self, rrng, subplot_index=(0,)):\n if self.axes is not None:\n self.axes[subplot_index].set_rmin(rrng[0])\n self.axes[subplot_index].set_rmax(rrng[1])\n self.rrng = rrng\n else:\n raise RuntimeError((\"Axes must be initialized before\" +\n \" changing limits!\"))", "def __init__(self, *args, **kwargs):\n # Set tick length to zero so azimuthal labels are not too offset\n # Change default radial axis formatter but keep 
default theta one\n super().__init__(*args, **kwargs)\n formatter = axistools.Formatter('auto')\n self.yaxis.set_major_formatter(formatter)\n self.yaxis.isDefault_majfmt = True\n for axis in (self.xaxis, self.yaxis):\n axis.set_tick_params(which='both', size=0)", "def plot_init(bottom_left: Point, top_right: Point):\n global figure\n global axes\n\n plt.ion()\n figure, axes = plt.subplots(1, 1)\n axes.set_xlim(bottom_left[0], top_right[0])\n axes.set_ylim(bottom_left[1], top_right[1])\n axes.set_aspect(\"equal\", adjustable=\"box\")", "def _create_axis(\n self,\n range_terms: Sequence[float],\n axis_config: dict,\n length: float,\n ) -> NumberLine:\n axis_config[\"length\"] = length\n axis = NumberLine(range_terms, **axis_config)\n\n # without the call to _origin_shift, graph does not exist when min > 0 or max < 0\n # shifts the axis so that 0 is centered\n axis.shift(-axis.number_to_point(self._origin_shift([axis.x_min, axis.x_max])))\n return axis", "def initialize_portrait(ax_pos=[0.1, 0.10, 0.8, 0.60]):\n fig = plt.figure(figsize=(1.5 * 5, 1.5 * 7))\n # axes constructor axes([left, bottom, width, height])\n ax = plt.axes(ax_pos)\n return fig, ax", "def __init__(self, xRange, yData, max_width, min_width, max_gap):\n\n super(Classic, self).__init__(xRange, yData)\n self.max_width = max_width\n self.min_width = min_width\n self.max_gap = max_gap", "def updateRange(self):\n if self.autoFollow:\n self.xrange = self.param.activeRange()\n self.xrange = self.xrange # call getter & setter again to verify limits", "def setup_hist(self):\n self.x_min = {}\n self.x_max = {}\n self.x_max_minus_min = {}\n self.dx = {}\n self.n_bins = {}\n\n self.histogram_edges = {}\n self.histogram_values = {}\n self.histogram_cdf = {}", "def auto_adjust_axes(self, *args):\n\n xmin, xmax = self.axes.get_xlim()\n ymin, ymax = self.axes.get_ylim()\n self.adjust_axes(xmin, ymin, xmax, ymax)", "def update_simulate_plot(self):\n a = self.plot_zoom.getViewBox().viewRange()\n self.plot_simulate.setXRange(a[0][0], a[0][1])\n self.plot_simulate.setYRange(a[1][0], a[1][1])", "def init_fig(self, fig):\n # type: (Figure) -> None\n self.init_vars()\n\n self.xs, self.ys = np.meshgrid(np.arange(0., self.max_iter+.5)-.5, np.arange(0., self.n_vars+.5)-.5)\n self.cs = np.zeros((self.n_vars, self.max_iter))\n\n self.ax = fig.add_subplot(111)\n self.ax.xaxis.set_major_locator(ticker.MaxNLocator(integer=True))\n self.ax.yaxis.set_ticks(np.arange(0, self.n_vars))\n self.ax.yaxis.set_ticklabels(self.var_names)\n\n self.ax.set_xlim([-.5, .5])\n self.ax.set_ylim([-.5, self.n_vars-.5])\n self.quad = self.ax.pcolormesh(self.xs, self.ys, self.cs,\n vmin=self.vmin, vmax=self.vmax, cmap=self.cmap, norm=self.norm)\n\n fig.colorbar(self.quad)\n\n self.ax.set_xlabel('Evaluation #')", "def setupVariableAxes(self):\n if self.var is None:\n return\n \n if (self.axisList is None):\n self.axisList = self.var.getAxisList()\n self.axisOrder = range(len(self.axisList))\n\n self.clear() \n self.setAxesNames()\n \n # Iterate through the variables axes & init each axis widget\n axisIndex = 0\n for axis, axisName in zip(self.axisList, self.axesNames):\n # Create the axis widget\n axisWidget = QAxis(axis, axisName, axisIndex, self)\n axisWidget.setAxisButtonText(axisName)\n self.axisWidgets.append(axisWidget)\n\n # Setup the layout for each axis\n row = self.gridLayout.rowCount()\n self.gridLayout.addWidget(axisWidget.getAxisButton(), row, 0)\n self.gridLayout.addWidget(axisWidget, row, 1) \n self.gridLayout.addWidget(axisWidget.getAxisOperationsButton(), row, 
2)\n\n # Create separator line between each axis widget\n vline = QtGui.QFrame()\n vline.setFrameStyle(QtGui.QFrame.HLine | QtGui.QFrame.Sunken)\n self.gridLayout.addWidget(vline, row+1, 0, 1,\n self.gridLayout.columnCount())\n\n axisIndex += 1\n\n self.gridLayout.setRowStretch(self.gridLayout.rowCount(), 1)", "def initialize(self, stepEntities):\n if self.fig is not None:\n self.fig = None\n if self.ax is not None:\n self.ax = None\n self.xCoordinates = []\n self.sourceName = []\n\n self.destinations = self.options['how']['how'].lower().split(',')\n\n if 'figureProperties' in self.options:\n key = 'figureProperties'\n if 'figsize' not in self.options[key]:\n self.options[key]['figsize'] = None\n else:\n if self.options[key]['figsize'] is not None:\n if isinstance(self.options[key]['figsize'], str):\n self.options[key]['figsize'] = tuple([float(elm) for elm in ast.literal_eval(self.options[key]['figsize'])])\n if 'dpi' not in self.options[key]:\n self.options[key]['dpi'] = 'None'\n if 'facecolor' not in self.options[key]:\n self.options[key]['facecolor'] = 'None'\n if 'edgecolor' not in self.options[key]:\n self.options[key]['edgecolor'] = 'None'\n if 'frameon' not in self.options[key]:\n self.options[key]['frameon'] = 'True'\n elif utils.stringIsTrue(self.options[key]['frameon']):\n self.options[key]['frameon'] = 'True'\n elif utils.stringIsFalse(self.options[key]['frameon']):\n self.options[key]['frameon'] = 'False'\n self.fig, self.ax = plt.subplots(num=self.name,\n figsize=self.options[key]['figsize'],\n dpi=ast.literal_eval(self.options[key]['dpi']),\n facecolor=self.options[key]['facecolor'],\n edgecolor=self.options[key]['edgecolor'],\n frameon=ast.literal_eval(self.options[key]['frameon']),\n **self.options[key].get('attributes', {}))\n else:\n self.fig, self.ax = plt.subplots(num=self.name)\n if 'screen' in self.destinations and display:\n self.fig.show()\n\n if self.dim == 3:\n self.ax.remove() # remove axis since it was initialized for 2-d plots\n self.ax = self.fig.add_subplot(111, projection='3d') # replace with 3-d axis\n\n # initialize lists\n for pltIndex in range(len(self.options['plotSettings']['plot'])):\n self.colorMapCoordinates[pltIndex] = None\n if 'y' in self.options['plotSettings']['plot'][pltIndex]:\n self.yCoordinates = []\n if 'z' in self.options['plotSettings']['plot'][pltIndex]:\n self.zCoordinates = []\n if 'clusterLabels' in self.options['plotSettings']['plot'][pltIndex]:\n self.clusterLabels = []\n if 'mixtureLabels' in self.options['plotSettings']['plot'][pltIndex]:\n self.mixtureLabels = []\n if 'attributes' in self.options['plotSettings']['plot'][pltIndex]:\n if 'mixtureMeans' in self.options['plotSettings']['plot'][pltIndex]['attributes']:\n self.mixtureMeans = []\n if 'mixtureCovars' in self.options['plotSettings']['plot'][pltIndex]['attributes']:\n self.mixtureCovars = []\n\n for pltIndex in range(len(self.options['plotSettings']['plot'])):\n # fill lists\n self.xCoordinates.append(self.options['plotSettings']['plot'][pltIndex]['x'].split(','))\n self.sourceName.append(self.xCoordinates [pltIndex][0].split('|')[0].strip())\n if 'y' in self.options['plotSettings']['plot'][pltIndex]:\n self.yCoordinates.append(self.options['plotSettings']['plot'][pltIndex]['y'].split(','))\n if self.yCoordinates[pltIndex][0].split('|')[0] != self.sourceName[pltIndex]:\n self.raiseAnError(IOError, f'Every plot can be linked to one Data set. x_coord source is {self.sourceName[pltIndex]}. 
y_coord source is {self.yCoordinates[pltIndex][0].split(\"|\")[0]}')\n if 'z' in self.options['plotSettings']['plot'][pltIndex]:\n self.zCoordinates.append(self.options['plotSettings']['plot'][pltIndex]['z'].split(','))\n if self.zCoordinates[pltIndex][0].split('|')[0] != self.sourceName[pltIndex]:\n self.raiseAnError(IOError, f'Every plot can be linked to one Data set. x_coord source is {self.sourceName[pltIndex]}. z_coord source is {self.zCoordinates [pltIndex][0].split(\"|\")[0]}')\n if 'clusterLabels' in self.options['plotSettings']['plot'][pltIndex]:\n self.clusterLabels.append(self.options['plotSettings']['plot'][pltIndex]['clusterLabels'].split(','))\n if self.clusterLabels[pltIndex][0].split('|')[0] != self.sourceName[pltIndex]:\n self.raiseAnError(IOError, f'Every plot can be linked to one Data set. x_coord source is {self.sourceName[pltIndex]}. clusterLabels source is {self.clusterLabels [pltIndex][0].split(\"|\")[0]}')\n if 'mixtureLabels' in self.options['plotSettings']['plot'][pltIndex]:\n self.mixtureLabels.append(self.options['plotSettings']['plot'][pltIndex]['mixtureLabels'].split(','))\n if self.mixtureLabels[pltIndex][0].split('|')[0] != self.sourceName[pltIndex]:\n self.raiseAnError(IOError, f'Every plot can be linked to one Data set. x_coord source is {self.sourceName[pltIndex]}. mixtureLabels source is {self.mixtureLabels [pltIndex][0].split(\"|\")[0]}')\n if 'colorMap' in self.options['plotSettings']['plot'][pltIndex]:\n self.colorMapCoordinates[pltIndex] = self.options['plotSettings']['plot'][pltIndex]['colorMap'].split(',')\n if self.colorMapCoordinates[pltIndex][0].split('|')[0] != self.sourceName[pltIndex]:\n self.raiseAnError(IOError, f'Every plot can be linked to one Data set. x_coord source is {self.sourceName[pltIndex]}. colorMap_coordinates source is {self.colorMapCoordinates[pltIndex][0].split(\"|\")[0]}')\n # update options\n if 'interpPointsY' not in self.options['plotSettings']['plot'][pltIndex]:\n self.options['plotSettings']['plot'][pltIndex]['interpPointsY'] = '20'\n if 'interpPointsX' not in self.options['plotSettings']['plot'][pltIndex]:\n self.options['plotSettings']['plot'][pltIndex]['interpPointsX'] = '20'\n if 'interpolationType' not in self.options['plotSettings']['plot'][pltIndex]:\n self.options['plotSettings']['plot'][pltIndex]['interpolationType'] = 'linear'\n elif self.options['plotSettings']['plot'][pltIndex]['interpolationType'] not in self.availableInterpolators:\n self.raiseAnError(IOError, f'surface interpolation unknown. Available are : {self.availableInterpolators}')\n if 'epsilon' not in self.options['plotSettings']['plot'][pltIndex]:\n self.options['plotSettings']['plot'][pltIndex]['epsilon'] = '2'\n if 'smooth' not in self.options['plotSettings']['plot'][pltIndex]:\n self.options['plotSettings']['plot'][pltIndex]['smooth'] = '0.0'\n if ('cmap' not in self.options['plotSettings']['plot'][pltIndex]) or (self.options['plotSettings']['plot'][pltIndex]['cmap'] is None):\n self.options['plotSettings']['plot'][pltIndex]['cmap'] = 'None'\n elif (self.options['plotSettings']['plot'][pltIndex]['cmap'] != 'None') and (self.options['plotSettings']['plot'][pltIndex]['cmap'] not in matplotlib.cm.datad):\n self.raiseAnError(IOError, f'The colorMap \"{self.options[\"plotSettings\"][\"plot\"][pltIndex][\"cmap\"]}\" does not exist... 
Available are {matplotlib.cm.datad.keys()}')\n if 'interpolationTypeBackUp' not in self.options['plotSettings']['plot'][pltIndex]:\n self.options['plotSettings']['plot'][pltIndex]['interpolationTypeBackUp'] = 'nearest'\n elif self.options['plotSettings']['plot'][pltIndex]['interpolationTypeBackUp'] not in self.availableInterpolators:\n self.raiseAnError(IOError, f'surface interpolation (BackUp) unknown. Available are : {self.availableInterpolators}')\n if 'attributes' in self.options['plotSettings']['plot'][pltIndex]:\n if 'mixtureMeans' in self.options['plotSettings']['plot'][pltIndex]['attributes']:\n self.mixtureMeans.append(self.options['plotSettings']['plot'][pltIndex]['attributes']['mixtureMeans'].split(','))\n if 'mixtureCovars' in self.options['plotSettings']['plot'][pltIndex]['attributes']:\n self.mixtureCovars.append(self.options['plotSettings']['plot'][pltIndex]['attributes']['mixtureCovars'].split(','))\n self.numberAggregatedOS = len(self.options['plotSettings']['plot'])\n # collect sources\n self.legacyCollectSources(stepEntities)\n # initialize here the base class\n super().initialize(stepEntities)\n # execute actions (we execute the actions here also because we can perform a check at runtime!!\n self.__executeActions()", "def ranges(self, ranges):\n \n self._ranges = ranges", "def _declare_auto_axes_idx(self):\n if not self.axes_idx:\n self.axes_idx = BiMapping(to_first=range(len(self.name_elements)), to_second=range(len(self.name_elements)))", "def plot_values(self, plot_widget, data, x_range, y_range):\r\n\r\n self.widget = plot_widget\r\n self.data = data\r\n self.x_range = x_range\r\n self.y_range = y_range\r\n\r\n self.widget.setXRange(0, self.x_range)\r\n self.widget.setYRange(0, self.y_range)\r\n self.widget.showGrid(x=True, y=True)\r\n self.widget.addLegend()\r\n # self.widget.setLabel('left', 'Value', units='y')\r\n self.widget.setLabel('bottom', 'Frames')\r\n self.widget.clear()\r\n\r\n for item in self.data.items():\r\n line = self.widget.plot(np.insert(item[1], 0, item[1][0]), pen=self.get_color(item[0]),\r\n symbolPen=self.get_color(item[0]), symbol='o', symbolSize=1, name=item[0])\r\n self.marker(self.widget)", "def tick_values(self, vmin, vmax):\n raise NotImplementedError('Derived must override')", "def init(self, data_len):\n self._t = 0\n self._data_len = data_len\n self._data = np.empty((data_len, 0))\n self._plots = [self._ax.plot([], [], '.', markersize=4, color='black', \n alpha=self._alpha)[0] for _ in range(data_len)]\n\n self._init = True", "def on_axes_update(self):\n\n if self.connected:\n tab_open = self.tab_open()\n\n # Update axes\n for i, series in enumerate(self.measurements_list):\n if i == tab_open:\n self.chart_list[i].setXRange(self.worker.start_range,\n self.worker.samples_count + NUM_GUI_SAMPLES, padding=0.075)\n\n # for i, series in enumerate(self.measurements_list):\n #\n # # An optimization to prevent unnecessary rendering\n # if i == tab_open:\n #\n # # Remove old x-axis\n # series.detachAxis(self.xaxis_list[i])\n # self.chart_list[i].chart().removeAxis(self.xaxis_list[i])\n # self.xaxis_list[i] = QValueAxis()\n #\n # # Add new x-axis\n # self.chart_list[i].chart().addAxis(self.xaxis_list[i], Qt.AlignBottom)\n # self.xaxis_list[i].setRange(self.worker.samples_count, self.worker.samples_count +\n # NUM_GUI_SAMPLES)\n # series.attachAxis(self.xaxis_list[i])", "def set_omega_range(self, omega_range=(0, 0, 1)):\n self.omegase = np.arange(*omega_range, dtype=float)\n self.qptanalyzer.omegase = self.omegase", "def reset(self):\n # Don't 
reset axis labels\n self.range = ((0, 2, 1),) * self.ndim\n self.current_step = (0,) * self.ndim\n self.order = tuple(range(self.ndim))", "def __draw(self):\n plt.rcParams.update(self.settings.rcParams)\n\n self.fig = plt.figure()\n self.ax = self.fig.add_axes(self.axes_rect)\n\n xs = np.arange(1, self.xmax+1)\n ys = [np.arange(0, self.ymax) for i in range(self.xmax)]\n\n self.ax.plot(xs, ys)\n\n self.__draw_xaxis()\n self.__draw_yaxis()\n\n self.__draw_annotations()\n self.__draw_eras()\n self.__draw_era_spans()\n self.__draw_watermark()\n self.__draw_title()\n self.__draw_image()\n self.__draw_max_age()\n\n self.ax.set_aspect('equal', share=True)", "def __init__(self, ranges=None, *args, **kwargs):\n self.ranges = ranges\n super(DiscreteGeneticAlgorithm, self).__init__(*args, **kwargs)", "def setup_axes2(fig, rect,tmin, tmax,zmin,zmax):\n\n tr =PolarAxes.PolarTransform()\n pi = np.pi\n\n angle_ticks = [(tmin, '%.2f' % tmin), (0,r'$0$'), (tmax, '%.2f' % tmax)]\n\n grid_locator1 = FixedLocator([v for v, s in angle_ticks])\n tick_formatter1 = DictFormatter(dict(angle_ticks))\n\n grid_locator2 = MaxNLocator(4)\n\n grid_helper = floating_axes.GridHelperCurveLinear(\n tr, extremes=(tmax, tmin, zmax, zmin),\n grid_locator1=grid_locator1,\n grid_locator2=grid_locator2,\n tick_formatter1=tick_formatter1,\n tick_formatter2=None)\n\n ax1 = floating_axes.FloatingSubplot(fig, rect, grid_helper=grid_helper)\n fig.add_subplot(ax1)\n\n # create a parasite axes whose transData in RA, cz\n aux_ax = ax1.get_aux_axes(tr)\n\n aux_ax.patch = ax1.patch # for aux_ax to have a clip path as in ax\n ax1.patch.zorder = 0.95 # but this has a side effect that the patch is\n # drawn twice, and possibly over some other\n # artists. So, we decrease the zorder a bit to\n # prevent this.\n\n return ax1, aux_ax", "def __init__(self):\n super(vanderpol_output,self).__init__()\n\n # add figure object for further use\n fig = plt.figure()\n self.ax = fig.add_subplot(111)\n self.ax.set_xlim([-2.5,2.5])\n self.ax.set_ylim([-10.5,10.5])\n plt.ion()\n self.sframe = None", "def calc_axes(self):\n self.y_axis = np.linspace(0, self.image_shape[0] - 1, self.image_shape[0])\n self.x_axis = np.linspace(0, self.image_shape[1] - 1, self.image_shape[1])\n if hasattr(self, 'pixelsize'):\n self.y_axis *= self.pixelsize[0]\n self.x_axis *= self.pixelsize[1]\n\n # %%RETRIEVING FUNCTIONS", "def _make_axes(self):\n ax_idx = self.atlas.space.axes_order.index(\"frontal\")\n\n # make acustom axes dict\n atlas_shape = np.array(self.atlas.metadata[\"shape\"]) * np.array(\n self.atlas.metadata[\"resolution\"]\n )\n z_range = np.array([-atlas_shape[2], 0])\n z_ticks = [\n (-v, str(np.abs(v).astype(np.int32)))\n for v in np.linspace(\n 0,\n atlas_shape[ax_idx],\n 10,\n )\n ]\n\n if self.atlas.atlas_name == \"allen_human_500um\":\n z_range = None\n z_ticks = None\n logger.debug(\n \"RENDER: manually forcing axes size for human atlas, atlas needs fixing\"\n )\n\n # make custom axes dict\n axes = dict(\n axesLineWidth=3,\n tipSize=0,\n xtitle=\"AP (μm)\",\n ytitle=\"DV (μm)\",\n ztitle=\"LR (μm)\",\n textScale=0.8,\n xTitleRotation=180,\n zrange=z_range,\n zValuesAndLabels=z_ticks,\n xyGrid=False,\n yzGrid=False,\n zxGrid=False,\n xUseBounds=True,\n yUseBounds=True,\n zUseBounds=True,\n xLabelRotation=180,\n yLabelRotation=180,\n zLabelRotation=90,\n )\n\n return axes", "def ticks(self, domain_min, domain_max):\n raise NotImplementedError()", "def set_axis1_limits(self, start, end):\n if start > end:\n raise ValueError(\"Start point over end for this 
view.\")\n\n self.axis1_limits = start, end", "def preprocess(self):\n\n if self.x_range == None:\n x_min = min(np.min(self.fx), np.min(self.gx))\n x_max = max(np.max(self.fx), np.max(self.gx))\n self.x_range = [x_min,x_max]\n\n f_inter = interpolate.interp1d(self.fx, self.fy, 'cubic', fill_value = 'extrapolate')\n g_inter = interpolate.interp1d(self.gx, self.gy, 'cubic', fill_value = 'extrapolate')\n fgx_new = np.linspace(self.x_range[0], self.x_range[1], self.N)\n fy_new = f_inter(fgx_new)\n gy_new = g_inter(fgx_new)\n\n self.fx, self.fy = fgx_new, fy_new\n self.gx, self.gy = fgx_new, gy_new", "def _update_ax(self):\n raise NotImplementedError(\"Implement _update_ax(self) in subclass\")", "def adjust_axes(self, xmin, ymin, xmax, ymax):\n\n # FlatCAMApp.App.log.debug(\"PC.adjust_axes()\")\n\n width = xmax - xmin\n height = ymax - ymin\n try:\n r = width / height\n except ZeroDivisionError:\n FlatCAMApp.App.log.error(\"Height is %f\" % height)\n return\n canvas_w, canvas_h = self.canvas.get_width_height()\n canvas_r = float(canvas_w) / canvas_h\n x_ratio = float(self.x_margin) / canvas_w\n y_ratio = float(self.y_margin) / canvas_h\n\n if r > canvas_r:\n ycenter = (ymin + ymax) / 2.0\n newheight = height * r / canvas_r\n ymin = ycenter - newheight / 2.0\n ymax = ycenter + newheight / 2.0\n else:\n xcenter = (xmax + xmin) / 2.0\n newwidth = width * canvas_r / r\n xmin = xcenter - newwidth / 2.0\n xmax = xcenter + newwidth / 2.0\n\n # Adjust axes\n for ax in self.figure.get_axes():\n if ax._label != 'base':\n ax.set_frame_on(False) # No frame\n ax.set_xticks([]) # No tick\n ax.set_yticks([]) # No ticks\n ax.patch.set_visible(False) # No background\n ax.set_aspect(1)\n ax.set_xlim((xmin, xmax))\n ax.set_ylim((ymin, ymax))\n ax.set_position([x_ratio, y_ratio, 1 - 2 * x_ratio, 1 - 2 * y_ratio])\n\n # Sync re-draw to proper paint on form resize\n self.canvas.draw()\n\n ##### Temporary place-holder for cached update #####\n self.update_screen_request.emit([0, 0, 0, 0, 0])", "def setup_anime(self, xmin_off=0, ymin_off=0, xmax_off=0, ymax_off=0):\n xtremes = [(min(x), min(y), max(x), max(y)) for x, y in self.artists]\n xmin = min(map(lambda lst: lst[0], xtremes)) + xmin_off\n ymin = min(map(lambda lst: lst[1], xtremes)) + ymin_off\n xmax = max(map(lambda lst: lst[2], xtremes)) + xmax_off\n ymax = max(map(lambda lst: lst[3], xtremes)) + ymax_off\n print(\"Xtremes:\", xmin, xmax, ymin, ymax)\n\n self.fig = plt.figure()\n self.ax = plt.axes(xlim=(xmin, xmax), ylim=(ymin, ymax),\n autoscale_on=False)\n self.ax.set_facecolor('k')\n self.ax.set(xlabel='x [a.u.]', ylabel='y [a.u.]',\n title='Projectile motion')\n self.ax.set_aspect('equal')\n self.ax.grid()\n\n for a in range(self.art_num):\n ln, = self.ax.plot([], [], '--')\n ln.set_clip_on(False)\n self.lines.append(ln)\n\n plt.gca().set_prop_cycle(None)\n\n for a in range(self.art_num):\n pt, = self.ax.plot([], [], 'o')\n pt.set_clip_on(False)\n self.points.append(pt)\n\n self.time_template = 'time = %d a.u.'\n self.time_text = self.ax.text(.5, .5, '', color='c',\n transform=self.ax.transAxes,\n horizontalalignment='center',\n verticalalignment='center')", "def __set_ax_prop(self, ax):\n ax.set_xticks([])\n ax.set_yticks([])", "def onScales(self):\n # Ensure that we can work\n plt = Plot.getPlot()\n if not plt:\n self.updateUI()\n return\n # Get again all the subwidgets (to avoid PySide Pitfalls)\n mw = self.getMainWindow()\n form = mw.findChild(QtGui.QWidget, \"TaskPanel\")\n form.all = self.widget(QtGui.QCheckBox, \"allAxes\")\n form.xAuto = 
self.widget(QtGui.QCheckBox, \"xAuto\")\n form.yAuto = self.widget(QtGui.QCheckBox, \"yAuto\")\n form.xSMin = self.widget(QtGui.QLineEdit, \"xMin\")\n form.xSMax = self.widget(QtGui.QLineEdit, \"xMax\")\n form.ySMin = self.widget(QtGui.QLineEdit, \"yMin\")\n form.ySMax = self.widget(QtGui.QLineEdit, \"yMax\")\n\n axesList = [plt.axes]\n if form.all.isChecked():\n axesList = plt.axesList\n if not self.skip:\n self.skip = True\n # X axis\n if form.xAuto.isChecked():\n for ax in axesList:\n ax.set_autoscalex_on(True)\n form.xSMin.setEnabled(False)\n form.xSMax.setEnabled(False)\n lim = plt.axes.get_xlim()\n form.xSMin.setText(str(lim[0]))\n form.xSMax.setText(str(lim[1]))\n else:\n form.xSMin.setEnabled(True)\n form.xSMax.setEnabled(True)\n try:\n xMin = float(form.xSMin.text())\n except:\n xMin = plt.axes.get_xlim()[0]\n form.xSMin.setText(str(xMin))\n try:\n xMax = float(form.xSMax.text())\n except:\n xMax = plt.axes.get_xlim()[1]\n form.xSMax.setText(str(xMax))\n for ax in axesList:\n ax.set_xlim((xMin, xMax))\n # Y axis\n if form.yAuto.isChecked():\n for ax in axesList:\n ax.set_autoscaley_on(True)\n form.ySMin.setEnabled(False)\n form.ySMax.setEnabled(False)\n lim = plt.axes.get_ylim()\n form.ySMin.setText(str(lim[0]))\n form.ySMax.setText(str(lim[1]))\n else:\n form.ySMin.setEnabled(True)\n form.ySMax.setEnabled(True)\n try:\n yMin = float(form.ySMin.text())\n except:\n yMin = plt.axes.get_ylim()[0]\n form.ySMin.setText(str(yMin))\n try:\n yMax = float(form.ySMax.text())\n except:\n yMax = plt.axes.get_ylim()[1]\n form.ySMax.setText(str(yMax))\n for ax in axesList:\n ax.set_ylim((yMin, yMax))\n plt.update()\n self.skip = False", "def set_axes(self, a):\r\n self.axes = a", "def _set_interpolators(self):\n self._interp_u = interp.RectBivariateSpline(self.x_points,\n self.y_points,\n self._u_int)\n self._interp_v = interp.RectBivariateSpline(self.x_points,\n self.y_points,\n self._v_int)", "def __init__(\n self,\n x,\n y,\n x_index=[],\n y_index=[],\n x_range=None,\n y_range=None,\n use_tex=False,\n segments=100,\n colour=\"blue\",\n ):\n if not isinstance(x, Variable) or not isinstance(y, Variable):\n raise ValueError(\"both x and y values must be Variables\")\n if not isinstance(x, Scalar):\n raise ValueError(\"x value must be a Space in order to be plotted\")\n if isinstance(y, Space) or len(y.shape) > 0:\n raise ValueError(\"y value must be a scalar and dependent\")\n\n self.x = x\n self.x_index = x_index\n self.x_range = x_range\n self.xs = np.linspace(self.x.lower, self.x.upper, segments)\n\n self.y = y\n self.y_index = y_index\n self.y_range = y_range\n self.y.export(x)\n\n self.use_tex = use_tex\n\n self.segments = segments\n self.colour = colour\n\n self.figure = None\n self.line = None\n\n if self.use_tex:\n rc(\"font\", **{\"family\": \"serif\", \"serif\": [\"Computer Modern\"]})\n rc(\"text\", usetex=True)", "def setAllAxisUnits(self,units): \n self.__axis_units__ = units", "def set_figure_variables(self):\n #self.fig.canvas.manager.full_screen_toggle()\n self.gs = self.fig.add_gridspec(2, 3)\n self.ax1 = self.fig.add_subplot(self.gs[0, 0])\n self.ax2 = self.fig.add_subplot(self.gs[0, 1])\n self.ax3 = self.fig.add_subplot(self.gs[0, 2])\n self.ax4 = self.fig.add_subplot(self.gs[1, 0])\n self.ax5 = self.fig.add_subplot(self.gs[1, 1])\n self.ax6 = self.fig.add_subplot(self.gs[1, 2])\n # histogram with indicator scoring\n self.ax1.set_xlabel(\"indicators\")\n self.ax1.set_ylabel(\"score (%)\")\n # graph with flood safety levels\n self.ax2.set_xlabel(\"dike section\")\n 
self.ax2.set_ylabel(\"chance of flooding occurrence\")\n # graph with water levels vs dike height\n self.ax3.set_xlabel(\"river length (meters)\")\n self.ax3.set_ylabel(\"height (meters)\")\n # graph with overall costs made\n self.ax6.set_ylabel(\"million Euros\")\n \n self.ax1.set_ylim([0, 100])\n self.ax2.set_ylim([0, 100])\n self.ax3.set_ylim([14, 18])\n self.ax6.set_ylim([0, 25000000])\n \n self.ax1.set_title(\"Overall score on indicators\")\n self.ax2.set_title(\"Flood safety levels\")\n self.ax3.set_title(\"Normative water levels vs dike crest height\")\n self.ax6.set_title(\"Budget spent\")\n \n self.x_pos = np.arange(len(self.indicators))\n self.ax1.set_xticks(self.x_pos)\n self.ax1.set_xticklabels(self.indicators)\n \n flood_safety_levels = [100, 200, 400, 600, 800, 1000, 1250]\n self.ax2.set_yticks(flood_safety_levels)\n self.ax2.set_yticklabels([\"1/\"+str(value) for value in flood_safety_levels])\n \n self.plot1 = None\n self.plot2 = None\n self.plot3 = None\n self.plot4 = None\n self.plot5 = None\n self.plot6 = None\n return", "def reinitialiseData(self):\n if self.arrayPlotData is not None:\n self.currentPosition = 0\n self.xs = scipy.linspace(0.0, self.numberOfPoints*self.resolution, self.numberOfPoints)\n self.cursorXS = self.getCurrentPositionArray()\n self.cursorVertical = scipy.array([self.verticalLimit,0.0])\n self.arrayPlotData.set_data(\"xs\",self.xs)\n self.array0 = scipy.zeros(self.numberOfPoints)\n self.array1 = scipy.zeros(self.numberOfPoints)\n self.array2 = scipy.zeros(self.numberOfPoints)\n self.array3 = scipy.zeros(self.numberOfPoints)\n self.array4 = scipy.zeros(self.numberOfPoints)\n self.array5 = scipy.zeros(self.numberOfPoints)\n self.array6 = scipy.zeros(self.numberOfPoints)\n self.array7 = scipy.zeros(self.numberOfPoints)\n self.channels = [self.array0,self.array1,self.array2,self.array3,\n self.array4,self.array5,self.array6,self.array7]\n self.updateArrayPlotData()", "def _set_dims(xs, ys, dmax):\n\n xmin = np.min(xs)\n xmax = np.max(xs)\n\n ymin = np.min(ys)\n ymax = np.max(ys)\n\n x_abs = np.abs(xmax - xmin)\n y_abs = np.abs(ymax - ymin)\n\n if x_abs > y_abs:\n step = x_abs / dmax\n x_dim_coords = np.arange(xmin + step, xmax + step, step)\n y_dim_coords = np.arange(ymin + step, ymax + step, step)\n else:\n step = y_abs / dmax\n y_dim_coords = np.arange(ymin + step, ymax + step, step)\n x_dim_coords = np.arange(xmin + step, xmax + step, step)\n\n # y_dim_coords must be flipped\n\n y_dim_coords = y_dim_coords[::-1]\n return x_dim_coords, y_dim_coords, [step, xmin, xmax, ymin, ymax]", "def updatePlot(self,*args):\n # set x limits\n timeDisplayOptions = {'10 minutes':10,'1 hour':60,'6 hours':6*60,'24 hours':24*60,'All':0}\n try:\n lastDatetime = mpl.dates.num2date(self.stage60K.get_xdata()[-1])\n firstDatetime = mpl.dates.num2date(self.stage60K.get_xdata()[0])\n except IndexError: # no data yet\n now = datetime.datetime.utcnow().toordinal()\n firstDatetime = mpl.dates.num2date(now)\n lastDatetime = firstDatetime\n xMin = lastDatetime-datetime.timedelta(minutes=timeDisplayOptions[self.wScale.get()])\n xMin = max([ firstDatetime, xMin ])\n if self.wScale.get() == 'All':\n xMin = firstDatetime\n xMinIndex = numpy.searchsorted( self.stage60K.get_xdata(), mpl.dates.date2num(xMin) )\n # rescale axes, with the x being scaled by the slider\n if self.toolbar._active == 'HOME' or self.toolbar._active == None:\n ymin,ymax = 10000000, -10000000\n lineAndVar = { self.stage60K: self.t60K,\n self.stage03K: self.t3K,\n self.stageGGG: self.tGGG,\n self.stageFAA: 
self.tFAA }\n if len(self.stage60K.get_xdata()) > 1:\n for line in lineAndVar.keys():\n if lineAndVar[line].get() == 0:\n line.set_visible(False)\n else:\n line.set_visible(True)\n ydata = line.get_ydata()[xMinIndex:-1]\n try:\n ymin = min(ymin, numpy.nanmin(ydata))\n ymax = max(ymax, numpy.nanmax(ydata))\n except ValueError as e:\n pass\n self.ax.set_xlim(xMin,lastDatetime)\n self.ax.set_ylim(ymin - (ymax-ymin)/10, ymax + (ymax-ymin)/10)\n hfmt = mpl.dates.DateFormatter('%H:%M:%S', tz=tz.tzlocal())\n self.ax.xaxis.set_major_formatter(hfmt)\n self.fig.autofmt_xdate()\n self.fig.tight_layout()\n #draw\n self.canvas.draw()", "def _get_extent_axes(self, x):\n if not hasattr(self, 'get_subplotspec'):\n return [self]\n y = ('y' if x == 'x' else 'x')\n idx = (0 if x == 'x' else 1)\n argfunc = (np.argmax if x == 'x' else np.argmin)\n irange = self._range_gridspec(x)\n axs = [ax for ax in self.figure._axes_main\n if ax._range_gridspec(x) == irange]\n if not axs:\n return [self]\n else:\n pax = axs.pop(argfunc([ax._range_gridspec(y)[idx] for ax in axs]))\n return [pax, *axs]", "def get_axis_vals(self):\n return self._x_axis, self._y_axis", "def __init__(self, ranges):\n if not ranges:\n raise Exception(\"You must supply at least one non-null sampling range\")\n if hasattr(ranges[0], \"__len__\"):\n assert all(len(x) == 2 for x in ranges)\n self.ranges = ranges\n else:\n assert len(ranges) > 1\n lows = [x for x in ranges[:-1]]\n highs = [x for x in ranges[1:]]\n myranges = []\n for i, pair in enumerate(zip(lows, highs)):\n if i % 2 == 0:\n myranges.append(pair)\n assert len(myranges) == len(ranges) // 2\n self.ranges = myranges", "def set_data(self, x = None, y = None):\n self.x_axis = x\n self.y_axis = y", "def _plot_setup(self, fig, ax):\n\n self._check_data_valid()\n\n if ax:\n self.fig = fig\n self.ax = ax\n else:\n self.fig = plt.figure()\n self.ax = self.fig.add_subplot(111, projection=self.wcs)\n\n # Set basic figure display options\n if self.options.get('grid', True):\n self.ax.coords.grid(color='white', alpha=0.5)\n\n if self.options.get('title', True):\n title = self.options.get('title', self.surveyname)\n self.ax.set_title(title, fontdict={'fontsize': 20, 'fontweight': 10})\n\n self.set_xlabel('RA (J2000)')\n self.set_ylabel('Dec (J2000)')\n\n # Set compact or extended label / tick configuration\n if self.options.get('compact', False):\n tickcolor = 'k' if np.nanmax(np.abs(self.data)) == np.nanmax(self.data) else 'gray'\n\n lon = self.ax.coords[0]\n lat = self.ax.coords[1]\n\n lon.display_minor_ticks(True)\n lat.display_minor_ticks(True)\n\n lon.set_ticks(number=5)\n lat.set_ticks(number=5)\n\n self.ax.tick_params(axis='both', direction='in', length=5, color=tickcolor)\n self.padlevel = self.options.get('ylabelpad', 5)\n\n # Set colourmap normalisation\n self.norm = self._get_cmap_normalisation()", "def _setup_plot(self):\n assert self.data is not None\n self.graph.reset()\n\n data, domain = self.data, self.data.domain\n self.graph.getAxis('bottom').setTicks([\n [(i+1, str(a)) for i, a in enumerate(self.graph_variables)]\n ])\n\n X = np.arange(1, len(self.graph_variables)+1)\n groups = []\n\n if not self.selected_classes:\n group_data = data[:, self.graph_variables]\n items, mean, meancurve, errorbar = self._plot_curve(\n X, QColor(Qt.darkGray), group_data,\n list(range(len(self.data))))\n groups.append(\n namespace(\n data=group_data,\n profiles=items,\n mean=meancurve,\n boxplot=errorbar)\n )\n else:\n var = domain[self.group_var]\n class_col_data, _ = data.get_column_view(var)\n 
group_indices = [np.flatnonzero(class_col_data == i)\n for i in range(len(self.classes))]\n\n for i, indices in enumerate(group_indices):\n if len(indices) == 0:\n groups.append(None)\n else:\n if self.classes:\n color = self.class_colors[i]\n else:\n color = QColor(Qt.darkGray)\n\n group_data = data[indices, self.graph_variables]\n items, mean, meancurve, errorbar = self._plot_curve(\n X, color, group_data, indices)\n\n groups.append(\n namespace(\n data=group_data, indices=indices,\n profiles=items, mean=meancurve,\n boxplot=errorbar)\n )\n\n self.__groups = groups\n self.__update_visibility()" ]
[ "0.71792495", "0.7129534", "0.68548447", "0.67877525", "0.6537042", "0.64943486", "0.641954", "0.6235867", "0.6223747", "0.6188791", "0.61873025", "0.61726326", "0.6160985", "0.6133905", "0.61263645", "0.61139727", "0.61138016", "0.61124223", "0.60551167", "0.60020185", "0.60020185", "0.59837145", "0.5980665", "0.59750354", "0.59656835", "0.5928879", "0.5917828", "0.590145", "0.58835524", "0.5881805", "0.58786243", "0.5868251", "0.58658785", "0.58440953", "0.5837681", "0.5803107", "0.5775271", "0.5739164", "0.5728998", "0.5694113", "0.5690952", "0.5681594", "0.56772095", "0.5670369", "0.56685215", "0.5665731", "0.5663582", "0.56583065", "0.56574965", "0.5639311", "0.56244123", "0.56203365", "0.5605737", "0.5597768", "0.5581192", "0.5565187", "0.5559198", "0.5550604", "0.5547985", "0.55429745", "0.55417603", "0.5539543", "0.55243945", "0.5516972", "0.55107445", "0.5498209", "0.548319", "0.54788977", "0.547823", "0.5468815", "0.5465744", "0.5465681", "0.54637796", "0.545075", "0.5415255", "0.54126", "0.5405933", "0.5405179", "0.5404775", "0.5396216", "0.5395568", "0.5373928", "0.53688484", "0.5360573", "0.5357853", "0.5356197", "0.5354375", "0.5347948", "0.5347383", "0.5336786", "0.5335964", "0.5335769", "0.5334999", "0.53234535", "0.5320465", "0.5312754", "0.531026", "0.52914023", "0.52910835", "0.5291049" ]
0.7375276
0
Setup a host for proper deployment. Assuming Debian Linux.
def setup():
    debs = ("python-setuptools", "apache2", "libapache2-mod-wsgi")
    require("hosts", provided_by=[production, staging])
    sudo("apt-get install %s" % " ".join(debs))
    sudo("easy_install virtualenv pip")
    sudo("mkdir -p %(path)s" % env)
    with cd("%(path)s" % env):
        sudo("mkdir -p releases; mkdir -p packages")
        sudo("virtualenv --no-site-packages .")
    sudo("mkdir -p /var/log/twit-demo; chown www-data:www-data /var/log/twit-demo")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_machine():\n # Initial setup and package install.\n sudo(\"aptitude update\")\n sudo(\"aptitude -y install git-core python-dev python-setuptools \"\n \"postgresql-dev postgresql-client build-essential \"\n \"libpq-dev subversion mercurial apache2 \"\n \"libapache2-mod-wsgi\")", "def setup():\n require('hosts', provided_by=[prod])\n require('code_root')\n sudo('apt-get update')\n sudo('apt-get install -y python-setuptools')\n sudo('easy_install pip')\n sudo('pip install virtualenv')\n sudo('aptitude install -y apache2')\n sudo('aptitude install -y libapache2-mod-wsgi')\n sudo('apt-get install -y nginx')\n update_webserver_config()\n sudo('mkdir -p %s; cd %s; virtualenv .;' % (env.code_root, env.code_root))\n sudo('cd %s;mkdir releases; mkdir shared; mkdir packages; mkdir shared/media; mkdir shared/media/file;' % (env.code_root))\n deploy()", "def demo_host(branch='master'):\n env.hosts = ['demo.starzel.de']\n env.port = '30363'\n env.deploy_user = 'zope'\n env.branch = branch\n env.homedir = '/home/%s/' % env.deploy_user\n env.directory = '/home/%s/demo.starzel.de/' % env.deploy_user", "def demo_host(branch='master', latest=False, python3=True):\n env.hosts = ['demo.plone.de']\n env.domain = 'http://demo.plone.org'\n env.zeoclient_port = '8082'\n env.port = '30363'\n env.deploy_user = 'zope'\n env.branch = branch\n env.latest = latest\n env.python3 = python3\n env.homedir = '/home/%s/' % env.deploy_user\n env.directory = '/home/%s/demo.plone.de/' % env.deploy_user", "def setup():\n\n with cd(env.homedir):\n\n # clone repository from github\n sudo('git clone https://github.com/starzel/demo.starzel.de.git', user=env.deploy_user) # noqa: E501\n\n with cd(env.directory):\n\n # requirements\n # sudo('python python-dev build-essential zlib1g-dev libssl-dev libxml2-dev libxslt1-dev wv poppler-utils libtiff5-dev libjpeg62-dev zlib1g-dev libfreetype6-dev liblcms1-dev libwebp-dev') # noqa: E501\n\n # prepare buildout\n sudo('ln -s local_production.cfg local.cfg', user=env.deploy_user)\n sudo('echo -e \"[buildout]\\nlogin = admin\\npassword = admin\" > secret.cfg', user=env.deploy_user) # noqa: E501\n\n # bootstrap and run bildout once\n sudo('./bin/pip install -r requirements.txt', user=env.deploy_user)\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start supervisor which starts plone instance also\n sudo('./bin/supervisord', user=env.deploy_user)", "def setup_server():\n\n require('environment', provided_by=env.environments)\n upgrade_packages()\n # Install required system packages for deployment, plus some extras\n # Install pip, and use it to install virtualenv\n install_packages()\n sudo(\"easy_install -i http://d.pypi.python.org/simple -U pip\")\n sudo(\"pip install -i http://d.pypi.python.org/simple -U virtualenv\")\n create_postgis_template()\n create_db_user()\n create_db()\n create_webserver_user()", "def setup():\n # Ignore errors if the user already exists.\n with settings(user=env.ROOT_USER, password=env.ROOT_PASS, warn_only=True):\n # Create a new system user.\n result = execute('system.user_create',\n env.SYSTEM_USER,\n env.SYSTEM_PASS)\n\n # Upload SSH key for the new system.\n if result.get(env.host):\n execute('system.user_sshkey', env.SYSTEM_USER)\n\n ##############################\n # RUN SERVER UPDATES\n ##############################\n\n execute('system.update')\n\n ##############################\n # BASIC SERVER SECURITY\n ##############################\n\n # Disable password authentication.\n execute('system.ssh_disable_password_authentication')\n # Disable 
root login.\n execute('system.ssh_disable_root_login')\n # Restart SSH.\n execute('system.ssh_restart')\n\n # Install ufw\n execute('ufw.install')\n # Deny incoming connections.\n execute('ufw.default')\n # Allow SSH (22/tcp) access.\n execute('ufw.allow', 'ssh')\n # Allow HTTP (80/tcp) access.\n execute('ufw.allow', 'http')\n # Allow HTTPS (443/tcp) access.\n execute('ufw.allow', 'https')\n # Enable the firewall.\n execute('ufw.enable')\n\n # Install supervisor\n execute('supervisor.install')\n\n # Install mercurial\n execute('mercurial.install')\n\n # Install nginx\n execute('nginx.install')\n execute('nginx.config')\n execute('nginx.restart')\n\n # Setup Python Environment.\n require('PYTHON_VENV')\n\n execute('python.dev')\n execute('python.venv', env.PYTHON_VENV)\n execute('python.install', env.PYTHON_VENV)\n\n # Deploy the project.\n #\n # fab --config=config.conf project.clone \\\n # project.config \\\n # project.migrate \\\n # project.collectstatic \\\n # project.restart\n execute('project.clone')\n execute('project.config')\n execute('project.migrate')\n execute('project.collectstatic')\n execute('project.restart')\n\n execute('supervisor.restart')\n execute('supervisor.reread')\n execute('supervisor.update')", "def keystonehost():\n env.cd = cd\n env.run = run\n env.hosts = settings.HOSTS['keystone']\n env.exists = exists", "def set_hostname(dut, host_name):\n cmd = \"sudo hostname {}\".format(host_name)\n st.config(dut, cmd)\n return", "def bootstrap():\n validate_configurator_version()\n\n # put new mkinitcpio.conf in place\n run(\"mv /etc/mkinitcpio.conf.pacnew /etc/mkinitcpio.conf\")\n sed(\"/etc/mkinitcpio.conf\",\n 'MODULES=\"\"',\n 'MODULES=\"xen-blkfront xen-fbfront xen-kbdfront xen-netfront xen-pcifront xenbus_probe_frontend xenfs\"') # nopep8\n sed(\"/etc/mkinitcpio.conf\",\n 'HOOKS=\"base udev autodetect modconf block filesystems keyboard fsck',\n 'HOOKS=\"base udev block filesystems shutdown autodetect\"')\n\n # upgrade pacakges\n run(\"pacman --noconfirm -Syu\")\n\n # put new pacman.conf in place\n run(\"mv /etc/pacman.conf.pacnew /etc/pacman.conf\")\n\n # install essential packages\n run(\"pacman --noconfirm -S base-devel\")\n run(\"pacman --noconfirm -S curl git rsync\")\n\n # create a user, named 'aur', to safely install AUR packages under fakeroot\n # uid and gid values auto increment from 1000\n # to prevent conficts set the 'aur' user's gid and uid to 902\n run(\"groupadd -g 902 aur && useradd -m -u 902 -g 902 -G wheel aur\")\n\n # allow users in the wheel group to sudo without a password\n uncomment(\"/etc/sudoers\", \"wheel.*NOPASSWD\")\n\n # install yaourt and upgrade non-pacman rackspace installed packages\n sudo(\"rm -rf /home/aur/.builds && mkdir /home/aur/.builds/\", user=\"aur\")\n with cd(\"/home/aur/.builds/\"):\n sudo(\"bash <(curl aur.sh) -si --noconfirm package-query yaourt\", user=\"aur\")\n sudo(\"yaourt --noconfirm -S xe-guest-utilities\", user=\"aur\")\n\n # allow fabric to sftp with contrib.files.put\n # http://stackoverflow.com/questions/10221839/cant-use-fabric-put-is-there-any-server-configuration-needed # nopep8\n # change before reboot because then the sshd config will be reloaded\n # sed(\"/etc/ssh/sshd_config\", \"Subsystem sftp /usr/lib/openssh/sftp-server\",\n # \"Subsystem sftp internal-sftp\")\n\n # systemd\n sed(\"/boot/grub/menu.lst\",\n \"kernel /boot/vmlinuz-linux root=/dev/xvda1 ro console=hvc0\",\n \"kernel /boot/vmlinuz-linux root=/dev/xvda1 ro console=hvc0 init=/usr/lib/systemd/systemd\")\n reboot()\n if not 
contains(\"/proc/1/comm\", \"systemd\"):\n abort(\"systemd is not installed properly\")\n server = [s for s in env.bootmachine_servers if s.public_ip == env.host][0]\n run(\"hostnamectl set-hostname {0}\".format(server.name))\n run(\"mv /etc/locale.gen.pacnew /etc/locale.gen.conf\")\n uncomment(\"/etc/locale.gen\", \"en_US.UTF-8 UTF-8\")\n uncomment(\"/etc/locale.gen\", \"en_US ISO-8859-1\")\n run(\"locale-gen\")\n run(\"localectl set-locale LANG='en_US.utf8'\")\n run(\"timedatectl set-timezone US/Central\")", "def init_host(self, host):\n LOG.debug(\"init_host\")", "def create_host(self, wwpns, hostname):\n\n if not wwpns or len(wwpns) == 0 or not hostname or len(hostname) == 0:\n ex_args = {'wwpns': wwpns,\n 'hostname': hostname}\n raise SVCCreateHostParameterError(**ex_args)\n\n ports = ':'.join(wwpns)\n # get the host shortname.\n hostname_str = hostname.split('.')[0]\n LOG.debug(\"enter: create_host(): wwpns=%(wwpns)s\"\n \" hostname=%(hostname)s\"\n % {'wwpns': ports, 'hostname': hostname_str})\n\n rand_id = str(random.randint(0, 99999999)).zfill(8)\n host_name = '%s-%s' % (self._hostname_prefix(hostname_str), rand_id)\n\n cmd = 'mkhost -name %(host_name)s -hbawwpn %(ports)s -force' % locals()\n\n output, err_output = self._svc_command(cmd)\n\n if err_output:\n # err_output should be a list type\n if isinstance(err_output, types.ListType):\n err_msg = err_output[0]\n else:\n err_msg = err_output\n err_code = err_msg.split()[0]\n\n if err_code and err_code == 'CMMVC6035E':\n # host has been defined on the storage, but we don't see it.\n # return None and ask caller to run cfgdev to relogin to SAN\n # and retry get_host_from_wwpns().\n return None\n\n msg = (_(\"create_host() failure cmd=%(cmd)s, error:%(err_output)s.\"\n \" Make sure host and storage are zoned properly and check\"\n \" SAN fabric connectivity\") % locals())\n\n LOG.exception(msg)\n ex_args = {'host_name': hostname_str,\n 'err_output': err_output}\n raise SVCCreateHostFailed(**ex_args)\n\n return host_name", "def setUp(self):\n self.os = \"debian\"", "def setup_dhcp_env(device):\n raise NotImplementedError", "def _install(self, host):\n pass", "def assimilate(ip_addr, config, instance_data, deploypass):\n env.host_string = ip_addr\n env.user = 'root'\n env.abort_on_prompts = True\n env.disable_known_hosts = True\n\n # Sanity check\n run(\"date\")\n\n distro = config.get('distro')\n # Set our hostname\n hostname = \"{hostname}\".format(**instance_data)\n run(\"hostname %s\" % hostname)\n if distro in ('ubuntu', 'debian'):\n run(\"echo %s > /etc/hostname\" % hostname)\n\n # Resize the file systems\n # We do this because the AMI image usually has a smaller filesystem than\n # the instance has.\n if 'device_map' in config:\n for mapping in config['device_map'].values():\n run('resize2fs {dev}'.format(dev=mapping['instance_dev']))\n\n # Set up /etc/hosts to talk to 'puppet'\n hosts = ['127.0.0.1 %s localhost' % hostname,\n '::1 localhost6.localdomain6 localhost6']\n hosts = StringIO.StringIO(\"\\n\".join(hosts) + \"\\n\")\n put(hosts, '/etc/hosts')\n\n if distro in ('ubuntu', 'debian'):\n put('releng.list', '/etc/apt/sources.list')\n run(\"apt-get update\")\n run(\"apt-get install -y --allow-unauthenticated puppet\")\n run(\"apt-get clean\")\n else:\n # Set up yum repos\n run('rm -f /etc/yum.repos.d/*')\n put('releng-public.repo', '/etc/yum.repos.d/releng-public.repo')\n run('yum clean all')\n run('yum install -q -y puppet')\n\n run(\"wget -O /root/puppetize.sh 
https://hg.mozilla.org/build/puppet/raw-file/default/modules/puppet/files/puppetize.sh\")\n run(\"chmod 755 /root/puppetize.sh\")\n put(StringIO.StringIO(deploypass), \"/root/deploypass\")\n put(StringIO.StringIO(\"exit 0\\n\"), \"/root/post-puppetize-hook.sh\")\n\n puppet_master = random.choice(instance_data[\"puppet_masters\"])\n run(\"PUPPET_SERVER=%s /root/puppetize.sh\" % puppet_master)\n\n if 'home_tarball' in instance_data:\n put(instance_data['home_tarball'], '/tmp/home.tar.gz')\n with cd('~cltbld'):\n sudo('tar xzf /tmp/home.tar.gz', user=\"cltbld\")\n sudo('chmod 700 .ssh', user=\"cltbld\")\n sudo('chmod 600 .ssh/*', user=\"cltbld\")\n run('rm -f /tmp/home.tar.gz')\n\n if \"buildslave_password\" in instance_data:\n # Set up a stub buildbot.tac\n sudo(\"/tools/buildbot/bin/buildslave create-slave /builds/slave \"\n \"{buildbot_master} {name} \"\n \"{buildslave_password}\".format(**instance_data), user=\"cltbld\")\n if instance_data.get(\"hg_shares\"):\n hg = \"/tools/python27-mercurial/bin/hg\"\n for share, bundle in instance_data['hg_shares'].iteritems():\n target_dir = '/builds/hg-shared/%s' % share\n sudo('rm -rf {d} && mkdir -p {d}'.format(d=target_dir), user=\"cltbld\")\n sudo('{hg} init {d}'.format(hg=hg, d=target_dir), user=\"cltbld\")\n hgrc = \"[path]\\n\"\n hgrc += \"default = http://hg.mozilla.org/%s\\n\" % share\n put(StringIO.StringIO(hgrc), '%s/.hg/hgrc' % target_dir)\n run(\"chown cltbld: %s/.hg/hgrc\" % target_dir)\n sudo('{hg} -R {d} unbundle {b}'.format(hg=hg, d=target_dir,\n b=bundle), user=\"cltbld\")\n\n run(\"reboot\")", "def deploy():\n require(\"hosts\", provided_by=[production, staging])\n env.release = time.strftime(\"%Y-%m-%d_%H:%M:%S\")\n upload_tar_from_git()\n install_requirements()\n setup_webserver()\n symlink_current_release()\n restart_webserver()", "def _prepare_hosts(container_dir, app):\n etc_dir = os.path.join(container_dir, 'overlay', 'etc')\n fs.mkdir_safe(etc_dir)\n new_hosts = os.path.join(etc_dir, 'hosts')\n new_hosts_orig = os.path.join(etc_dir, 'hosts.original')\n new_host_aliases = os.path.join(etc_dir, 'host-aliases')\n\n shutil.copyfile(\n '/etc/hosts',\n new_hosts\n )\n shutil.copyfile(\n '/etc/hosts',\n new_hosts_orig\n )\n fs.mkdir_safe(new_host_aliases)\n\n pwnam = pwd.getpwnam(app.proid)\n os.chown(new_host_aliases, pwnam.pw_uid, pwnam.pw_gid)", "def test_deploy_with_remote_host(self):\n remote_host = CONF.tests.remote_host\n transportfile = self._make_transport_file()\n self.sdkapi.guest_create(self.userid, 1, 1024, disk_list=self.disks)\n self.sdkapi.guest_deploy(self.userid,\n self.image_name,\n transportfiles=transportfile,\n remotehost=remote_host)\n self.sdkapi.guest_start(self.userid)\n powered_on = self.test_util.wait_until_guest_in_power_state(\n self.userid, 'on')\n self.assertTrue(powered_on)", "def setup():\n local( main_dir + \"/bin/vagrant_setup.sh\" )", "def setup():\n\n with cd(env.homedir):\n\n # clone repository from github\n sudo('git clone https://github.com/collective/demo.plone.de.git', user=env.deploy_user) # noqa: E501\n\n with cd(env.directory):\n\n # requirements\n # sudo('python python-dev build-essential zlib1g-dev libssl-dev libxml2-dev libxslt1-dev wv poppler-utils libtiff5-dev libjpeg62-dev zlib1g-dev libfreetype6-dev liblcms1-dev libwebp-dev') # noqa: E501\n\n # prepare buildout\n if env.latest:\n if env.python3:\n sudo('ln -s local_demo_nightly_py3.cfg local.cfg', user=env.deploy_user) # noqa: E501\n else:\n sudo('ln -s local_demo_nightly_py2.cfg local.cfg', user=env.deploy_user) # noqa: E501\n 
else:\n sudo('ln -s local_production.cfg local.cfg', user=env.deploy_user)\n sudo('echo -e \"[buildout]\\nlogin = admin\\npassword = admin\" > secret.cfg', user=env.deploy_user) # noqa: E501\n\n # bootstrap and run bildout once\n if env.latest:\n sudo('./bin/pip install --no-cache-dir -r https://raw.githubusercontent.com/plone/buildout.coredev/5.2/requirements.txt', user=env.deploy_user) # noqa: E501\n else:\n sudo('./bin/pip install --no-cache-dir -r https://raw.githubusercontent.com/starzel/buildout/5.2/requirements.txt', user=env.deploy_user) # noqa: E501\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start supervisor which starts plone instance also\n sudo('./bin/supervisord', user=env.deploy_user)", "def configure_remote_hostname_override(target='prod'):\n config = setup_env('etc/ep.remote.cfg')\n env.update(config._sections['energyportal_%s' % target])\n\n upload_template('remote.template.py',\n '/home/ubuntu/ep_site/settings/components/env/%s.py' % target,\n use_sudo=True, template_dir='fabfile/templates', use_jinja=True, context=env)", "def set_up(self):\n \n print '\\n', '=' * 20, \"SETUP HOST %s\" % (self.address,), '=' * 20\n \n # Rsync. Copy process_manager.py\n path = 'cluster_test_%d' % self.address[1]\n \n pm_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'base_remote_resources'))\n \n src = [pm_path]\n \n # To working dir in remote machine.\n dest = \"%s@%s:%s\" % (self.user_name, self.address[0], path)\n if not self._rsync(src, dest):\n # Rsync fails.\n print self.address, \"encounters problems when synchronizing files.\"\n return\n \n print \"Syncing local resources...\"\n \n all_syncs = []\n if \"\" in console_config._local_resource_syncs:\n all_syncs.extend(console_config._local_resource_syncs[\"\"])\n \n if self.address[0] in console_config._local_resource_syncs:\n all_syncs.extend(console_config._local_resource_syncs[self.address[0]])\n \n for local_path, rel_path in all_syncs:\n local_path = os.path.abspath(local_path)\n if not self._rsync(local_path, \"%s%s%s\" % (dest, os.sep, rel_path)):\n print self.address, \"encounters problems when syncing local files.\"\n return\n \n print \"Sync'd.\"\n \n print \"Attaching downloadable resources...\"\n \n all_dls = []\n if \"\" in console_config._remote_resource_downloads:\n all_dls.extend(console_config._remote_resource_downloads[\"\"])\n \n if self.address[0] in console_config._remote_resource_downloads:\n all_dls.extend(console_config._remote_resource_downloads[self.address[0]])\n \n for url, rel_path in all_dls:\n if not self._download(url, rel_path):\n print self.address, \"encounters problems when downloading files.\"\n return\n \n print \"Attached.\"\n \n print \"Running setup scripts...\"\n \n if self.address[0] in console_config._setup_scripts:\n \n setup_scripts = console_config._setup_scripts[self.address[0]]\n \n temp_dir = tempfile.mkdtemp();\n \n for i, snippet in enumerate(setup_scripts):\n \n name, snippet, lang, isFile, shouldRun = snippet\n \n filename = os.path.join(temp_dir, name)\n \n if not isFile :\n with open(filename, 'w') as file:\n file.write(snippet) \n else:\n shutil.copyfile(snippet, filename)\n \n remote_snippet_dir = dest + \"/base_remote_resources/scripts\"\n \n if not self._rsync(temp_dir + os.sep, remote_snippet_dir):\n print self.address, \"encounters problems when uploading snippets.\"\n return\n \n shutil.rmtree(temp_dir)\n \n # Run snippets\n for i, snippet in enumerate(setup_scripts):\n \n name, snippet, lang, isFile, shouldRun = snippet\n if not shouldRun: 
continue\n \n remote_filename = \"base_remote_resources/scripts/%s\" % name\n \n run_cmd = \". ./%s\" % remote_filename\n \n if not self._ssh(run_cmd, use_tty=True):\n print self.address, \"encounters problems when executing snippets.\"\n return\n \n \n # Use SSH to run process manager.\n # nohup python process_manager.py >process_manager.out 2>&1\n # </dev/null &\n ssh_command = 'cd base_remote_resources; bash restart_process_manager.sh %d' % self.address[1]\n \n # print ssh_command\n \n if self._ssh(ssh_command):\n print self.address, \"process manager has been set up.\"\n else:\n print self.address, \"encounters problems when running SSH.\"\n return\n \n print \"Done.\"", "def horizonhost():\n env.cd = cd\n env.run = run\n env.hosts = settings.HOSTS['horizon']\n env.exists = exists", "def do_command(self, args):\n hostlist = []\n threads = []\n environment = ''\n getenv = '(grep -q \"^kvm \" /proc/modules && echo \"kvm\") || ' \\\n '(/usr/sbin/xend status >/dev/null 2>&1 && echo \"xen\") || ' \\\n 'echo \"bare\"'\n if len(args) == 0:\n raise ValueError('No arguments given.')\n sys.stdout.write(\n 'Starting to prepare hosts. '\n 'Please wait, this may take a while...\\n')\n for host in args:\n host = chk_hostname(host)\n if host not in hostlist:\n hostlist.append(host)\n process = Popen(['/usr/bin/ssh',\n '-o PasswordAuthentication=no', 'root@%s' % (host, ),\n getenv], stderr=None, stdout=PIPE)\n retval = process.wait()\n if retval == 0:\n output = process.communicate()[0].strip().split('\\n')\n if len(output) == 1 and output[0] in ('xen', 'kvm'):\n environment = output[0]\n if environment == 'xen':\n threads.append(preparation.XenHostPreparation(self, host))\n elif environment == 'kvm':\n threads.append(preparation.KvmHostPreparation(self, host))\n else:\n self.failed = 1\n sys.stderr.write(\n 'Preparation of host %s failed\\n'\n 'Reason:\\n'\n 'Could not determine the test environment.\\n'\n % (host, ))\n for thread in threads:\n thread.start()\n for thread in threads:\n thread.join()\n if self.failed == 1:\n raise ValueError('Preparation of some hosts failed.')", "def setup_local_hosting(job, event_list, img_src, generate=False):\n msg = 'Setting up local hosting for {}'.format(job.get_type())\n event_list = push_event(event_list, msg)\n outter_dir = os.path.join(\n job.config.get('host_directory'),\n job.config.get('run_id'))\n if not os.path.exists(outter_dir):\n os.makedirs(outter_dir)\n host_dir = os.path.join(\n outter_dir,\n 'year_set_{}'.format(str(job.config.get('year_set'))))\n if not os.path.exists(img_src):\n msg = '{job} hosting failed, no image source at {path}'.format(\n job=job.get_type(),\n path=img_src)\n logging.error(msg)\n return\n if os.path.exists(host_dir):\n try:\n msg = 'removing and replacing previous files from {}'.format(host_dir)\n logging.info(msg)\n rmtree(host_dir)\n except Exception as e:\n logging.error(format_debug(e))\n print_debug(e)\n try:\n msg = 'copying images from {src} to {dst}'.format(src=img_src, dst=host_dir)\n logging.info(msg)\n copytree(src=img_src, dst=host_dir)\n except Exception as e:\n logging.error(format_debug(e))\n msg = 'Error copying {} to host directory'.format(job.get_type())\n event_list = push_event(event_list, 'Error copying coupled_diag to host_location')\n return\n\n if generate:\n prev_dir = os.getcwd()\n os.chdir(host_dir)\n job.generateIndex(output_dir=host_dir)\n os.chdir(prev_dir)\n\n subprocess.call(['chmod', '-R', '777', outter_dir])\n\n host_location = os.path.join(\n job.config.get('host_prefix'),\n 
job.config.get('run_id'),\n 'year_set_{}'.format(str(job.config.get('year_set'))),\n 'index.html')\n msg = '{job} hosted at {url}'.format(\n url=host_location,\n job=job.get_type())\n event_list = push_event(event_list, msg)", "def _provision_node(\n self,\n name,\n hostname,\n ):\n docker_utils.install_docker(\n hostname=hostname,\n ssh_port=SSH_PORT,\n ssh_username=self.get_ssh_username(name),\n ssh_private_key_file=self.get_ssh_private_key_file(name),\n executor=name,\n logger=self._logger,\n )", "def init():\n\n banner(\"init\")\n with show(\"output\"):\n if not env.get('no_apt_update'):\n sudo('apt-get update')\n\n require.directory(env.path, mode=\"777\", use_sudo=True)\n require.directory('/var/run/%s' % env.project_name, owner='www-data', group='www-data', mode='770', use_sudo=True)\n require.directory('/var/log/%s' % env.project_name, owner='www-data', group='www-data', mode='770', use_sudo=True)\n require.directory('/var/log/supervisord/', owner='www-data', group='www-data', mode='770', use_sudo=True)\n require.directory('/var/run/supervisord/', owner='www-data', group='www-data', mode='770', use_sudo=True)\n\n require.deb.packages([\n 'gcc', 'python-all-dev', 'libpq-dev', 'libjpeg-dev', 'libxml2-dev', 'libxslt1-dev', 'libmysqlclient-dev',\n 'libfreetype6-dev', 'libevent-dev', 'supervisor'\n ])\n require.python.pip(version=\"1.0\")\n\n new_virtualenv()\n\n me = run('whoami')\n sudo('adduser %s www-data' % me)\n\n install_nginx()\n\n if env.mysql:\n require.mysql.server(password=env.mysql_password)\n with settings(mysql_user='root', mysql_password=env.mysql_password):\n require.mysql.user(env.mysql_username, env.mysql_password)\n require.mysql.database(env.mysql_dbname, owner=env.mysql_username)", "def __init__(self, *args, **kwargs):\n # Enrich the base name given in the form to add prefixes or randomness\n kwargs['dns_name'] = names.create_host_name(kwargs['base_name'])\n if ('setup_script' not in kwargs and 'git_repo' in kwargs and\n 'port' in kwargs):\n kwargs['setup_script'] = self.default_setup_script(**kwargs)\n super(Host, self).__init__(*args, **kwargs)", "def configure(node):\n script = []\n script.append(Statements.exec(\"hostname %s\" % node.getName()))\n script.append(Statements.createOrOverwriteFile(\n \"/etc/hostname\", [node.getName()]))\n script.append(Statements.exec(\n \"sed -i 's/127.0.0.1/127.0.0.1\\t%s/' /etc/hosts\" % node.getName()))\n return script", "def setup():\n sudo(\"minv_setup.sh\")", "def one_time_setup(node, rhbuild, branch: str) -> None:\n node.exec_command(\n cmd=f\"sudo rm -rf ceph && git clone --branch {branch} --single-branch --depth 1 {TEST_REPO}\"\n )\n os_ver = rhbuild.split(\"-\")[-1]\n ceph_ver = rhbuild.split(\"-\")[0]\n\n if os_ver == \"7\":\n node.exec_command(\n cmd=\"sed -i '49 a rbd feature disable testimg1 object-map fast-diff deep-flatten' \"\n \"ceph/qa/workunits/rbd/kernel.sh\"\n )\n\n if \"4.\" in ceph_ver:\n node.exec_command(\n cmd=\"sed -i 's/blocklist/blacklist/g' \"\n \"ceph/qa/workunits/rbd/krbd_exclusive_option.sh\"\n )\n\n try:\n node.exec_command(cmd=\"rpm -qa | grep xmlstarlet\")\n return\n except BaseException: # noqa\n pass\n\n EPEL_RPM = (\n f\"https://dl.fedoraproject.org/pub/epel/epel-release-latest-{os_ver}.noarch.rpm\"\n )\n\n commands = [\n {\"cmd\": f\"yum install -y {EPEL_RPM} --nogpgcheck\", \"sudo\": True},\n {\n \"cmd\": \"yum install -y xmlstarlet rbd-nbd qemu-img cryptsetup --nogpgcheck\",\n \"sudo\": True,\n },\n ]\n for command in commands:\n node.exec_command(**command)\n\n # Blind sleep to ensure 
the Mon service has restarted.\n # TODO: Identify a way to check the service is running\n sleep(5)", "def bootstrap():\n\n require('environment', provided_by=env.environments)\n sudo('mkdir -p %(root)s' % env, user=env.deploy_user)\n clone_repo()\n setup_dirs()\n link_config_files()\n update_services()\n create_virtualenv()\n update_requirements()\n create_local_settings()", "def setup():\n require('hosts', 'project_path', provided_by=envs.ENVS)\n\n if not exists(env.project_path):\n abort(red('Project path ({project_path}) does not exist. '\n 'Create it on the server before continuing.'.format(**env)))\n\n with cd(env.project_path):\n run('mkdir -p api renderer conf markup_renderer')\n run('mkdir -p api/static api/uploads')\n\n make_release_folders('api')\n make_release_folders('renderer')", "def init_host(self):\n\n LOG.debug(_('XManager init_host...'))\n\n pass", "def deploy():\n require('hosts', provided_by=[prod])\n require('whole_path', provided_by=[prod])\n require('code_root')\n upload_tar_from_git(env.whole_path)\n install_requirements()\n symlink_current_release()\n migrate()\n restart_webservers()\n setup_permissions()\n collectstatic()", "def setup_machine():\n client = docker.from_env()\n if client.info().get(\"ServerVersion\") < \"18.09.2\":\n raise (\"Docker server needs to be at least 18.09.2\")\n ssh_path = os.path.join(expanduser(\"~\"), \".ssh\")\n cloud_path = os.path.join(ssh_path, \"cloud_keys\")\n config_path = os.path.join(cloud_path, \"config\")\n bash(\"mkdir -p {}\".format(cloud_path))\n bash(\"cp ~/.ssh/config ~/.ssh/{}/config\".format(\"cloud_keys\"))\n bash(\"sed -i '' '/.*UseKeychain.*/d' ~/.ssh/cloud_keys/config\")\n bash(\"sed -i '' '/.*ControlPath .*/d' ~/.ssh/cloud_keys/config\")\n\n config = \"\"\"\n Host *\n ControlPath /tmp/master-%r@%h:%p\n User {}\n \"\"\".format(\n getpass.getuser()\n )\n with open(config_path, \"r\") as h:\n conents = h.read()\n with open(config_path, \"w\") as h:\n h.write(config)\n with open(config_path, \"a\") as h:\n h.write(conents)\n keys = [\n splitext(x)[0]\n for x in glob.glob(os.path.join(ssh_path, \"*.pub\"))\n if not x.endswith(\"-cert.pub\") # filter out signed keys\n ]\n for key in keys:\n if not os.path.isfile(key):\n logger.warning(\"No private key for {}, skipping\".format(key))\n else:\n logger.info(\"Adding key {}\".format(key))\n dest = os.path.join(cloud_path, basename(key))\n if os.path.lexists(dest) is False:\n bash(\"cp {} {}\".format(key, dest))", "def deploy_me(self, type, platform, host_list):\n\n self.tmpl_dir = self.base_dir + '/templates'\n if not os.path.isfile(self.tmpl_dir + '/.initialized'):\n print \"\\tTemplates have not yet been initialized. 
Please first\"\n print \"\\tmake proper changes to the swift-setup.conf file and than\"\n print \"\\trun swift-setup init with sudo or as root user\\n\\n\"\n return False\n\n execute(self._common_setup, hosts=host_list)\n\n if type == 'admin':\n execute(self._admin_setup, hosts=host_list)\n elif type == 'generic':\n execute(self._swift_generic_setup, hosts=host_list)\n elif type == 'proxy':\n execute(self._swift_proxy_setup, hosts=host_list)\n elif type == 'storage':\n execute(self._swift_storage_setup, hosts=host_list)\n elif type == 'saio':\n execute(self._swift_saio_setup, hosts=host_list)\n\n disconnect_all()\n return True", "def start():\n _with_deploy_env(['./bin/paster serve src/remix/oerpub/rhaptoslabs/production.ini --daemon'])", "def setup_sshd(self):\n # Update apt repository\n command = 'apt update -y > /dev/null 2>&1'\n if self.debug is True:\n print('Executing apt update -y ')\n try:\n os.system('echo %s| sudo -S %s' % (self.sudo_pw, command))\n except:\n print(\"An error occured during 'apt update -u'\")\n\n # Install ssh package\n command = 'apt install ssh -y > /dev/null 2>&1'\n if self.debug is True:\n print('Executing apt install ssh -y')\n try:\n os.system('echo %s| sudo -S %s' % (self.sudo_pw, command))\n except:\n print(\"An error occured during 'apt install ssh -y' while installing ssh\")\n\n # Configure sshd using the config\n self.config_sshd()\n\n # Reload sshd config\n try:\n command = \"service ssh restart > /dev/null 2>&1\"\n os.system('echo %s| sudo -S %s' % (self.sudo_pw, command))\n print('SSHD_installed and configured successfully, SSHD listening on port {}'.format(self.ssh_port))\n except:\n print('An error occured during ssh \"sudo service ssh reload\" while installing ssh')", "def qa():\n env.hosts = ['[email protected]']\n env.directory = '/var/www/swordpushweb'", "def host_ubuntu_release(self):\r\n return self._host_ubuntu", "def AptInstall(vm):\n vm.RemoteCommand('wget -c '\n 'https://repo.mysql.com//mysql-apt-config_0.8.17-1_all.deb')\n vm.RemoteCommand('echo mysql-apt-config mysql-apt-config/select-server'\n ' select mysql-8.0 | sudo debconf-set-selections')\n vm.RemoteCommand('echo mysql-apt-config mysql-apt-config/select-product'\n ' select Ok | sudo debconf-set-selections')\n vm.RemoteCommand('sudo -E DEBIAN_FRONTEND=noninteractive dpkg -i'\n ' mysql-apt-config_0.8.17-1_all.deb')\n\n _, stderr = vm.RemoteCommand('sudo apt-get update', ignore_failure=True)\n\n if stderr:\n if 'public key is not available:' in stderr:\n # This error is due to mysql updated the repository and the public\n # key is not updated.\n # Import the updated public key\n match = re.match('.*NO_PUBKEY ([A-Z0-9]*)', stderr)\n if match:\n key = match.group(1)\n vm.RemoteCommand('sudo apt-key adv '\n f'--keyserver keyserver.ubuntu.com --recv-keys {key}')\n else:\n raise RuntimeError('No public key found by regex.')\n else:\n raise RuntimeError(stderr)\n\n vm.RemoteCommand('echo \"mysql-server-8.0 mysql-server/root_password password '\n f'{MYSQL_PSWD}\" | sudo debconf-set-selections')\n vm.RemoteCommand('echo \"mysql-server-8.0 mysql-server/root_password_again '\n f'password {MYSQL_PSWD}\" | sudo debconf-set-selections')\n vm.InstallPackages('mysql-server')", "def init_host(self, host):\n self._precreate_network()\n LOG.info(_LI(\"Create/Update Ntwork and Subnet, Done.\"))", "def init_host(self, host):\n if self._drv_nodes is None:\n self.set_nodes([nova_conf.host])\n args = (drv_conf.tenant_id, drv_conf.client_id, drv_conf.client_secret,\n drv_conf.subscription_id)\n\n 
self.compute_client = utils.get_compute_client(*args)\n self.resource_client = utils.get_resource_client(*args)\n self.network_client = utils.get_network_client(*args)\n is_resource_created = utils.check_resource_existence(\n self.resource_client, drv_conf.resource_group)\n if not is_resource_created:\n utils.create_resource_group(\n self.resource_client, drv_conf.resource_group, drv_conf.region)\n\n self.flavor_info.update(\n utils.get_vm_sizes(self.compute_client, drv_conf.region))\n LOG.info(\"%s driver init with %s project, %s region\" %\n (self.name, drv_conf.tenant_id, drv_conf.region))", "def _uwsgi_debian(as_service=True, use_pip=False):\n require.deb.package(\"python-dev\") # required to compile uwsgi\n if use_pip:\n require.python.package(\"uwsgi\")\n else:\n _uwsgi.manual_install()\n if as_service:\n require.file(\n \"/etc/systemd/system/uwsgi.service\",\n _uwsgi.UWSGI_SERVICE_TEMPLATE.format(\n owner=env.user, group=\"www-data\"\n ), use_sudo=True)\n require.directory(_uwsgi.UWSGI_SITES_LOCATION, use_sudo=True)\n require.service.started(\"uwsgi\")", "def _setup_imagr_environment():\n time.sleep(60) # wait for server to boot\n sudo(\"apt-get -y update\")\n sudo(\"apt-get -y upgrade\")\n sudo(\"apt-get -y remove python\") # remove current version of python\n sudo(\"apt-get -y install python-dev\")\n sudo(\"apt-get -y install postgresql-9.3\")\n sudo(\"apt-get -y install postgresql-server-dev-9.3\")\n sudo(\"apt-get -y install git\")\n sudo(\"apt-get -y install nginx\")\n sudo(\"apt-get -y install gunicorn\")", "def provision():\n sudo('chef-client')", "def verify_host(self):\n super().verify_host()\n if not self.use_docker:\n if self.tools.host_os != \"Linux\":\n raise UnsupportedHostError(self.supported_host_os_reason)", "def others_server():\n log('Instalando nginx e supervisor', yellow)\n sudo('apt-get -y install nginx supervisor')\n sudo('apt-get -y install mercurial')\n try:\n sudo('apt-get -y install ruby rubygems')\n except:\n log('PACOTE DO RUBY GEMS FOI REMOVIDO DO PACKAGES DO UBUNTU', red)\n\n # ubuntu 12\n # sudo('apt-get -y install php5-fpm php5-suhosin php-apc php5-gd php5-imagick php5-curl')\n\n # ubuntu 14\n sudo('apt-get -y install php5-fpm php-apc php5-gd php5-imagick php5-curl php5-cli php5-mysql')\n sudo('apt-get -y install proftpd') # standalone nao perguntar\n\n # ubuntu 14\n sudo( 'apt-get install ruby-dev' )\n sudo('gem install compass')", "def init():\r\n if not env.hosts:\r\n _init_local()\r\n else:\r\n _init_remote()", "def deploy():", "def build_server():\n log('Instalando build-essential e outros pacotes', yellow)\n sudo('apt-get -y install build-essential automake')\n sudo('apt-get -y install libxml2-dev libxslt-dev')\n sudo('apt-get -y install libjpeg-dev libjpeg8-dev zlib1g-dev libfreetype6 libfreetype6-dev')\n\n # Then, on 32-bit Ubuntu, you should run:\n\n # sudo ln -s /usr/lib/i386-linux-gnu/libfreetype.so /usr/lib/\n # sudo ln -s /usr/lib/i386-linux-gnu/libz.so /usr/lib/\n # sudo ln -s /usr/lib/i386-linux-gnu/libjpeg.so /usr/lib/\n\n # Otherwise, on 64-bit Ubuntu, you should run:\n\n sudo( 'ln -s /usr/lib/x86_64-linux-gnu/libfreetype.so /usr/lib/' )\n sudo( 'ln -s /usr/lib/x86_64-linux-gnu/libz.so /usr/lib/' )\n sudo( 'ln -s /usr/lib/x86_64-linux-gnu/libjpeg.so /usr/lib/' )", "def enable_host(self, name):\n from soppa.local import aslocal\n self.guest_ip = self.guest_ip()\n self.guest_host_name = name\n # Host (remote) change\n self.file.set_setting('/etc/hosts', '{0} {1}'.format('127.0.0.1', self.guest_host_name))\n # local change\n aslocal()\n 
self.file.set_setting('/etc/hosts', '{0} {1}'.format(self.guest_ip, name))", "def do_security_setup(run_as_user, branch, base_path, dist_path, enable=True):\n \n if not enable:\n #disable security setup if enabled\n runcmd(\"apt-get -y remove unattended-upgrades fail2ban psad rkhunter chkrootkit logwatch apparmor auditd iwatch\")\n return\n \n #modify host.conf\n modify_config(r'^nospoof on$', 'nospoof on', '/etc/host.conf')\n \n #enable automatic security updates\n runcmd(\"apt-get -y install unattended-upgrades\")\n runcmd('''bash -c \"echo -e 'APT::Periodic::Update-Package-Lists \"1\";\\nAPT::Periodic::Unattended-Upgrade \"1\";' > /etc/apt/apt.conf.d/20auto-upgrades\" ''')\n runcmd(\"dpkg-reconfigure -fnoninteractive -plow unattended-upgrades\")\n \n #sysctl\n runcmd(\"install -m 0644 -o root -g root -D %s/linux/other/sysctl_rules.conf /etc/sysctl.d/60-tweaks.conf\" % dist_path)\n\n #set up fail2ban\n runcmd(\"apt-get -y install fail2ban\")\n runcmd(\"install -m 0644 -o root -g root -D %s/linux/other/fail2ban.jail.conf /etc/fail2ban/jail.d/counterblock.conf\" % dist_path)\n runcmd(\"service fail2ban restart\")\n \n #set up psad\n runcmd(\"apt-get -y install psad\")\n modify_config(r'^ENABLE_AUTO_IDS\\s+?N;$', 'ENABLE_AUTO_IDS\\tY;', '/etc/psad/psad.conf')\n modify_config(r'^ENABLE_AUTO_IDS_EMAILS\\s+?Y;$', 'ENABLE_AUTO_IDS_EMAILS\\tN;', '/etc/psad/psad.conf')\n for f in ['/etc/ufw/before.rules', '/etc/ufw/before6.rules']:\n modify_config(r'^# End required lines.*?# allow all on loopback$',\n '# End required lines\\n\\n#CUSTOM: for psad\\n-A INPUT -j LOG\\n-A FORWARD -j LOG\\n\\n# allow all on loopback',\n f, dotall=True)\n runcmd(\"psad -R && psad --sig-update\")\n runcmd(\"service ufw restart\")\n runcmd(\"service psad restart\")\n \n #set up chkrootkit, rkhunter\n runcmd(\"apt-get -y install rkhunter chkrootkit\")\n runcmd('bash -c \"rkhunter --update; exit 0\"')\n runcmd(\"rkhunter --propupd\")\n runcmd('bash -c \"rkhunter --check --sk; exit 0\"')\n runcmd(\"rkhunter --propupd\")\n \n #logwatch\n runcmd(\"apt-get -y install logwatch libdate-manip-perl\")\n \n #apparmor\n runcmd(\"apt-get -y install apparmor apparmor-profiles\")\n \n #auditd\n #note that auditd will need a reboot to fully apply the rules, due to it operating in \"immutable mode\" by default\n runcmd(\"apt-get -y install auditd audispd-plugins\")\n runcmd(\"install -m 0640 -o root -g root -D %s/linux/other/audit.rules /etc/audit/rules.d/counterblock.rules\" % dist_path)\n modify_config(r'^USE_AUGENRULES=.*?$', 'USE_AUGENRULES=\"yes\"', '/etc/default/auditd')\n runcmd(\"service auditd restart\")\n\n #iwatch\n runcmd(\"apt-get -y install iwatch\")\n modify_config(r'^START_DAEMON=.*?$', 'START_DAEMON=true', '/etc/default/iwatch')\n runcmd(\"install -m 0644 -o root -g root -D %s/linux/other/iwatch.xml /etc/iwatch/iwatch.xml\" % dist_path)\n modify_config(r'guard email=\"root@localhost\"', 'guard email=\"noreply@%s\"' % socket.gethostname(), '/etc/iwatch/iwatch.xml')\n runcmd(\"service iwatch restart\")", "def main():\n logging.basicConfig(stream=sys.stderr, level=logging.DEBUG,\n format='%(name)s (%(levelname)s): %(message)s')\n\n kvm = pathlib.Path(\"/disks/d/VMWare/KVM\")\n os = { \n 'win7':'/disks/d/OS/Windows/Windows_7/Windows_7_LITE_X64.iso',\n 'win7_full':'/disks/d/OS/Windows/Windows_7/fr_windows_7_ultimate_x64_dvd_x15-65928.iso',\n 'mint':'/disks/d/OS/Unix/Mint/linuxmint-18.3-cinnamon-64bit.iso',\n 'solaris':'/disks/d/OS/Unix/Solaris/11/sol-11_3-text-x86.iso'\n }\n\n try:\n arguments = parse_command_line(sys.argv)\n 
# Assign args to variables\n server = arguments.name\n os = arguments.os\n legacy = arguments.legacy\n dry = arguments.dry_run\n if kvm.path.exists():\n kvm_disk = kvm_path + server\n command = \"virt-install --ram 2048 --disk path=${DIR_HOST}/${HOST}.qcow2,size=8 --vcpus 2 --os-type linux --os-variant ubuntuquantal --network bridge=virbr0\"\n if dry:\n print(command)\n print(kvm_disk)\n\n except KeyboardInterrupt:\n log.error('Program interrupted!')\n finally:\n logging.shutdown()", "def deploy(c, _hosts=\"\"):\n eve = DeployHost(\"eve.i\", user=\"root\")\n if _hosts != \"\":\n hosts = get_hosts(_hosts)\n else:\n hosts = [\n eve,\n DeployHost(\n \"localhost\",\n user=\"joerg\",\n meta=dict(\n extra_args=[\"--use-remote-sudo\"],\n flake_path=\"/home/joerg/.homesick/repos/dotfiles\",\n ),\n forward_agent=True,\n ),\n DeployHost(\n \"eve.i\",\n user=\"root\",\n forward_agent=True,\n command_prefix=\"eva.r\",\n meta=dict(target_host=\"eva.i\", flake_attr=\"eva\"),\n ),\n DeployHost(\n \"eve.i\",\n user=\"root\",\n forward_agent=True,\n command_prefix=\"blob64.r\",\n meta=dict(target_host=\"blob64.r\", flake_attr=\"blob64\"),\n ),\n ]\n deploy_nixos(hosts)\n eve.run(\"systemctl restart buildbot-master\")", "def create_host(self, conf, tenant_id, network_id, params):\n\t\tpass", "def set_host(host_index):\n env.hosts = [public_dns_names[int(host_index)]]\n env.password = [public_pwds[int(host_index)]]", "def deploy_common_services():\n put('./minion/*', '/etc/systemd/system', use_sudo=True)\n sudo('source /etc/environment')\n sudo('/opt/bin/substitute_private_ipv4.sh /etc/systemd/system/flannel.service')\n sudo('/opt/bin/substitute_private_ipv4.sh /etc/systemd/system/kubelet.service')\n\n sudo('systemctl enable /etc/systemd/system/flannel.service')\n sudo('systemctl enable /etc/systemd/system/docker.service')\n sudo('systemctl enable /etc/systemd/system/kube-proxy.service')\n sudo('systemctl enable /etc/systemd/system/kubelet.service')\n\n sudo('systemctl daemon-reload')\n\n sudo('systemctl start flannel')\n sudo('systemctl start docker')\n sudo('systemctl start kube-proxy')\n sudo('systemctl start kubelet')", "def host_bootstrap(args):\n name = args.name\n host = args.host\n port = args.port\n user = args.user\n protocol = args.protocol\n url = args.url\n pool = args.pool\n poolpath = args.poolpath\n baseconfig = Kbaseconfig(client=args.client, debug=args.debug)\n baseconfig.bootstrap(name, host, port, user, protocol, url, pool, poolpath)", "def generate_debootstrap_rootfs(self):\n\n logging.info(\"starting to generate debootstrap rootfs\")\n\n # Generate the base debootstrap command\n debootstrap_command = \"sudo debootstrap --no-check-gpg\"\n\n # Add the foreign and arch only if they are different from host, and\n # thus if use_qemu_static is True\n if self.use_qemu_static:\n logging.info(\"running debootstrap stage 1\")\n debootstrap_command += \" --foreign --arch=\" + self.project.target_arch\n else:\n logging.info(\"running debootstrap\")\n\n # Add the target, mount point and repository url to the debootstrap command\n debootstrap_command += \" \" + self.project.target_version + \" \"\n debootstrap_command += self.project.rootfs_mountpoint + \" \"\n debootstrap_command += self.project.project_definition[\"project-definition\"][\"debootstrap-repository\"]\n\n # Finally run the subprocess\n self.execute_command(debootstrap_command)\n\n # Check if we are working with foreign arch, then ...\n if self.use_qemu_static:\n # QEMU is used, and we have to install it into the target\n 
self.setup_qemu()\n\n # And second stage must be run\n logging.info(\"doing debootstrap stage 2\")\n debootstrap_command = \"LANG=C sudo chroot \" + self.project.rootfs_mountpoint\n debootstrap_command += \" /debootstrap/debootstrap --second-stage\"\n self.execute_command(debootstrap_command)\n\n\n # Mount bind /proc into the rootfs mountpoint\n sudo_command = \"sudo mount --bind --make-rslave /proc \" + self.project.rootfs_mountpoint + \"/proc\"\n self.execute_command(sudo_command)\n self.proc_is_mounted = True\n\n # Mount bind /dev/pts into the rootfs mountpoint\n sudo_command = \"sudo mount --bind --make-rslave /dev/pts \" + self.project.rootfs_mountpoint + \"/dev/pts\"\n self.execute_command(sudo_command)\n self.devpts_is_mounted = True\n\n # Mount bind /dev/shm into the rootfs mountpoint\n sudo_command = \"sudo mount --bind --make-rslave /dev/shm \" + self.project.rootfs_mountpoint + \"/dev/shm\"\n self.execute_command(sudo_command)\n self.devshm_is_mounted = True\n\n # Update the APT sources\n self.generate_apt_sources_configuration()\n\n # Then update the list of packages\n apt_command = \"sudo chroot \" + self.project.rootfs_mountpoint + \" /usr/bin/apt-get update\"\n self.execute_command(apt_command)\n\n # Install extra packages into the chroot\n apt_command = \"sudo chroot \" + self.project.rootfs_mountpoint + \" /usr/bin/apt-get install --no-install-recommends --yes --allow-unauthenticated apt-utils ansible\"\n self.execute_command(apt_command)\n\n # Generate a unique build timestamp into /etc/dft_version\n self.generate_build_number()", "def set_host_aliases():\n with open('/tmp/hosts', 'w') as f:\n uname = os.uname()\n f.write(f'{uname.nodename} localhost\\n')\n os.environ['HOSTALIASES'] = '/tmp/hosts'", "def setup_node(\n *,\n # Change this to take host, user, and identity_file?\n # Add some kind of caching for SSH connections so that they\n # can be looked up by host and reused?\n ssh_client: paramiko.client.SSHClient,\n services: list,\n cluster: FlintrockCluster):\n host = ssh_client.get_transport().getpeername()[0]\n ssh_check_output(\n client=ssh_client,\n command=\"\"\"\n set -e\n\n echo {private_key} > \"$HOME/.ssh/id_rsa\"\n echo {public_key} >> \"$HOME/.ssh/authorized_keys\"\n\n chmod 400 \"$HOME/.ssh/id_rsa\"\n \"\"\".format(\n private_key=shlex.quote(cluster.ssh_key_pair.private),\n public_key=shlex.quote(cluster.ssh_key_pair.public)))\n\n with ssh_client.open_sftp() as sftp:\n sftp.put(\n localpath=os.path.join(SCRIPTS_DIR, 'setup-ephemeral-storage.py'),\n remotepath='/tmp/setup-ephemeral-storage.py')\n\n logger.info(\"[{h}] Configuring ephemeral storage...\".format(h=host))\n # TODO: Print some kind of warning if storage is large, since formatting\n # will take several minutes (~4 minutes for 2TB).\n storage_dirs_raw = ssh_check_output(\n client=ssh_client,\n command=\"\"\"\n set -e\n python /tmp/setup-ephemeral-storage.py\n rm -f /tmp/setup-ephemeral-storage.py\n \"\"\")\n storage_dirs = json.loads(storage_dirs_raw)\n\n cluster.storage_dirs.root = storage_dirs['root']\n cluster.storage_dirs.ephemeral = storage_dirs['ephemeral']\n\n ensure_java8(ssh_client)\n\n for service in services:\n service.install(\n ssh_client=ssh_client,\n cluster=cluster)", "def set_hostname(hostname=None, deploy=False):\n\n if not hostname:\n raise CommandExecutionError(\"Hostname option must not be none.\")\n\n ret = {}\n\n query = {\n \"type\": \"config\",\n \"action\": \"set\",\n \"xpath\": (\n \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system\"\n ),\n 
\"element\": \"<hostname>{}</hostname>\".format(hostname),\n }\n\n ret.update(__proxy__[\"panos.call\"](query))\n\n if deploy is True:\n ret.update(commit())\n\n return ret", "def _add_node_to_etc_hosts(self):\n image = 'alpine:latest'\n command = 'echo \"{} {} # clusterdock\" >> /etc/hosts'.format(self.ip_address,\n self.fqdn)\n volumes = {'/etc/hosts': {'bind': '/etc/hosts', 'mode': 'rw'}}\n\n logger.debug('Adding %s to /etc/hosts ...', self.fqdn)\n client.containers.run(image=image,\n command=[self.execute_shell, '-c', command],\n volumes=volumes,\n remove=True)", "def vdbench_deploy(host):\n if host.host_type == 'linux':\n vdbench_exe = \"{}/vdbench\".format(VDBENCH_EXE_LOC)\n # check vdbench\n log.info(\"Check if vdbench already present\")\n if host.is_path_exists(vdbench_exe):\n log.info(\"vdbench exe already present on host {}\".format(host))\n return\n # check java & deploy if not available\n log.info(\"Verify if java alredy presnt\")\n status, output, error = host.conn.execute_command(['java', '-version'])\n if status:\n log.info(output)\n log.error(error)\n else:\n log.info(\"java already present on host {}\".format(host))\n if not any(\"openjdk version\" in line for line in error):\n log.info(\"Deploying Java on host {}\".format(host))\n host.deploy('java')\n # copy vdbench source files to the host\n log.info(\"Copying vdbench source files to host {}\".format(host))\n host.conn.scp_put(localpath=VDBENCH_EXE_LOC,\n remotepath=os.path.dirname(VDBENCH_EXE_LOC),\n recursive=True)\n log.info(\"Successfully copied vdbench source files to host {}\".format(host))\n elif host.host_type == 'windows':\n # vdbench_exe = \"{}\\\\vdbench.bat\".format(WIN_VDBENCH_EXE_LOC)\n pass", "def do_base_setup(run_as_user, branch, base_path, dist_path):\n #change time to UTC\n runcmd(\"ln -sf /usr/share/zoneinfo/UTC /etc/localtime\")\n\n #install some necessary base deps\n runcmd(\"apt-get update\")\n runcmd(\"apt-get -y install git-core software-properties-common python-software-properties build-essential ssl-cert ntp runit\")\n \n #install node-js\n #node-gyp building has ...issues out of the box on Ubuntu... 
use Chris Lea's nodejs build instead, which is newer\n runcmd(\"apt-get -y remove nodejs npm gyp\")\n runcmd(\"add-apt-repository -y ppa:chris-lea/node.js\")\n runcmd(\"apt-get update\")\n runcmd(\"apt-get -y install nodejs\") #includes npm\n gypdir = None\n try:\n import gyp\n gypdir = os.path.dirname(gyp.__file__)\n except:\n pass\n else:\n runcmd(\"mv %s %s_bkup\" % (gypdir, gypdir))\n #^ fix for https://github.com/TooTallNate/node-gyp/issues/363\n\n #Create xcp user, under which the files will be stored, and who will own the files, etc\n try:\n pwd.getpwnam(USERNAME)\n except:\n logging.info(\"Creating user '%s' ...\" % USERNAME)\n runcmd(\"adduser --system --disabled-password --shell /bin/false --group %s\" % USERNAME)\n \n #Create xcpd user (to run counterpartyd, counterblockd, insight, bitcoind, nginx) if not already made\n try:\n pwd.getpwnam(DAEMON_USERNAME)\n except:\n logging.info(\"Creating user '%s' ...\" % DAEMON_USERNAME)\n runcmd(\"adduser --system --disabled-password --shell /bin/false --ingroup nogroup --home %s %s\" % (USER_HOMEDIR, DAEMON_USERNAME))\n \n #add the run_as_user to the xcp group\n runcmd(\"adduser %s %s\" % (run_as_user, USERNAME))\n \n #Check out counterpartyd-build repo under this user's home dir and use that for the build\n git_repo_clone(\"counterpartyd_build\", \"https://github.com/CounterpartyXCP/counterpartyd_build.git\",\n os.path.join(USER_HOMEDIR, \"counterpartyd_build\"), branch, for_user=run_as_user)\n\n #enhance fd limits for the xcpd user\n runcmd(\"cp -af %s/linux/other/xcpd_security_limits.conf /etc/security/limits.d/\" % dist_path)", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"host\", type=str, nargs=\"+\")\n parser.add_argument(\"--user\", type=str, default=getpass.getuser())\n parser.add_argument(\"--path\", type=str, required=True)\n parser.add_argument(\"--keep\", type=int, default=3)\n parser.add_argument(\"--deployuser\", type=str, default=None)\n parser.add_argument(\"--postcmd\", type=str, default=None)\n\n args = parser.parse_args()\n if args.host is None:\n parser.print_usage()\n sys.exit(1)\n\n if args.deployuser is None:\n args.deployuser = args.user\n\n init(autoreset=True)\n deploy(args)", "def install(where='local'):\n config = get_config(where)\n print 'using configuration: %s' % config\n with settings(host_string=config['host_string']):\n if not files.exists(config['installation_dir']):\n run('git clone %(git_repo)s %(installation_dir)s' % config)\n with cd(config['installation_dir']):\n run('git submodule init')\n run('git submodule update --init')\n\n with settings(host_string=config['host_string']), cd(config['installation_dir']):\n run('python2.7 bootstrap.py -c %(cfg)s' % config)\n deploy(where)\n secs = 4\n sleep(secs)\n init_db(where)", "def gen_host(host_name=None, host_vars={}):\n ssh_host = host_vars.get('ip', host_name)\n ssh_port = host_vars.get('port', ANS_CONS.DEFAULT_REMOTE_PORT)\n ssh_user = host_vars.get('username')\n ssh_pass = host_vars.get('password')\n ssh_fkey = host_vars.get('ssh_key')\n # init Host\n host = Host(name=host_name, port=ssh_port)\n host.set_variable('ansible_ssh_host', ssh_host)\n # shortcut variables\n ssh_user and host.set_variable('ansible_ssh_user', ssh_user)\n ssh_pass and host.set_variable('ansible_ssh_pass', ssh_pass)\n ssh_fkey and host.set_variable('ansible_private_key_file', ssh_fkey)\n # extra variables\n for key, value in host_vars.iteritems():\n if key not in ['ip', 'port', 'username', 'password', 'ssh_key']:\n host.set_variable(key, value)\n # 
return Host object\n return host", "def test_deploy_with_vdev(self):\n # back up user_root_vdev value in config file\n def _restore_conf(root_vdev_back):\n CONF.zvm.user_root_vdev = root_vdev_back\n root_vdev_back = CONF.zvm.user_root_vdev\n self.addCleanup(_restore_conf, root_vdev_back)\n\n new_root = '123'\n CONF.zvm.user_root_vdev = new_root\n disks = [\n {'size': '3G',\n 'format': 'xfs',\n 'is_boot_disk': True,\n 'disk_pool': CONF.zvm.disk_pool},\n {'size': '200M',\n 'format': 'ext3',\n 'is_boot_disk': False,\n 'disk_pool': 'ECKD:xcateckd'}]\n\n self.sdkapi.guest_create(self.userid, 1, 1024, disk_list=disks)\n self.sdkapi.guest_deploy(self.userid,\n self.image_name,\n vdev=new_root)\n self.sdkapi.guest_start(self.userid)\n powered_on = self.test_util.wait_until_guest_in_power_state(\n self.userid, 'on')\n self.assertTrue(powered_on)", "def deploy(self):\n\n # Todo: NFVi deploy (sriov, vswitch, ovs etc) based on the config.\n if not self.vm_deploy:\n return\n\n self.connection = ssh.SSH.from_node(self.host_mgmt)\n self.dpdk_nic_bind = provision_tool(\n self.connection,\n os.path.join(get_nsb_option(\"bin_path\"), \"dpdk-devbind.py\"))\n\n # Check dpdk/ovs version, if not present install\n self.check_ovs_dpdk_env()\n # Todo: NFVi deploy (sriov, vswitch, ovs etc) based on the config.\n StandaloneContextHelper.install_req_libs(self.connection)\n self.networks = StandaloneContextHelper.get_nic_details(self.connection,\n self.networks,\n self.dpdk_nic_bind)\n\n self.setup_ovs()\n self.start_ovs_serverswitch()\n self.setup_ovs_bridge_add_flows()\n self.nodes = self.setup_ovs_dpdk_context()\n LOG.debug(\"Waiting for VM to come up...\")\n self.nodes = StandaloneContextHelper.wait_for_vnfs_to_start(self.connection,\n self.servers,\n self.nodes)", "def __setup_deploy(self):\n init_dict = dict()\n\n # Fetch and store platform\n init_dict['os'] = platform.system()\n # Fetch, validate and store target ip address\n while True:\n address = input('Please enter the IP address: ')\n try:\n ipaddress.ip_address(address)\n init_dict['ip'] = address\n break\n except Exception as error:\n print('Error: ', error)\n\n # Dump data into file\n self.settings_file.touch()\n with self.settings_file.open('w') as file:\n json.dump(init_dict, file, indent=4)", "def install():\n deploy()\n configure()", "def setup_node(config, args):\n if args.templates:\n config['templates'].insert(0, args.templates)\n config.templates = config._templates()\n stdin, stderr = args.node.ssh(config.template(\"scripts/node_setup.sh\"))\n if stderr:\n print stderr\n else:\n print u\"Node ready at %s\" % (args.node.hostname)", "def staging():\n env.hosts = ['staging.example.com']", "def createHost(self):\n self.createUser()\n self.user.host_for = [self.program.scope.key()]\n self.user.put()", "def production():\n env.config_file = 'config_production.py'\n env.hosts = ['[email protected]']\n env.host_type = 'production'\n env.user = 'ombu'\n env.host_webserver_user = 'nginx'\n env.host_site_path = '/home/ombu/webapps/ombuweb'", "def _start(self, host):\n pass", "def bootstrap():\n _require_environment()\n\n adduser()\n install_python()\n install_git()\n install_apache()\n install_mysql()\n setup_project()", "def setup_dirs():\n\n require('environment', provided_by=env.environments)\n sudo('mkdir -p %(log_dir)s' % env, user=env.deploy_user)\n sudo('chmod a+w %(log_dir)s' % env )\n sudo('mkdir -p %(services)s/nginx' % env, user=env.deploy_user)\n sudo('mkdir -p %(services)s/supervisor' % env, user=env.deploy_user)\n sudo('mkdir -p 
%(services)s/gunicorn' % env, user=env.deploy_user)\n sudo('mkdir -p %(media_root)s' % env)\n sudo('chown %(webserver_user)s %(media_root)s' % env)\n sudo('mkdir -p %(static_root)s' % env)\n sudo('chown %(webserver_user)s %(static_root)s' % env)", "def usb_setup():\n print(\"Warning: using deprecated usb_setup routine!\")\n largest = largest_partition()\n medium = medium_partition()\n smallest = smallest_partition()\n\n print(\"Starting USB installation\")\n print(\"Using {} as archive storage\".format(largest))\n print(\"Using {} as volatile storage\".format(medium))\n print(\"Using {} as important storage\".format(smallest))\n\n lncm_usb = \"/usr/local/sbin/lncm-usb\"\n\n cli_invocation = [\n lncm_usb,\n largest,\n medium,\n smallest,\n get_uuid(largest),\n get_uuid(medium),\n get_uuid(smallest),\n str(largest_part_size()),\n ]\n\n call(cli_invocation)", "def upgrade_kernel_node(*args):\n for host_string in args:\n with settings(host_string=host_string):\n dist, version, extra = get_linux_distro()\n print \"upgrading apparmor before upgrading kernel\"\n if version == '12.04':\n apt_install([\"apparmor\"])\n print \"Installing 3.13.0-34 kernel headers\"\n apt_install([\"linux-headers-3.13.0-34\"])\n apt_install([\"linux-headers-3.13.0-34-generic\"])\n print \"Upgrading the kernel to 3.13.0-34\"\n apt_install([\"linux-image-3.13.0-34-generic\"])\n elif version == '14.04':\n print \"Installing 3.13.0-40 kernel headers\"\n apt_install([\"linux-headers-3.13.0-40\",\n \"linux-headers-3.13.0-40-generic\"])\n print \"Upgrading the kernel to 3.13.0-40\"\n apt_install([\"linux-image-3.13.0-40-generic\",\n \"linux-image-extra-3.13.0-40-generic\"])", "def localhost():\n env.cd = lcd\n env.run = lrun\n env.hosts = ['localhost']\n env.exists = os.path.isdir", "def bootstrapFrontend(serverName, serverPort, sslPublicCertPath,\n sslPrivateCertPath):\n # Upload files\n put(sslPublicCertPath, 'fluidinfo.pem')\n put(sslPrivateCertPath, 'fluidinfo.key')\n\n # Install requirements.\n sudo('DEBIAN_FRONTEND=noninteractive apt-get install -y nginx haproxy')\n\n # Set up haproxy.\n sudo('/etc/init.d/haproxy stop')\n deployConfigFiles(\n {'server-name': serverName},\n\n ('haproxy/haproxy.cfg', '/etc/haproxy/haproxy.cfg'),\n ('haproxy/haproxy-default', '/etc/default/haproxy'))\n\n sudo('mkdir -p ../var/run/haproxy')\n sudo('chown haproxy:haproxy ../var/run/haproxy')\n sudo('/etc/init.d/haproxy start')\n sudo('curl --silent http://127.0.0.1:9000 > /dev/null && echo Works!')\n\n # Set up nginx.\n sudo('/etc/init.d/nginx stop')\n sudo('mkdir -p /etc/nginx/ssl')\n sudo('mv fluidinfo.pem /etc/nginx/ssl')\n sudo('chmod 600 /etc/nginx/ssl/fluidinfo.pem')\n sudo('mkdir -p /var/lib/fluidinfo/logs')\n\n sudo('mv fluidinfo.key /etc/nginx/ssl')\n sudo('chmod 600 /etc/nginx/ssl/fluidinfo.key')\n deployConfigFiles(\n {'server-name': serverName},\n\n ('nginx/fluidinfo-secure.conf.template',\n '/etc/nginx/sites-available/{server-name}'))\n\n sudo('ln -sf /etc/nginx/sites-available/{0} '\n '/etc/nginx/sites-enabled/{0}'.format(serverName))\n sudo('rm -f /etc/nginx/sites-enabled/default')\n sudo('/etc/init.d/nginx start')\n time.sleep(1)\n sudo('curl --silent http://127.0.0.1:%d > /dev/null && echo Works!'\n % serverPort)", "def dev_up():\n _with_deploy_env(['./bin/develop up'])", "def qa():\n env.config_file = 'config_production.py'\n env.hosts = ['[email protected]:34165']\n env.host_type = 'qa'\n env.user = 'ombu'\n env.host_webserver_user = 'www-data'\n env.host_site_path = '/mnt/main/qa/qa2/public'", "def 
setup_url_for_address(host, port):\n\n # Force hostnames into IP addresses\n try:\n # Attempt to register {host} as an IP address; if this fails ({host} is\n # not an IP address), this will throw a ValueError.\n ip_address(host)\n except ValueError:\n # The provided {host} should be treated as a hostname.\n host = hostname_lookup(host)\n\n # Automatically determine the port if not provided.\n if not port:\n port = probe_wemo(host)\n\n if not port:\n return None\n\n return \"http://%s:%s/setup.xml\" % (host, port)", "def dev():\n env.hosts = ['']\n env.user = ''\n env.virtualenv_dir = ''\n env.code_dir = ''\n env.var_dir = ''\n env.activate = 'source %s/bin/activate' % env.virtualenv_dir\n env.backup_on_deploy = False", "def onehost(cls, host, host_tags, host_base_path, host_dir_name, args,\n parallel, configuration, remove_remote_files_dirs=False):\n\n log = logging.getLogger(host)\n\n try:\n remote_exec, remote_copy, cleanup_cmd = cls._resolve_remote_cmds(\n args)\n log.debug(\"remote_exec for host \\\"{}\\\": {}\".format(\n host, remote_exec))\n log.debug(\"remote_copy for host \\\"{}\\\": {}\".format(\n host, remote_copy))\n\n family = cls._address_family(args)\n log.debug(\"address family: {}\".format(family))\n target_host = cls.resolve_target_addresses(host, family)\n log.debug(\"target_host for host \\\"{}\\\": {}\".format(\n host, target_host))\n\n local = cdist.exec.local.Local(\n target_host=target_host,\n target_host_tags=host_tags,\n base_root_path=host_base_path,\n host_dir_name=host_dir_name,\n initial_manifest=args.manifest,\n add_conf_dirs=args.conf_dir,\n cache_path_pattern=args.cache_path_pattern,\n quiet_mode=args.quiet,\n configuration=configuration,\n exec_path=sys.argv[0],\n save_output_streams=args.save_output_streams)\n\n remote = cdist.exec.remote.Remote(\n target_host=target_host,\n remote_exec=remote_exec,\n remote_copy=remote_copy,\n base_path=args.remote_out_path,\n quiet_mode=args.quiet,\n archiving_mode=args.use_archiving,\n configuration=configuration,\n stdout_base_path=local.stdout_base_path,\n stderr_base_path=local.stderr_base_path,\n save_output_streams=args.save_output_streams)\n\n cleanup_cmds = []\n if cleanup_cmd:\n cleanup_cmds.append(cleanup_cmd)\n c = cls(local, remote, dry_run=args.dry_run, jobs=args.jobs,\n cleanup_cmds=cleanup_cmds,\n remove_remote_files_dirs=remove_remote_files_dirs)\n c.run()\n cls._remove_paths()\n\n except cdist.Error as e:\n log.error(e)\n if parallel:\n return (host, False, )\n else:\n raise\n\n if parallel:\n return (host, True, )", "def _prepare_publish_environments():\n env = copy.deepcopy(os.environ)\n\n project_name = os.getenv(\"AVALON_PROJECT\")\n asset_name = os.getenv(\"AVALON_ASSET\")\n\n env[\"AVALON_PROJECT\"] = project_name\n env[\"AVALON_ASSET\"] = asset_name\n env[\"AVALON_TASK\"] = os.getenv(\"AVALON_TASK\")\n env[\"AVALON_WORKDIR\"] = os.getenv(\"AVALON_WORKDIR\")\n env[\"AVALON_APP\"] = f\"hosts.{publish_host}\"\n env[\"AVALON_APP_NAME\"] = \"celaction/local\"\n\n env[\"PYBLISH_HOSTS\"] = publish_host\n\n os.environ.update(env)", "def deploy_app(host_=None):\n run_command_on_selected_server(_deploy_app, host_=host_)", "def configure_master_host(master_node, slave_nodes, host_type='linux'):\n if (host_type=='linux'):\n check_firewalld(master_node)\n if not key_is_present(master_node):\n generate_key(master_node)\n push_key_to_slave(master_node, slave_nodes)", "def __setup_deploy(self):\r\n # Create a SSH Key-pair and push it to the robot\r\n if not self.ssh_key.exists():\r\n 
subprocess.run(['ssh-keygen',\r\n '-b', '4096',\r\n '-t', 'rsa',\r\n '-f', self.ssh_key,\r\n '-q', '-N', ''\r\n ])\r\n\r\n os.chmod(self.ssh_key, 0o600)\r\n os.chmod(self.ssh_pub, 0o600)\r\n print('Please enter the password if asked.')\r\n subprocess.run(\r\n ['ssh-copy-id',\r\n '-i', self.ssh_key,\r\n 'robot@{}'.format(self.settings['ip'])\r\n ], stderr=open(os.devnull, 'wb'))\r\n print('Try to log into the brick:')\r\n print('\\tssh -i {} robot@{}'.format(self.ssh_key, self.settings['ip']))", "def localhost():\n env.run = local\n env.cd = lcd\n env.deployment = 'local'", "def deploy_package(package_path, host):\n\n package_name = package_path.name\n\n result = Connection(host).put(package_path, remote=\"/tmp/\", preserve_mode=False)\n result = Connection(host).sudo(\n \"dpkg --force-confdef --force-confold -i /tmp/{}\".format(package_name)\n )", "def setup_dhcp_config(self, board_config):\n raise NotImplementedError", "def setup_ipv6():\n ubuntu_rel = lsb_release()['DISTRIB_CODENAME'].lower()\n if CompareHostReleases(ubuntu_rel) < \"trusty\":\n raise Exception(\"IPv6 is not supported in the charms for Ubuntu \"\n \"versions less than Trusty 14.04\")\n\n # Need haproxy >= 1.5.3 for ipv6 so for Trusty if we are <= Kilo we need to\n # use trusty-backports otherwise we can use the UCA.\n if (ubuntu_rel == 'trusty' and\n CompareOpenStackReleases(os_release('keystone')) < 'liberty'):\n add_source('deb http://archive.ubuntu.com/ubuntu trusty-backports '\n 'main')\n apt_update()\n apt_install('haproxy/trusty-backports', fatal=True)", "def setup():\n\n header(title=\"Setup\", domain_name=domain_name)\n\n update_route53domains_dns = config.get('update_route53domains_dns')\n if update_route53domains_dns is None:\n update_route53domains_dns = True\n\n print(\"\")\n sp.info('Distribution: %s' % distribution)\n\n if not client.site_exists:\n client.s3_create_site()\n sp.succeed(\"Site created on S3: OK\")\n\n # Distribution: s3|route53|cloudfront\n #\n if distribution in [\"route53\", \"cloudfront\"]:\n # setup route53\n client.s3_update_route53_a_records()\n sp.succeed('DNS updated on Route53: OK')\n\n # update domains DNS\n if update_route53domains_dns is True:\n if client.route53domains_update_dns():\n sp.succeed('Domain Name Servers updated: OK')\n\n # cloudfront specific\n if distribution == 'cloudfront':\n\n # SSL: Certificate\n cert_status = client.acm_get_certificate_status()\n if not cert_status:\n sp.info('Creating new ACM SSL certificate')\n client.acm_generate_certificate()\n time.sleep(2)\n cert_status = client.acm_get_certificate_status()\n time.sleep(2)\n sp.succeed('Created SSL certificate: OK')\n sp.succeed('Certificate status: %s ' % cert_status)\n\n # Update the CNAME with ACM route53 data\n if cert_status != \"ISSUED\":\n time.sleep(2)\n if client.acm_update_route53_cname_records():\n sp.succeed('Set SSL certificate Route53 CNAME: OK')\n\n # Cloudfront\n dist_id = client.cloudfront_get_distribution_id()\n if not dist_id:\n sp.info('Creating cloudfront distribution id')\n time.sleep(2)\n client.cloudfront_create_distribution()\n dist_id = client.cloudfront_get_distribution_id()\n sp.succeed('Distribution created: OK')\n sp.succeed('Distribution ID: %s' % dist_id)\n sp.succeed('Distribution Domain Name: %s' % client.cloudfront_get_distribution_domain_name())\n\n # Add cloudfront domain name to A records\n client.cloudfront_update_route53_a_records()\n\n time.sleep(2)\n\n # DONE...\n # S3\n else:\n sp.info('Site will be available from AWS S3 only')\n\n sp.clear()\n 
sp.succeed('Done!')\n print(\"\")\n print(\"URL: %s \" % client.domain_url)\n print(\"S3 : %s \" % client.s3_url)\n footer()" ]
[ "0.7073709", "0.69501", "0.65425813", "0.63506156", "0.6257246", "0.6216987", "0.61947876", "0.60763687", "0.6047417", "0.601704", "0.59991187", "0.5992791", "0.5950253", "0.58975947", "0.5893902", "0.5886802", "0.5858583", "0.5855254", "0.5841016", "0.5830183", "0.581706", "0.58120614", "0.57838196", "0.57737535", "0.5758311", "0.573134", "0.5711245", "0.5703922", "0.5667828", "0.56636405", "0.5662817", "0.5660604", "0.5656137", "0.56424844", "0.5639848", "0.5624835", "0.5587539", "0.5586912", "0.558047", "0.55632514", "0.5543315", "0.55328584", "0.55231583", "0.55203795", "0.5516736", "0.551218", "0.5507542", "0.5503265", "0.55030197", "0.5500514", "0.5495093", "0.5487385", "0.5486356", "0.5477792", "0.5473352", "0.54728824", "0.54717433", "0.5469401", "0.5456066", "0.5450751", "0.5450687", "0.54458016", "0.5429046", "0.54289997", "0.54269266", "0.5398697", "0.53869283", "0.53866917", "0.53761524", "0.5368383", "0.53381234", "0.5337198", "0.5331717", "0.53284615", "0.5313563", "0.5310964", "0.53096545", "0.5302766", "0.52973086", "0.5286906", "0.52703035", "0.52669924", "0.52666354", "0.52651983", "0.52608943", "0.52557623", "0.52536505", "0.52517146", "0.5239805", "0.5232418", "0.5229768", "0.52293664", "0.5227995", "0.52253646", "0.5213421", "0.52127254", "0.5211013", "0.52099776", "0.52075946", "0.5206592" ]
0.6556086
2
Deploy a new release.
def deploy():
    require("hosts", provided_by=[production, staging])
    env.release = time.strftime("%Y-%m-%d_%H:%M:%S")
    upload_tar_from_git()
    install_requirements()
    setup_webserver()
    symlink_current_release()
    restart_webserver()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def deploy():", "def deploy():\n return do_deploy(do_pack())", "def deploy():\n return do_deploy(do_pack())", "def deploy():\n return do_deploy(do_pack())", "def deploy():\n build()\n copy()\n install()", "def deploy():\n update_treesheets()\n restart_treesheets()", "def deploy():\n test()\n if not env.is_staging:\n backup()\n prepare()\n restart_api()", "def deploy():\n new_archive = do_pack()\n\n if new_archive is None:\n return False\n\n res = do_deploy(new_archive)\n return res", "def deploy(self):\n raise NotImplementedError('You must implement the deploy() method '\n 'yourself!')", "def deploy():\n build()\n collect()\n commit()\n push()", "def deploy(self):\n src = self.settings.get('src', None)\n \n if src is not None:\n typ, _, src = src.partition(\":\")\n self._deployment = Deployment.get_deployment(typ)(self, src)\n else:\n self._deployment = Deployment(self, None)\n \n self._thread = self.node.spawn_thread( self._deployment.start )\n eventlet.sleep(0)", "def deploy():\n with cd('/apps/sharejs-rethinkdb-example'):\n run('pwd')\n run('git stash')\n run('git pull -f origin master')\n run('fig -f prod.yml stop')\n run('fig -f prod.yml build')\n run('fig -f prod.yml up -d')", "def deploy(parameters):\n\n print(\"In deploy module\")", "def deploy():\n db.drop_all()\n create_DB()\n app.run()", "def deploy():\n git_pull()\n# build_virtualenv()\n# collectstatic()\n migrate()\n# reload_gunicorn()\n# restart_celery()\n puts(green(\"Deployment done!\"))", "def deploy():\n require('hosts', provided_by=[prod])\n require('whole_path', provided_by=[prod])\n require('code_root')\n upload_tar_from_git(env.whole_path)\n install_requirements()\n symlink_current_release()\n migrate()\n restart_webservers()\n setup_permissions()\n collectstatic()", "def deploy(ctx):\n click.echo('deploying')\n ctx.deploy()\n click.echo('done')", "def deploy():\n _git_pull()\n _migrate()\n _collect_static_files()\n _restart_webserver()", "def deploy(config, args):\n log = logging.getLogger('kraftwerk.deploy')\n \n # TODO better way to detect new, or maybe move to dedicated command\n stdout, stderr = args.node.ssh('stat /var/service/%s' % args.project.name, pipe=True)\n new = bool(stderr) or args.override\n \n # Sync codebase over with the web user\n destination = 'web@%s:/web/%s/' % (args.node.hostname, args.project.name)\n stdout, stderr = args.project.rsync(destination)\n if stderr:\n log.error(\"Sync error: %s\" % stderr)\n sys.exit(stderr)\n \n # Copy requirements\n args.project.copy(args.node, 'requirements.txt')\n \n # Put together the setup script\n cmd = config.template(\"scripts/project_setup.sh\", \n project=args.project, new=new, \n upgrade_packages=args.upgrade_packages)\n stdout, stderr = args.node.ssh(cmd, pipe=True)\n if stderr:\n print stderr\n \n # TODO detect new services\n if not args.no_service_setup and new:\n for service in args.project.services():\n args.node.ssh(service.setup_script)\n \n print u\"%s live at %r\" % (args.project.canonical_domain(), args.node.hostname)", "def deploy():\n\n project_dir = '/home/gastosabertos/gastos_abertos_website'\n with cd(project_dir):\n local('tar -cvzf build.tar.gz build')\n run('cp -r build build-old')\n put('build.tar.gz', '.')\n run('tar -xvf build.tar.gz')", "def deploy():\n\n require('environment', provided_by=env.environments)\n update_source()\n update_requirements()\n mgmt('syncdb', '--migrate')\n restart_supervisor()", "def deploy():\n def mkdirp(dir):\n if not os.path.exists(dir):\n os.makedirs(dir)\n\n def copytree(f1, f2):\n if 
osp.exists(f2):\n shutil.rmtree(f2)\n shutil.copytree(f1, f2)\n\n def symlink(p1, p2):\n if osp.exists(p2):\n try:\n os.remove(p2)\n except:\n shutil.rmtree(p2)\n os.symlink(osp.abspath(p1), p2)\n\n def pathof(modpath):\n mod = __import__(modpath)\n path = os.path.dirname(mod.__file__)\n return path\n\n app = flask.Flask(__name__, static_url_path='/static')\n ping_viz_path = osp.join(pathof('ping'), 'viz')\n roygbiv_web_path = osp.join(pathof('roygbiv'), 'web')\n\n try:\n\n # Poster\n mkdirp('deploy')\n copytree('2015', 'deploy/2015')\n\n # Brain\n copytree(roygbiv_web_path, 'deploy/brain')\n for fil in glob.glob('brain/*.html'):\n shutil.copy(fil, 'deploy/' + fil)\n shutil.copy('brain/two_hemis.html', 'deploy/brain/index.html')\n for fil in glob.glob('brain/css/*') + glob.glob('brain/js/*'):\n shutil.copy(fil, 'deploy/' + fil)\n mkdirp('deploy/brain/data')\n copytree('generated/data/fsaverage', 'deploy/brain/data/fsaverage') # data\n\n # Manhattan\n mkdirp('deploy/gwas')\n copytree(osp.join(ping_viz_path, 'manhattan'), 'deploy/gwas')\n shutil.copyfile('deploy/gwas/manhattan.html', 'deploy/gwas/index.html')\n mkdirp('deploy/gwas/data')\n for fil in glob.glob('generated/data/*.json'):\n shutil.copyfile(fil, os.path.join('deploy/gwas/data', os.path.basename(fil)))\n\n # scatter / similarity plots\n copytree('generated/plots', 'deploy/plots')\n\n # Create the default page.\n with open('deploy/index.html', 'w') as fp:\n fp.write(serve_index())\n\n # Finally, try and reduce snp file size.\n with open('deploy/gwas/data/SNPS_all.json', 'r') as fp:\n snps = simplejson.load(fp)\n with open('deploy/gwas/data/GWAS_MRI_cort_area_ctx_frontalpole_AI__Age_At_IMGExam.json', 'r') as fp:\n gwas = simplejson.load(fp)\n snps = dict([(k, v) for k, v in snps.items()\n if k in gwas[gwas.keys()[0]]])\n with open('deploy/gwas/data/snps_all.json', 'w') as fp:\n simplejson.dump(snps, fp)\n\n except Exception as e:\n print(\"Error deploying: %s\" % e)\n\n def serve():\n app.route('/')(serve_index)\n\n @app.route('/<path:path>')\n def serve_brain_data(path):\n return flask.send_from_directory('deploy', path)\n app.run()\n serve()", "def full_deploy():\n refresh_cts()\n push_mockups()\n deploy()", "def deploy():\n stage(branch='live', role='live')", "def test_redeploy(self):\n pass", "def deploy():\n archive_path = do_pack()\n if archive_path is None:\n print(\"pass\")\n return False\n return do_deploy(archive_path)", "def deploy(self, topology):\n print \"ABC - Deployer.deploy()\"", "def deploy():\n myfile = do_pack()\n if myfile is None:\n return False\n return do_deploy(myfile)", "def deploy():\n filepath = do_pack()\n if (filepath is None):\n return False\n return do_deploy(filepath)", "def deploy(self):\n\n netlify_cli = getattr(settings, \"NETLIFY_PATH\", None)\n if not netlify_cli:\n raise CommandError(\"NETLIFY_PATH is not defined in settings\")\n\n deployment = Deployment()\n deployment.save()\n\n command = [netlify_cli, \"deploy\"]\n command.append(\"--dir={}\".format(settings.BUILD_DIR))\n command.append(\"--prod\")\n command.append('--message=\"Wagtail Deployment #{}\"'.format(deployment.pk))\n\n site_id = getattr(settings, \"NETLIFY_SITE_ID\", None)\n if site_id:\n command.append(\"--site={}\".format(site_id))\n\n auth_token = getattr(settings, \"NETLIFY_API_TOKEN\", None)\n if auth_token:\n command.append(\"--auth={}\".format(auth_token))\n\n subprocess.call(command)", "def deploy():\n upload_static()\n compile_code()\n upload_code()\n upload_supervisor()\n start_server()", "def deploy():\n require(\n 
\"environment\",\n provided_by=[\n production,\n staging,\n test,\n ]\n )\n\n env.logger = IOLogger('DEBUG')\n\n check_prompt = (\n env.prompt and\n env.environment == \"production\" and\n not console.confirm(\n \"Are you sure you want to deploy production?\",\n default=False,\n )\n )\n if check_prompt:\n abort(\"Production deployment aborted.\")\n\n pull_changes()\n update_requirements()\n collect_static()\n sync_db()\n cleanup_pyc()\n restart_apache()", "def deploy():\n git_pull()\n if confirm(\"Install/upgrade requirements with pip?\"):\n install_requeriments()\n django_command('collectstatic')\n django_command('migrate')\n restart()", "def deploy():\n _confirm_branch()\n \n require('settings', provided_by=[production, staging])\n require('branch', provided_by=[stable, master, branch])\n \n with settings(warn_only=True):\n maintenance_up()\n \n checkout_latest()\n gzip_assets()\n deploy_to_s3()\n maintenance_down()", "def deploy():\n setup()\n builddir = get_build_dir()\n if sys.platform == 'win32':\n # Support cygwin rsync on windows:\n build_path = cygpath(slashed(builddir))\n else:\n build_path = slashed(builddir)\n rsync_project(env.admin_webroot, build_path, exclude=\".*\", delete=True)\n sudo(\"chmod -R 755 %(admin_webroot)s\" % env)", "def deploy(n = 10):\n upload_current_release()\n install_requisites()\n create_redirects()\n make_symlinks()\n symlink_current_release()\n sudo('service nginx reload')\n gc_deploys(n)", "def deploy(fingerengine, fingerprint):\n\n base = 'http://{0}:{1}'.format(fingerengine.options.ip, fingerprint.port)\n uri = '/manager/html/upload'\n war_file = fingerengine.options.deploy\n war_path = parse_war_path(war_file)\n cookies = checkAuth(fingerengine.options.ip, fingerprint.port,\n fingerprint.title, fingerprint.version)\n if not cookies:\n utility.Msg(\"Could not get auth for %s:%s\" %\n (fingerengine.options.ip, fingerprint.port), LOG.ERROR)\n return\n\n utility.Msg(\"Preparing to deploy {0}...\".format(war_file))\n\n if fingerprint.version in ['6.0', '7.0', '8.0']:\n # deploying via the gui requires a CSRF token\n (csrf, c) = fetchCSRF(base, cookies)\n if not csrf:\n return\n else:\n # set CSRF and refresh session id\n uri += '?org.apache.catalina.filters.CSRF_NONCE={0}'\n uri = uri.format(csrf)\n cookies = (c, cookies[1])\n\n # read in payload\n try:\n tag = 'deployWar'\n if fingerprint.version in ['4.0', '4.1']:\n tag = 'installWar'\n files = {tag : (war_path + '.war', open(war_file, 'rb'))}\n except Exception, e:\n utility.Msg(e, LOG.ERROR)\n return\n\n # deploy\n response = utility.requests_post(base + uri, files=files, cookies=cookies[0],\n auth=cookies[1])\n\n if response.status_code is 200 and \"OK\" in response.content:\n utility.Msg(\"Deployed {0} to /{1}\".format(war_file, war_path), LOG.SUCCESS)\n elif 'Application already exists' in response.content:\n utility.Msg(\"Application {0} is already deployed\".format(war_file), LOG.ERROR)\n elif response.status_code is 403:\n utility.Msg(\"This account does not have permissions to remotely deploy. 
Try\"\\\n \" using manager_deploy\", LOG.ERROR)\n else:\n utility.Msg(\"Failed to deploy (HTTP %d)\" % response.status_code, LOG.ERROR)", "def deploy():\n comp = do_pack()\n\n if (not comp):\n return False\n return do_deploy(comp)", "def deploy(self):\n self.loadseasoning()\n for key in self.config:\n print(\"Deploying \" + key + \" formula...\")\n self.cloneformula(key, self.config[key]['reponame'], \\\n self.config[key]['url'], self.config[key]['branch'])", "def deploy():\n from flask_migrate import upgrade\n\n upgrade() # upgrade to the latest db schema\n\n # setup necessary data to initialize database\n if Conference.query.filter_by(short_name='main').first():\n print('database already initialized')\n else:\n # add registration form questions\n FormConfiguration.insert_formConfiguration()\n Role.insert_roles() # create user roles\n generate_main_conf() # generate default main conference\n generate_admin() # generate the site admin", "def deploy(args):\n from scrapyd_client import deploy\n\n sys.argv.pop(1)\n deploy.main()", "def deploy():\n packing = do_pack()\n if packing is False:\n return False\n\n return do_deploy(packing)", "def deploy(fingerengine, fingerprint):\n\n global cookie \n\n cfm_path = abspath(fingerengine.options.deploy) \n cfm_file = parse_war_path(cfm_path, True)\n dip = fingerengine.options.ip\n\n # set our session cookie\n cookie = checkAuth(dip, fingerprint.port, title)\n if not cookie:\n utility.Msg(\"Could not get auth to %s:%s\" % (dip, fingerprint.port),\n LOG.ERROR)\n return\n\n utility.Msg(\"Preparing to deploy {0}..\".format(cfm_file))\n utility.Msg(\"Fetching web root..\", LOG.DEBUG)\n\n # fetch web root; i.e. where we can read the shell\n root = fetch_webroot(dip, fingerprint)\n if not root:\n utility.Msg(\"Unable to fetch web root.\", LOG.ERROR)\n return\n\n # create the scheduled task \n utility.Msg(\"Web root found at %s\" % root, LOG.DEBUG)\n utility.Msg(\"Creating scheduled task...\")\n\n if not create_task(dip, fingerprint, cfm_file, root):\n return\n\n # invoke the task\n utility.Msg(\"Task %s created, invoking...\" % cfm_file)\n run_task(dip, fingerprint, cfm_path)\n \n # remove the task\n utility.Msg(\"Cleaning up...\")\n delete_task(dip, fingerprint, cfm_file)", "def _deploy_app():\n rsync_project(env.remote_directory, env.local_directory,\n exclude=['.git/', '*.pyc', 'tests.py', 'migrations/'])\n sudo('service installer_app restart')", "def createNIMDeploy(self):\n\n if self.deploy == 'y':\n\n # find next IP on the range\n #\n new_ip = nim.NIMNewIP()\n new_ip = new_ip.getNewIP(self.nim_address, self.nim_ipstart,\n self.nim_ipend, self.nim_ipnet)\n self.new_ip = new_ip\n f_nim_reserved_ips = open('%s/poweradm/data/reserved_ips' %\n config.pahome, 'a')\n f_nim_reserved_ips.write('%s\\n' % (self.new_ip))\n f_nim_reserved_ips.close()\n\n f_nim_exe = open('%s/poweradm/changes/deploy_nim_%s-%s.nim' %\n (config.pahome, self.lparprefix, self.lparname),\n 'w')\n\n def f_nimexe_chksh():\n f_nim_exe.write(\"\\nif [ $? != 0 ];\"\n \"then\\n\"\n \"\\techo 'An error has occurred. Check the \"\n \"actions taken.'; \\n\"\n \"\\texit;\\n\"\n \"else\\n\"\n \"\\techo 'Command OK. 
Continuing';\\n\"\n \"fi\\n\")\n\n f_nim_exe.write('#!/bin/sh\\n')\n\n f_nim_exe.write('\\n\\necho \"Adding host %s-%s on NIM Server '\n '/etc/hosts\"\\n' % (self.lparprefix, self.lparname))\n\n f_nim_exe.write('\\n\\nssh -l poweradm %s sudo hostent -a %s -h %s' %\n (self.nim_address, self.new_ip, self.lparname))\n f_nimexe_chksh()\n\n f_nim_exe.write('\\n\\necho \"Creating machine %s-%s on NIM Server\"\\n'\n % (self.lparprefix, self.lparname))\n\n f_nim_exe.write('\\n\\nssh -l poweradm %s sudo nim -o define -t '\n 'standalone -a platform=chrp -a netboot_kernel=mp '\n '-a if1=\\\\\"$(ssh -l poweradm %s sudo lsnim -t ent '\n '| awk \\'{ print $1 }\\' | head -1) %s 0\\\\\" -a '\n 'cable_type1=tp %s\\n' % (self.nim_address,\n self.nim_address,\n self.lparname,\n self.lparname))\n f_nimexe_chksh()\n\n f_nim_exe.write('\\n\\necho \"Resource alocations and perform '\n 'operations to %s-%s on NIM Server\"\\n'\n % (self.lparprefix, self.lparname))\n\n if config.nim_deploy_mode.lower() == 'mksysb':\n\n f_nim_exe.write('\\n\\nssh -l poweradm %s sudo nim -o bos_inst'\n ' -a source=mksysb -a spot=%s -a mksysb=%s -a '\n 'no_client_boot=yes %s -a '\n 'accept_licenses=yes %s\\n'\n % (self.nim_address, self.nim_cfg_spot,\n self.nim_cfg_mksysbspot, self.bosinst_data,\n self.lparname))\n\n f_nimexe_chksh()\n\n elif nim_deploy_mode.lower() == 'lpp':\n\n f_nim_exe.write('\\n\\nssh -l poweradm %s sudo nim -o bos_inst '\n '-a source=spot -a spot=%s -a lpp_source=%s '\n '-a no_client_boot=yes %s -a '\n 'accept_licenses=yes %s\\n'\n % (self.nim_address, self.nim_cfg_spot,\n self.nim_cfg_mksysbspot, self.bosinst_data,\n self.lparname))\n f_nimexe_chksh()\n\n f_nim_exe.write('\\n\\necho \"Getting the Mac Address from %s-%s\"\\n'\n % (self.lparprefix, self.lparname))\n f_nim_exe.write('echo \"This might take a few minutes...\"\\n')\n\n f_nim_exe.write('\\n\\nmac_address=$(ssh -l poweradm %s '\n 'lpar_netboot -M -A -n -T off -t '\n 'ent %s-%s %s %s | grep C10-T1 | '\n 'awk \\'{ print $3 }\\')\\n'\n % (config.hmcserver, self.lparprefix,\n self.lparname, self.lparname, self.lparframe))\n f_nimexe_chksh()\n\n f_nim_exe.write('\\n\\necho \"Booting LPAR %s-%s on NIM Server\"\\n'\n % (self.lparprefix, self.lparname))\n f_nim_exe.write('echo \"This might take a few minutes...\"\\n')\n f_nim_exe.write('\\n\\nssh -l poweradm %s lpar_netboot -m '\n '$mac_address -T off -t ent -s auto -d auto '\n '-S %s -C %s %s-%s %s %s\\n'\n % (config.hmcserver, self.nim_ipdeploy,\n self.new_ip, self.lparprefix, self.lparname,\n self.lparname, self.lparframe))\n f_nimexe_chksh()\n\n print ('\\n\\nChange VLAN on profile to final config')\n f_nim_exe.write('\\n\\nssh -l poweradm %s chsyscfg -r prof -m '\n '%s -i \\'lpar_name=%s-%s, name=%s, '\n '\\\\\\\"virtual_eth_adapters=%s\\\\\\\"\\''\n % (config.hmcserver, self.lparframe,\n self.lparprefix, self.lparname, self.lparname,\n self.lparvlans))\n\n f_nim_exe.close()\n\n print ('\\n\\nInitializing deploy OS...')\n\n f_nim_deploy = open(self.nim_file, 'a')\n f_nim_deploy.write('#IP %s\\n' % (self.new_ip))\n f_nim_deploy.write('#NIMSERVER %s\\n' % (self.nim_server))\n f_nim_deploy.write('#NIMADDRESS %s\\n' % (self.nim_address))\n f_nim_deploy.close()\n\n os.system('sh %s/poweradm/changes/deploy_nim_%s-%s.nim' %\n (config.pahome, self.lparprefix, self.lparname))\n\n os.system('mv %s/poweradm/nim/%s-%s.nim %s/poweradm/nim_executed/'\n % (config.pahome, self.lparprefix,\n self.lparname, config.pahome))\n os.system('mv %s/poweradm/changes/deploy_nim_%s-%s.'\n 'nim %s/poweradm/changes_executed/'\n % 
(config.pahome, self.lparprefix, self.lparname,\n config.pahome))\n\n print ('\\nPlease, access HMC %s and run command below to finish '\n 'OS install. '\n '\\n\\t\\'mkvterm -m %s -p %s-%s\\' ' %\n (config.hmcserver, self.lparframe, self.lparprefix,\n self.lparname))", "def post_deploy(self) -> Any:\n raise NotImplementedError", "def deploy():\n archive_path = do_pack()\n\n if not archive_path:\n return False\n\n return do_deploy(archive_path)", "def test_create_deployment(self):\n pass", "def test_publish_deployment_run(self):\n pass", "def deploy():\n archive_path = do_pack()\n if archive_path is False:\n return false\n\n deploy_return = do_deploy(archive_path)\n return deploy_return", "def deploy():\n\n archive_path = do_pack()\n\n if archive_path is None:\n return False\n\n return do_deploy(archive_path)", "def deploy(verbose, app, archive):\n return _deploy_in_mode(\n mode=\"live\", verbose=verbose, log=log, app=app, archive=archive\n )", "def deploy(upgrade=False):\n print(\"Deploying project on {} !\".format(env.stage))\n execute('system.setup')\n execute('git.checkout')\n execute('virtualenv.setup')\n execute('django.setup')\n execute('cron.setup')\n execute('uwsgi.setup')\n execute('supervisor.setup')\n execute('nginx.setup')", "def deploy(self) -> str:\n raise NotImplementedError()", "def do_deploy(app, deltas={}, newrev=None):\n\n app_path = join(APP_ROOT, app)\n procfile = join(app_path, 'Procfile')\n log_path = join(LOG_ROOT, app)\n\n env = {'GIT_WORK_DIR': app_path}\n if exists(app_path):\n echo(\"-----> Deploying app '{}'\".format(app), fg='green')\n call('git fetch --quiet', cwd=app_path, env=env, shell=True)\n if newrev:\n call('git reset --hard {}'.format(newrev), cwd=app_path, env=env, shell=True)\n call('git submodule init', cwd=app_path, env=env, shell=True)\n call('git submodule update', cwd=app_path, env=env, shell=True)\n if not exists(log_path):\n makedirs(log_path)\n workers = parse_procfile(procfile)\n if workers and len(workers) > 0:\n settings = {}\n if exists(join(app_path, 'requirements.txt')) and found_app(\"Python\"):\n settings.update(deploy_python(app, deltas))\n elif exists(join(app_path, 'Gemfile')) and found_app(\"Ruby Application\") and check_requirements(['ruby', 'gem', 'bundle']):\n settings.update(deploy_ruby(app, deltas))\n elif exists(join(app_path, 'package.json')) and found_app(\"Node\") and (\n check_requirements(['nodejs', 'npm']) or check_requirements(['node', 'npm']) or check_requirements(['nodeenv'])):\n settings.update(deploy_node(app, deltas))\n elif exists(join(app_path, 'pom.xml')) and found_app(\"Java Maven\") and check_requirements(['java', 'mvn']):\n settings.update(deploy_java_maven(app, deltas))\n elif exists(join(app_path, 'build.gradle')) and found_app(\"Java Gradle\") and check_requirements(['java', 'gradle']):\n settings.update(deploy_java_gradle(app, deltas))\n elif (exists(join(app_path, 'Godeps')) or len(glob(join(app_path, '*.go')))) and found_app(\"Go\") and check_requirements(['go']):\n settings.update(deploy_go(app, deltas))\n elif exists(join(app_path, 'project.clj')) and found_app(\"Clojure Lein\") and check_requirements(['java', 'lein']):\n settings.update(deploy_clojure(app, deltas))\n elif 'release' in workers and 'web' in workers:\n echo(\"-----> Generic app detected.\", fg='green')\n settings.update(deploy_identity(app, deltas))\n elif 'static' in workers:\n echo(\"-----> Static app detected.\", fg='green')\n settings.update(deploy_identity(app, deltas))\n else:\n echo(\"-----> Could not detect runtime!\", 
fg='red')\n # TODO: detect other runtimes\n if \"release\" in workers:\n echo(\"-----> Releasing\", fg='green')\n retval = call(workers[\"release\"], cwd=app_path, env=settings, shell=True)\n if retval:\n echo(\"-----> Exiting due to release command error value: {}\".format(retval))\n exit(retval)\n workers.pop(\"release\", None)\n else:\n echo(\"Error: Invalid Procfile for app '{}'.\".format(app), fg='red')\n else:\n echo(\"Error: app '{}' not found.\".format(app), fg='red')", "def deploy():\n remote_dir = os.path.abspath(os.path.join(REMOTE_BASE_DIR, REPO_NAME))\n \n with settings(warn_only=True):\n if run(\"test -d %s\" % (remote_dir)).failed:\n puts(red(\"[Repo %s does not exist on remote at: %s]\" % (REPO_NAME, remote_dir)))\n with cd(REMOTE_BASE_DIR):\n run(\"git clone %s %s\" % (REPO_URL, REPO_NAME))\n\n puts(yellow(\"[Write logs]\"))\n run(\"echo '-----------------------------' > %s\" % REMOTE_ERR_FILE)\n run(\"echo `date` >> %s\" % REMOTE_ERR_FILE)\n run(\"echo '-----------------------------' >> %s\" % REMOTE_ERR_FILE)\n run(\"echo '-----------------------------' > %s\" % REMOTE_LOG_FILE)\n run(\"echo `date` >> %s\" % REMOTE_LOG_FILE)\n run(\"echo '-----------------------------' >> %s\" % REMOTE_LOG_FILE)\n\n puts(yellow(\"[Update repo: %s]\" % REPO_NAME))\n with cd(remote_dir):\n run(\"git pull origin master >> %s 2>> %s\" %\n (REMOTE_LOG_FILE, REMOTE_ERR_FILE))\n\n # reminder new static files\n puts(yellow('Do not forget to run collect staticfiles on DJANGO server.'))", "def full_deploy(api_version='HEAD', renderer_version='HEAD',\n markup_renderer_version=None):\n setup()\n\n api.full_deploy(api_version)\n renderer.full_deploy(renderer_version)\n markup_renderer.full_deploy(markup_renderer_version)\n\n upload_nginx_conf()\n upload_uwsgi_conf()\n install_systemd_services()", "def deploy(git_branch=None, restart_url='http://rnacentral.org', quick=False):\n if env.deployment == 'remote':\n deploy_remotely(git_branch, restart_url, quick)\n elif env.deployment == 'local':\n deploy_locally(git_branch, restart_url, quick)\n else:\n print('Check usage')", "def test_create_deployment_entire(self):\n pass", "def do_deploy(archive_path):\n if not archive_path:\n return False\n if not os.path.exists(archive_path):\n return False\n\n filename = archive_path.split(\"/\")[-1]\n put(archive_path, \"/tmp/{}\".format(filename))\n\n run(\"sudo mkdir -p /data/web_static/releases/{}\".format(filename))\n run(\"sudo tar -xzf /tmp/{} -C /data/web_static/releases/{}\"\n .format(filename, filename))\n run(\"sudo rm /tmp/{}\".format(filename))\n run(\"sudo mv /data/web_static/releases/{}/web_static/*\"\n \" /data/web_static/releases/{}\"\n .format(filename, filename))\n run(\"sudo rm -rf /data/web_static/releases/{}/web_static\"\n .format(filename))\n run(\"sudo rm -rf /data/web_static/current\")\n run(\"sudo ln -s /data/web_static/releases/{}/ /data/web_static/current\"\n .format(filename))\n print(\"New version successfully deployed!\")", "def deploy(self):\n self._switch.odlclient._request_json(self._table_path, method=\"post\", json={\n \"flow\": self._odl_inventory()\n })", "def deploy(env='development', update_settings='n', upgrade_apps='n'):\n update_site(env, update_settings, upgrade_apps)\n restart_site(env)", "def test_relaunch_deployment_run(self):\n pass", "def deploy(\n context, instance, user=get_local_user(), initial=False, stack=None, branch=BRANCH,\n):\n remote = True\n\n if initial:\n clone(context, instance, user, branch)\n else:\n backup(context, user, remote, instance, stack)\n\n 
update(context, user, remote, instance, branch)\n up(context, user, remote, instance, stack)", "def deploy_django(ref=None, debug=False, dirty=False):\n\n create_virtualenv()\n operations.fetch_render_copy(ref, debug, dirty, True)\n pip_requirements()\n migratedb()\n refresh_wsgi()", "def test_retest_deployment_run(self):\n pass", "def schedule_deploy():\n\n logger.info(\"Scheduling deploy\")\n scheduler.schedule_job(\"op_deploy\", {}, \"#general\", 60)", "def redeploy(*args, **kwargs):\n try:\n _redeploy(*args, **kwargs)\n except InvalidHTTPInvocationError:\n uri = os.getenv(\"SERVER_NAME\") + os.getenv(\"SCRIPT_NAME\")\n print(\"Status: 405 Method Not Allowed\")\n print(\"Content-Type: text/plain\\r\\n\\r\\n\")\n\n print(\"Status: 405 Method Not Allowed\")\n print(\n \"Invalid invocation. \"\n \"You must make a POST request with the secret.\\n\"\n \"\\n\"\n \" curl -XPOST -dsecret=XXXXXX \" + uri\n )\n except RedeployError as err:\n print(\"Status: 400 Bad Request\")\n print(\"Content-Type: text/plain\\r\\n\\r\\n\")\n\n print(\"Status: 400 Bad Request\")\n print(\"Could not redeploy:\", type(err).__name__)\n except subprocess.CalledProcessError as err:\n print(\"Status: 500 Server Error\")\n print(\"Content-Type: text/plain\\r\\n\\r\\n\")\n\n print(\"Status: 500 Server Error\")\n else:\n # All went okay :)\n print(\"Status: 200 OK\")\n print(\"Content-Type: text/plain\\r\\n\\r\\n\")\n\n print(\"Redeployment script run.\")", "def deploy(site):\n\n # Stop the program if the folder isn't initialized yet.\n Vagrant.stop_if_not_init()\n\n # Stop the program if the site is NOT herokufied.\n Heroku.stop_if_not_herokufied(site)\n\n # Now, run the \"deployheroku\" script on the VM.\n # That will deploy the site for you.\n Vagrant.run_script_on_vm(\"deployheroku\", site)", "def install():\n deploy()\n configure()", "def run():\n\n parser = OptionParser()\n parser.add_option(\"-d\", \"--dir\", dest=\"dir\", help=\"The app local directory\")\n parser.add_option(\"-r\", \"--remote_dir\", dest=\"remote_dir\", help=\"The app remote directory\")\n parser.add_option(\"-n\", \"--name\", dest=\"name\", help=\"The django app name\")\n parser.add_option(\"-f\", \"--full\", help=\"Provision before deploy\", default=False)\n parser.add_option(\"-o\", \"--no_files\", help=\"Don't copy the app files\", default=False)\n\n (options, args) = parser.parse_args()\n\n execute(deploy, **options.__dict__)", "def ensure(name,\n repository,\n rev=None,\n user=None,\n update_branch=True, # if on branch than checks remote if branch has been updated and than redeploys\n deploy_cmd=None, # executed on deploy\n test_cmd=None, # executed to verify deploy (before linking)\n on_failed_cmd=None, # executed on failed deploy\n activate_cmd=None, # i.e. 
to tell supervisor to restart the app\n keep=5, # how many versions should we keep\n ):\n\n def update_me():\n if __opts__['test']:\n ret['result'] = None\n ret['comment'] = 'Calling deployment.deploy'\n return\n __salt__['deployment.limit_history'](name, keep=keep)\n new_current = __salt__['deployment.deploy'](name, repository=repository, rev=rev, user=user, deploy_cmd=deploy_cmd, test_cmd=test_cmd, on_failed_cmd=on_failed_cmd, activate_cmd=activate_cmd)\n __salt__['deployment.limit_history'](name, keep=keep)\n ret['changes']['new_current'] = new_current\n\n ret = {\n 'name': name,\n 'changes': {},\n 'result': True,\n 'comment': ''\n }\n\n current_meta = __salt__['deployment.current'](name)\n\n if 'rev' not in current_meta:\n log.info(\"Deploying app as there is no currently running\")\n update_me()\n return ret\n\n if current_meta['rev'] == rev:\n if update_branch:\n is_on_branch = not __salt__['deployment.git_is_detached'](current_meta['path'])\n if is_on_branch and __salt__['deployment.git_is_remote_ahead'](current_meta['path'], current_meta['rev']):\n log.info(\"Deploying app as there is a new version on the branch on remote repository\")\n update_me()\n return ret\n else:\n update_me()\n return ret\n\n return ret", "def pub_deploy(args, project=\"\", account=\"\", api_key=\"\"):\n base_url, api_key, updated = get_project_connect(\n 'djaodjin',\n base_url=DEFAULT_API_ENDPOINT,\n api_key=api_key)\n project, account, updated = get_project_account(\n project=project, account=account)\n if updated:\n save_config()\n\n api_container_url = \\\n \"%(base_url)s/api/containers/%(organization)s/apps/%(app)s/\" % {\n 'base_url': base_url,\n 'organization': str(account),\n 'app': str(project)}\n data = None\n container_location = args[0] if args else None\n if container_location:\n data = {'location': container_location}\n resp = requests.post(api_container_url, data=data, auth=(api_key, \"\"))\n LOGGER.info(\"POST %s returns %d %s\",\n api_container_url, resp.status_code, resp.text)", "def prepare_deploy():\n from fabdeploy.django import test as django_test\n django_test()\n git.add_commit_pull()\n git.push()", "def do_deploy(archive_path):\n if path.exists(archive_path) is False:\n print(\"pass\")\n return False\n\n try:\n file_ = archive_path.split(\"/\")[1]\n filename = file_.split(\".\")[0]\n put(archive_path, \"/tmp/\")\n run(\"sudo mkdir -p /data/web_static/releases/\" + filename)\n run(\"sudo tar -zxf /tmp/{}.tgz -C {}\".format(\n filename, \"/data/web_static/releases/\" + filename))\n run(\"sudo rm /tmp/{}\".format(file_))\n run('sudo mv /data/web_static/releases/{}/web_static/* /data/web_static\\\n/releases/{}'.format(filename, filename))\n run(\"sudo rm -rf /data/web_static/current\")\n run(\"sudo ln -sf /data/web_static/releases/{}\\\n /data/web_static/current\".format(filename))\n print(\"New version deployed!\")\n return True\n except:\n return False", "def do_deploy(archive_path):\n if not os.path.exists(archive_path):\n return False\n\n file_ext = archive_path[archive_path.find('/') + 1:]\n file_name = archive_path[archive_path.find('/') + 1: -4]\n\n result = put(archive_path, '/tmp/' + file_ext)\n if result.failed:\n return False\n\n result = run('mkdir -p /data/web_static/releases/' + file_name + '/')\n if result.failed:\n return False\n\n result = run('tar -xzf /tmp/' + file_ext +\n ' -C /data/web_static/releases/' + file_name + '/')\n if result.failed:\n return False\n\n result = run('rm /tmp/' + file_ext)\n if result.failed:\n return False\n\n result = run('mv 
/data/web_static/releases/' + file_name +\n '/web_static/* /data/web_static/releases/' + file_name + '/')\n if result.failed:\n return False\n\n result = run('rm -rf /data/web_static/releases/' + file_name +\n '/web_static')\n if result.failed:\n return False\n\n result = run('rm -rf /data/web_static/current')\n if result.failed:\n return False\n\n result = run('ln -s /data/web_static/releases/' +\n file_name + '/ /data/web_static/current')\n if result.failed:\n return False\n\n print('New version deployed!')\n return True", "def test_clone_deployment(self):\n pass", "def deployFunc(runType):\n logger.info('Deploying lambda to {} environment'.format(runType))\n runProcess(runType, [\n 'deploy',\n '--config-file',\n 'run_config.yaml',\n '--requirements',\n 'requirements.txt'\n ])\n createEventMapping(runType)", "def create_and_run_deployment(\n project_id: int = Form(...),\n model_id: Text = Form(...),\n version: Text = Form(...),\n model_uri: Text = Form(...),\n type: Text = Form(...) # pylint: disable=redefined-builtin\n) -> JSONResponse:\n\n deploy_manager = DeployManager()\n deployment_id = deploy_manager.create_deployment(\n project_id, model_id, version, model_uri, type\n )\n return JSONResponse({'deployment_id': str(deployment_id)}, HTTPStatus.ACCEPTED)", "def do_deploy(archive_path):\n\n if not os.path.exists(archive_path):\n return(False)\n try:\n put(archive_path, \"/tmp/\")\n folder_path = \"/data/web_static/releases/\" + archive_path[9:-4]\n name_file = archive_path[9:]\n name_folder = archive_path[9:-4]\n date = archive_path[21:-4]\n releases = \"/data/web_static/releases/\"\n\n run(\"mkdir -p {}\".format(folder_path))\n run(\"tar -xzf /tmp/{} -C {}\".format(name_file, folder_path))\n run(\"rm /tmp/{}\".format(name_file))\n run(\"mv {}{}/web_static/* {}{}/\"\n .format(releases, name_folder, releases, name_folder))\n run(\"rm -rf {}{}/web_static\".format(releases, name_folder))\n run(\"rm -rf /data/web_static/current\")\n run(\"ln -s {} /data/web_static/current\".format(folder_path))\n print(\"New version deployed!\")\n\n return(True)\n except BaseException:\n return (False)", "def do_deploy(archive_path):\n if not exists(archive_path):\n return False\n fileNameExt = archive_path.split('/')[-1]\n fileName = fileNameExt.split(\".\")[0]\n result = put(archive_path, '/tmp/{}'.format(fileNameExt))\n if result.failed:\n return False\n result = run(\"rm -rf /data/web_static/releases/{}/\".format(fileName))\n if result.failed:\n return False\n result = run(\"mkdir -p /data/web_static/releases/{}/\".format(fileName))\n if result.failed:\n return False\n result = run(\"tar -xzf /tmp/{} -C /data/web_static/releases/{}/\"\n .format(fileNameExt, fileName))\n if result.failed:\n return False\n result = run(\"rm /tmp/{}\".format(fileNameExt))\n if result.failed:\n return False\n input = \"mv /data/web_static/releases/{}/web_static/*\\\n /data/web_static/releases/{}/\".format(fileName, fileName)\n result = run(input)\n if result.failed:\n return False\n result = run(\"rm -rf /data/web_static/releases/{}/web_static\"\n .format(fileName))\n if result.failed:\n return False\n result = run(\"rm -rf /data/web_static/current\")\n if result.failed:\n return False\n result = run(\"ln -s /data/web_static/releases/{}/ /data/web_static/current\"\n .format(fileName))\n if result.failed:\n return False\n print(\"New version deployed!\")\n return True", "def deploy():\n build()\n rsync_project(\n local_dir=os.path.abspath(env.config['destination']) + \"/\",\n remote_dir=env.remote_dir,\n delete=True,\n 
extra_opts='--exclude=\".DS_Store\"',\n )", "def postdeploy(verbose_level=1):\n check_arg(verbose_level, u._('Verbose level'), int)\n\n action = KollaAction(verbose_level=verbose_level,\n playbook_name='post-deploy.yml')\n ansible_job = action.postdeploy()\n return Job(ansible_job)", "def deploy():\n with cd(\"~/public_html/\"):\n run(\"/usr/local/cpanel/3rdparty/bin/git pull\")\n\n with cd(\"~/public_html/skin/frontend/gemz/default/tools/\"):\n run(\"grunt default\")\n #sudo(\"/scripts/enablefileprotect\")", "def deploy(self, name, task, app_config, provider_config, **kwargs):\n result = super().deploy(\n name, task, app_config, provider_config)\n return result", "def deploy(fingerengine, fingerprint):\n\n\tcfm_path = abspath(fingerengine.options.deploy)\n\tcfm_file = parse_war_path(cfm_path, True)\n\tdip = fingerengine.options.ip\n\n\tcookie = checkAuth(dip, fingerprint.port, title, fingerprint.version)[0]\n\tif not cookie:\n\t\tutility.Msg(\"Could not get auth\", LOG.ERROR)\n\t\treturn\n\n\tutility.Msg(\"Preparing to deploy {0}...\".format(cfm_file))\n\tutility.Msg(\"Fetching web root...\", LOG.DEBUG)\n\n\troot = fetch_webroot(dip, fingerprint, cookie)\n\tif not root:\n\t\tutility.Msg(\"Unable to fetch web root.\", LOG.ERROR)\n\t\treturn\n\t\n\t# create the scheduled task\n\tutility.Msg(\"Web root found at %s\" % root, LOG.DEBUG)\n\tutility.Msg(\"Creating scheduled task...\")\n\n\tif not create_task(dip, fingerprint, cfm_file, root, cookie):\n\t\treturn\n\n\t# invoke the task\n\tutility.Msg(\"Task %s created, invoking...\" % cfm_file)\n\trun_task(dip, fingerprint, cfm_path, cookie)\n\n\t# cleanup\n\tutility.Msg(\"Cleaning up...\")\n\tif not delete_task(dip, fingerprint, cfm_file, cookie):\n\t\tutility.Msg(\"Failed to remove task. May require manual removal.\", LOG.ERROR)", "def start_deployment(self):\n return", "def run_deployment(file):\n print(\"running deployment\")\n DEPLOYER = Process(target=deployment.start_deployment,\\\n args=(SERIAL_PARENT, ENCODER_CHILD, TROLL, file))\n DEPLOYER.start()\n DEPLOYER.join()", "def upload(self, request, pk=None):\n app = self.get_object()\n deployment = Revision()\n deployment.compressed_archive = request.FILES['file']\n deployment.app = app\n deployment.save()\n app.deploy()\n response = {}\n return Response(response)", "def deploy(env_type):\n render(env_type)\n\n bucket = _config['deploy'][env_type]['bucket']\n notice('deploying to %s' % bucket)\n\n # Sync to S3\n deploy_path = join(_config['project_path'], 'build', 'website')\n _s3cmd_sync(deploy_path, bucket)", "def test_release_deployment_run(self):\n pass", "def deploy_go_app(app_name, uri):\n execute(local_fetch_s3_artifact, uri)\n execute(deploy_artifact, app_name, uri)\n execute(create_symlink,\n '{}/config/config.yaml'.format(get_app_basedir(app_name)),\n '{}/etc/config.yaml'.format(get_current_release_dir(app_name)))", "def deploy_installer(l_dir=env.local_directory):\n env.local_directory = l_dir\n deploy_app(host_=env.myhost)", "def do_deploy(archive_path):\n if path.exists(archive_path):\n\n # File name without .tgz\n file_ext = archive_path.split('/')[1]\n file_alone = file_ext.split(\".\")[0]\n curr_release = \"/data/web_static/releases/\" + file_alone + '/'\n\n result = True\n\n # Deploy compressed file to the server /tmp/ directory\n upload = put(archive_path, \"/tmp/\")\n if upload.failed:\n result = False\n\n # Make dir to store the release\n dir_release = run(\"sudo mkdir -p \" + curr_release)\n if dir_release.failed:\n result = False\n\n # Uncompress file inside the folder 
created\n uncompress = run(\"sudo tar -xzf \" + \"/tmp/\\\n\" + file_ext + \" -C \" + curr_release)\n if uncompress.failed:\n result = False\n\n # Move all files from web_static to folder release\n move_info = run(\"sudo mv \" + curr_release + \"\\\nweb_static/* \" + curr_release)\n if move_info.failed:\n result = False\n\n # Remove empty web_static directory\n rm_empty = run(\"sudo rm -rf \" + curr_release + \"\\\nweb_static/\")\n if rm_empty.failed:\n result = False\n\n # Remove symbolic link current\n rm_link = run(\"sudo rm -rf /data/\\\nweb_static/current\")\n if rm_link.failed:\n result = False\n\n # Make new symbolic link\n new_link = run(\"sudo ln -s \" + curr_release + \" /data/\\\nweb_static/current\")\n if new_link.failed:\n result = False\n\n return result\n else:\n return False", "def run(syncdb=False):\n from fabdeploy.django import migrate as django_migrate, syncdb as django_syncdb\n import time\n env.release = time.strftime('%Y%m%d%H%M%S')\n prepare_deploy() # pull, test, push\n git.remote_pull()\n app.install_requirements()\n django_migrate(syncdb) # syncdb in case is first time\n deploy_static()", "def test_launch_deployment(self):\n pass", "def _deploy_instance(self):\n if not os.path.exists(self.instance_path):\n pw = pwd.getpwnam(self.user)\n mode = (\n stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |\n stat.S_IROTH | stat.S_IXOTH)\n utils.mkdir(self.instance_path, mode, pw[2], pw[3])\n path = \"{}/src/automx_wsgi.py\".format(self.repo_dir)\n utils.exec_cmd(\"cp {} {}\".format(path, self.instance_path),\n sudo_user=self.user, cwd=self.home_dir)", "def do_deploy(archive_path):\n if (os.path.isfile(archive_path) is False):\n print(\"wtf\")\n return False\n\n arch_name = archive_path.split('/')[-1]\n folder = (\"/data/web_static/release/\" + arch_name.split(\".\")[0])\n try:\n put(archive_path, \"/tmp/\")\n run(\"mkdir -p {}/\".format(folder))\n run(\"tar -xzf /tmp/{} -C {}\".format(arch_name, folder))\n run(\"rm /tmp/{}\".format(arch_name))\n run(\"mv {}/web_static/* {}/\".format(folder, folder))\n run(\"rm -rf {}/web_static\".format(folder))\n run(\"rm -rf /data/web_static/current\")\n run(\"ln -s {}/ /data/web_static/current\".format(folder))\n print(\"New version deployed!\")\n return (True)\n except:\n print(\"Not Deployed\")\n return (False)", "def do_deploy(archive_path):\n if not os.path.exists(archive_path):\n return False\n else:\n try:\n put(archive_path, \"/tmp/\")\n filename = archive_path.split('/')\n no_ext = filename[-1].split('.')\n archive = no_ext[0]\n run(\"mkdir -p /data/web_static/releases/\" + archive + \"/\")\n run(\"tar -zxf /tmp/\" + filename[1] +\n \" -C /data/web_static/releases/\" +\n archive + \"/\")\n run(\"rm /tmp/\" + filename[1])\n run(\"mv /data/web_static/releases/\" + archive +\n \"/web_static/* /data/web_static/releases/\" + archive + \"/\")\n run(\"rm -rf /data/web_static/releases/\" + archive + \"/web_static\")\n run(\"rm -rf /data/web_static/current\")\n run(\"ln -s /data/web_static/releases/\" + archive +\n \"/ /data/web_static/current\")\n print(\"New version deployed!\")\n return True\n except:\n return False", "def deploy():\n with cd(env.REMOTE_CODEBASE_PATH):\n run(\"git pull\")\n run(\"go build -o app\")\n sudo(\"supervisorctl reload\")" ]
[ "0.7744899", "0.706143", "0.706143", "0.706143", "0.6948104", "0.69311583", "0.6749892", "0.6712348", "0.67008215", "0.66962844", "0.6653298", "0.66502756", "0.6646828", "0.6614402", "0.65999466", "0.65739655", "0.65360653", "0.647553", "0.6442106", "0.64005727", "0.6380691", "0.6377483", "0.6370202", "0.6355212", "0.6341574", "0.62944686", "0.62756616", "0.6266037", "0.6256061", "0.62475204", "0.62345064", "0.6230383", "0.62116784", "0.6137906", "0.61366034", "0.61203265", "0.61013794", "0.6054217", "0.60508394", "0.6050493", "0.60415155", "0.6040013", "0.6032293", "0.6027652", "0.6025605", "0.59971744", "0.5986849", "0.59819126", "0.59618205", "0.5959017", "0.59024084", "0.5901047", "0.5892509", "0.5880394", "0.58765227", "0.58673304", "0.58647585", "0.5860517", "0.5857505", "0.58563524", "0.5836345", "0.5836005", "0.58312255", "0.5822901", "0.58206844", "0.58188766", "0.5817553", "0.5806266", "0.58043885", "0.5782608", "0.577037", "0.5765322", "0.5750539", "0.574906", "0.5747017", "0.57361203", "0.57273424", "0.57225245", "0.57203376", "0.57198083", "0.5719422", "0.57186157", "0.57151914", "0.5707466", "0.570426", "0.5698597", "0.5693291", "0.56770307", "0.56754446", "0.5669064", "0.5667813", "0.5658914", "0.5654143", "0.56484497", "0.5648237", "0.5641877", "0.563667", "0.5632147", "0.5627518", "0.56192106" ]
0.68142927
6
Create an archive from the given tree, upload, and untar it.
def upload_tar_from_git():
    require("release", provided_by=[deploy])
    tree = prompt("Please enter a branch or SHA1 to deploy", default="master")
    local("git archive --format=tar %s | gzip > %s.tar.gz" % (tree, env['release']))
    sudo("mkdir %(path)s/releases/%(release)s" % env)
    put("%(release)s.tar.gz" % env, "%(path)s/packages/" % env, use_sudo=True)
    sudo("cd %(path)s/releases/%(release)s && tar zxf ../../packages/%(release)s.tar.gz" % env)
    local("rm %(release)s.tar.gz" % env)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_tar(self):\n with tarfile.open(self.tgzfile, \"w:gz\") as tar_handle:\n for root, _, files in os.walk(self.dirname):\n for file in files:\n tar_handle.add(os.path.join(root, file))", "def untar(conn, tarball, path):\n conn.run(f\"tar xf {tarball} -C {path}\")", "def untar(tar_path, cleanup=False):\n tfile = tarfile.open(tar_path, 'r')\n tfile.extractall(os.path.dirname(tar_path))\n tfile.close()\n if cleanup:\n os.remove(tar_path)", "def upload_tar_from_git(path):\n require('release', provided_by=[prod])\n require('whole_path', provided_by=[prod])\n require('branch', provided_by=[prod])\n local('git checkout %s' % (env.branch))\n local('git archive --format=tar %s | gzip > %s.tar.gz' % (env.branch, env.release))\n sudo('mkdir -p %s' % (path))\n put('%s.tar.gz' % (env.release), '/tmp/', mode=0755)\n sudo('mv /tmp/%s.tar.gz %s/packages/' % (env.release, env.code_root))\n sudo('cd %s && tar zxf ../../../packages/%s.tar.gz' % (env.whole_path, env.release))\n local('rm %s.tar.gz' % (env.release))\n sudo('rm %s/packages/%s.tar.gz' % (env.code_root, env.release))", "def untar(file_path, target_dir=None, gzipped=True, verbose=False):\n return posix.untar(file_path, target_dir, gzipped, verbose)", "def create_tarball(fileobj, path, callback=None, compression_level=None):\n tar_cmd = [\"tar\", \"-zc\", \"--directory=%s\" % path, \".\"]\n env = os.environ.copy()\n if compression_level and 1 <= compression_level <= 9:\n env[\"GZIP\"] = \"-%d\" % compression_level\n tar_proc = make_subprocess(tar_cmd, stdout=True, stderr=True, env=env)\n\n try:\n while True:\n chunk = tar_proc.stdout.read(CHUNK_SIZE)\n if chunk == '':\n break\n\n if callback:\n callback(chunk)\n\n if fileobj:\n fileobj.write(chunk)\n except Exception:\n try_kill_process(tar_proc)\n raise\n\n finish_subprocess(tar_proc, tar_cmd)", "def untar(archive):\n log.info('Unpacking archive \"%s\".' 
% archive)\n tar = module.params['tar']\n tar_extra_options = shlex.split(module.params['tar_extra_options'])\n if not tar:\n tar = module.get_bin_path('tar', required=True)\n if archive.endswith('.gz'):\n uncompress = 'z'\n elif archive.endswith('.bz2'):\n uncompress = 'j'\n else:\n raise ValueError('Unsupported compression type: %s' % archive)\n options = ''.join(['x', uncompress, 'f'])\n args = [tar, options] + tar_extra_options + [archive]\n rc, out, err = module.run_command(args)\n log.info('untar: rc=%d out=%s err=%s', rc, out, err)\n if rc != 0:\n raise ValueError('tar command failed: %d' % rc)", "def archive(ctx, config):\n log.info('Creating archive directory...')\n archive_dir = misc.get_archive_dir(ctx)\n run.wait(\n ctx.cluster.run(\n args=[\n 'install', '-d', '-m0755', '--', archive_dir,\n ],\n wait=False,\n )\n )\n\n try:\n yield\n except Exception:\n # we need to know this below\n set_status(ctx.summary, 'fail')\n raise\n finally:\n passed = get_status(ctx.summary) == 'pass'\n if ctx.archive is not None and \\\n not (ctx.config.get('archive-on-error') and passed):\n log.info('Transferring archived files...')\n logdir = os.path.join(ctx.archive, 'remote')\n if (not os.path.exists(logdir)):\n os.mkdir(logdir)\n for rem in ctx.cluster.remotes.iterkeys():\n path = os.path.join(logdir, rem.shortname)\n misc.pull_directory(rem, archive_dir, path)\n # Check for coredumps and pull binaries\n fetch_binaries_for_coredumps(path, rem)\n\n log.info('Removing archive directory...')\n run.wait(\n ctx.cluster.run(\n args=[\n 'rm',\n '-rf',\n '--',\n archive_dir,\n ],\n wait=False,\n ),\n )", "def upload_artifact(revision):\n # we upload the file from the local /tmp to the remote /tmp dir\n tmp_path = '/tmp/{revision}.tar.gz'.format(revision=revision)\n put(tmp_path, tmp_path)\n\n destination_path = '{base}/{revision}'.format(base=BASE_PATH,\n revision=revision)\n untar(tmp_path, destination_path)\n\n # remove both local and remote archives\n run('rm {}'.format(tmp_path))\n local('rm {}'.format(tmp_path))", "def pack(archive: Union[Path, str],\n paths: List[Union[Path, str]],\n cwd: Optional[Path] = None,\n exclude: Optional[List[Union[Path, str]]] = ()):\n archive = Path(archive)\n if cwd is None:\n cwd = Path.cwd()\n if archive.suffix == '.xz':\n archive = archive.with_suffix('')\n\n # Make sure all the paths have sane permissions.\n def walk(path):\n if path.is_symlink():\n return\n elif path.is_dir():\n # All dirs should be 755.\n mode = path.stat().st_mode & 0o777\n if mode != 0o755:\n path.chmod(0o755)\n\n for subpath in path.glob('*'):\n walk(subpath)\n elif path.is_file():\n # All scripts should be 755 while other files should be 644.\n mode = path.stat().st_mode & 0o777\n if mode in (0o755, 0o644):\n return\n if mode & 0o111:\n path.chmod(0o755)\n else:\n path.chmod(0o644)\n else:\n raise ValueError(f'{path}: unknown file type')\n\n logging.info('Forcing sane permissions on inputs')\n for path in paths:\n walk(cwd / path)\n\n logging.info('Creating %s tarball', archive.name)\n # We use relpath here to help out tar on platforms where it doesn't like\n # paths with colons in them (e.g. Windows). 
We have to construct the full\n # before running through relpath as relative archives will implicitly be\n # checked against os.getcwd rather than the explicit cwd.\n tar = os.path.relpath(cwd / archive, cwd)\n run(['tar', '--owner=0', '--group=0', '-cf', tar] +\n [f'--exclude={x}' for x in exclude] + ['--'] + paths, cwd=cwd)\n\n logging.info('Compressing tarball')\n run(['xz', '-f', '-T0', '-9', tar], cwd=cwd)", "def pack():\n clean_local()\n build()\n copy_json()\n optimize()\n tarball()", "def __gitCreateArchive(self):\n self.vcs.gitCreateArchive(self.project.getProjectPath())", "def save_tar(self, target_dir):\n # type: (Text) -> None\n if not os.path.isdir(target_dir):\n raise ValueError('target_dir %r not found.' % target_dir)\n\n base_name = os.path.basename(target_dir)\n base_dir = os.path.dirname(target_dir)\n tarname = shutil.make_archive(base_name, 'gztar', root_dir=base_dir, base_dir=base_name)\n filekey = os.path.basename(tarname)\n blob = self.bucket.blob(filekey)\n blob.upload_from_filename(tarname)", "def handle_tar(file_path, extension, extracted_path, destination_directory):\n tar = tarfile.open(file_path, extension)\n # remove files if they already exist\n if os.path.exists(extracted_path):\n shutil.rmtree(extracted_path)\n tar.extractall(path=destination_directory)\n tar.close()", "def _expand_archive(self, name):\r\n target = path(self.temp_dir) / uuid.uuid4().hex\r\n os.mkdir(target)\r\n with tarfile.open(self.data_dir / name) as tar_file:\r\n tar_file.extractall(path=target)\r\n\r\n return target", "def untar(input_filename, extract_dir):\n try:\n tar_ds = tarfile.open(input_filename)\n except tarfile.TarError:\n raise ValueError(\"%s is not a tar file\" % (input_filename))\n tar_ds.extractall(path=extract_dir)\n tar_ds.close()", "def untar(tarfile, outdir):\n tmpdir = tempfile.mkdtemp()\n try:\n untared = _open_archive(tarfile, tmpdir)\n files = [f for f in untared if os.path.isfile(os.path.join(tmpdir, f))]\n dirs = [d for d in untared if os.path.isdir(os.path.join(tmpdir, d))]\n assert len(files) + len(dirs) == len(untared), 'Only files and directories'\n if _files_same(tmpdir, outdir, files) and _dirs_same(tmpdir, outdir, dirs):\n # Nothing new or different in the tarfile.\n return False\n # Some or all of the files / directories are new.\n _move_files(tmpdir, outdir, files)\n _move_dirs(tmpdir, outdir, dirs)\n return True\n finally:\n if os.path.isdir(tmpdir):\n shutil.rmtree(tmpdir)", "def upload(project, private=None, site=None, username=None, token=None, suffix='.tar.bz2', log_level=None):\n failed = _check_problems(project)\n if failed is not None:\n return failed\n\n # delete=True breaks on windows if you use tmp_tarfile.name to re-open the file,\n # so don't use delete=True.\n tmp_tarfile = NamedTemporaryFile(delete=False, prefix=\"anaconda_upload_\", suffix=suffix)\n tmp_tarfile.close() # immediately un-use it to avoid file-in-use errors on Windows\n try:\n status = archive(project, tmp_tarfile.name)\n if not status:\n return status\n status = client._upload(project,\n tmp_tarfile.name,\n uploaded_basename=(project.name + suffix),\n private=private,\n site=site,\n username=username,\n token=token,\n log_level=log_level)\n return status\n finally:\n os.remove(tmp_tarfile.name)", "def archive(self, virtual_path_to_tar_files, root, target_name):\n\n\n # TODO: RSYNC and do a diff. 
if there are no changes, we can just skip this part of the dockerfile to maximize layering\n for x in virtual_path_to_tar_files:\n assert os.path.isabs(x)\n\n rel_to_root = [os.path.relpath(x, '/') for x in virtual_path_to_tar_files]\n real_path = [os.path.join(root, x) for x in rel_to_root ]\n\n tup = zip(virtual_path_to_tar_files, real_path)\n\n tar = tarfile.open(os.path.join(self.dir, target_name), 'w')\n\n for vp, rp in tup:\n tar.add(rp, arcname=vp)\n\n tar.close()\n\n self.df.add_docker_cmd('ADD %s /' % target_name)", "def bulk_upload ( server, identity, src_dir, tgt_dir ) :\n tmp_tarfilepath = '/tmp/'\n tmp_tarfilename = server + '.tar.gz'\n tmp_file = tmp_tarfilepath + tmp_tarfilename\n\n # Tar up the src directory\n s = subprocess.call( [ '/bin/sh', '-c',\n 'cd ' + src_dir + ' && tar czf ' + tmp_file + ' .' ] )\n if s != 0 :\n print 'Unable to upload files.'\n return s\n\n # Copy the tar file up to the server\n s = scp_call( server, identity, tmp_file, tmp_tarfilepath )\n if s != 0 :\n print 'Unable to upload files.'\n return s\n\n # Unpack the tar file on the server\n s = ssh_call( server,\n identity,\n 'cd ' + tgt_dir + ' && sudo tar xzf ' + tmp_file + ' && rm ' + tmp_file + ' && sudo chown -R root:root *' )\n return s", "def restore(self, archive):\n logger.info(\"Restoring an old archive run from {}\".format(archive))\n if os.path.isabs(archive):\n restorefile = archive\n else:\n restorefile = os.path.join(self.containerpath, const.ARCHIVEDIR, archive)\n with ignored(OSError):\n shutil.rmtree(os.path.join(self.rundir))\n with tarfile.open(restorefile, \"r:gz\") as f:\n def is_within_directory(directory, target):\n \n abs_directory = os.path.abspath(directory)\n abs_target = os.path.abspath(target)\n \n prefix = os.path.commonprefix([abs_directory, abs_target])\n \n return prefix == abs_directory\n \n def safe_extract(tar, path=\".\", members=None, *, numeric_owner=False):\n \n for member in tar.getmembers():\n member_path = os.path.join(path, member.name)\n if not is_within_directory(path, member_path):\n raise Exception(\"Attempted Path Traversal in Tar File\")\n \n tar.extractall(path, members, numeric_owner=numeric_owner) \n \n \n safe_extract(f, self.rundir)\n self._refreshconfig()", "def upload_tree(self, files: List[Path], params: Dict[str, str]) -> bool:\n temp_zip = Comm.create_zip_from_files(files)\n files_post = {'zipFile': open(temp_zip.name, 'rb')}\n response = requests.post(\n self.upload_url,\n data=params,\n files=files_post,\n )\n files_post['zipFile'].close()\n temp_zip.close()\n data = response.text\n self.upload_output = data\n good_upload = self.parse_upload()\n return good_upload", "def _unpack_tar(self, dir, filters):\n try:\n unpackArchive = gbpc.UnpackTarArchive(self.path, dir, filters)\n unpackArchive()\n except gbpc.CommandExecFailed:\n # unpackArchive already printed an error message\n raise GbpError", "def create_tarfile(source_dir, filename=\"/tmp/contents.tar.gz\"):\n try:\n # Define the default signal handler for catching: Ctrl-C\n signal.signal(signal.SIGINT, signal.default_int_handler)\n with tarfile.open(filename, \"w:gz\") as tar:\n tar.add(source_dir, arcname=os.path.basename(source_dir))\n\n except (OSError, IOError) as e:\n # OSError: [Errno 13] Permission denied\n if e.errno == errno.EACCES:\n source_dir = os.getcwd() if source_dir == '.' else source_dir # Expand cwd\n warn_purge_exit(info_msg=\"Permission denied. Removing compressed data...\",\n filename=filename,\n exit_msg=(\"Permission denied. 
Make sure to have read permission \"\n \"for all the files and directories in the path: %s\")\n % (source_dir))\n # OSError: [Errno 28] No Space Left on Device (IOError on python2.7)\n elif e.errno == errno.ENOSPC:\n dir_path = os.path.dirname(filename)\n warn_purge_exit(info_msg=\"No space left. Removing compressed data...\",\n filename=filename,\n exit_msg=(\"No space left when compressing your data in: %s.\\n\"\n \"Make sure to have enough space before uploading your data.\")\n % (os.path.abspath(dir_path)))\n\n except KeyboardInterrupt: # Purge tarball on Ctrl-C\n warn_purge_exit(info_msg=\"Ctrl-C signal detected: Removing compressed data...\",\n filename=filename,\n exit_msg=\"Stopped the data upload gracefully.\")", "def archive(\n self,\n ostream: Union[TextIO, BinaryIO],\n treeish: Optional[str] = None,\n prefix: Optional[str] = None,\n **kwargs: Any,\n ) -> Repo:\n if treeish is None:\n treeish = self.head.commit\n if prefix and \"prefix\" not in kwargs:\n kwargs[\"prefix\"] = prefix\n kwargs[\"output_stream\"] = ostream\n path = kwargs.pop(\"path\", [])\n path = cast(Union[PathLike, List[PathLike], Tuple[PathLike, ...]], path)\n if not isinstance(path, (tuple, list)):\n path = [path]\n # end assure paths is list\n self.git.archive(\"--\", treeish, *path, **kwargs)\n return self", "def save_tar(self, target_dir):\n # type: (Text) -> None\n\n if not os.path.isdir(target_dir):\n raise ValueError(\"Target directory '{}' not found.\".format(target_dir))\n\n base_name = os.path.basename(target_dir)\n base_dir = os.path.dirname(target_dir)\n tarname = shutil.make_archive(base_name, 'gztar', root_dir=base_dir, base_dir=base_name)\n filekey = os.path.basename(tarname)\n self.s3.Object(self.bucket_name, filekey).put(Body=open(tarname, 'rb'))", "def do_pack():\n\n now = datetime.now()\n # format the name of the file with the timestamps\n now_year = now.year\n now_month = now.month\n now_day = now.day\n now_hour = now.hour\n now_minute = now.minute\n now_second = now.second\n # apply the format\n file_name = 'versions/web_static_{}{}{}{}{}{}.tgz'.format(\n now_year, now_month, now_day, now_hour, now_minute, now_second\n )\n # All archives must be stored in the folder versions\n local('mkdir -p versions')\n # execute locally the compression of the folder\n command = local(\"tar -cvzf \" + file_name + \" ./web_static/\")\n # return the archive path if the archive has been correctly generated\n if command.succeeded:\n return file_name\n else:\n return None", "def untar_file(filename, location):\n if not os.path.exists(location):\n os.makedirs(location)\n if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):\n mode = 'r:gz'\n elif (filename.lower().endswith('.bz2')\n or filename.lower().endswith('.tbz')):\n mode = 'r:bz2'\n elif filename.lower().endswith('.tar'):\n mode = 'r'\n else:\n mode = 'r:*'\n tar = tarfile.open(filename, mode)\n try:\n leading = has_leading_dir([member.name for member in tar.getmembers()])\n for member in tar.getmembers():\n fn = member.name\n if leading:\n fn = split_leading_dir(fn)[1]\n path = os.path.join(location, fn)\n if member.isdir():\n if not os.path.exists(path):\n os.makedirs(path)\n else:\n try:\n fp = tar.extractfile(member)\n except (KeyError, AttributeError), e:\n # Some corrupt tar files seem to produce this\n # (specifically bad symlinks)\n continue\n if not os.path.exists(os.path.dirname(path)):\n os.makedirs(os.path.dirname(path))\n destfp = open(path, 'wb')\n try:\n shutil.copyfileobj(fp, destfp)\n finally:\n destfp.close()\n 
fp.close()\n finally:\n tar.close()", "def put(self, obj):\n\n if obj is None:\n return\n\n assert os.path.exists(obj), f'path {obj} does not exist.'\n\n return shutil.make_archive(obj, 'tar', obj)", "def make_tarball(base_name, base_dir, compress=\"gzip\", verbose=0, dry_run=0,\n owner=None, group=None):\n tar_compression = {'gzip': 'gz', 'bzip2': 'bz2', 'xz': 'xz', None: '',\n 'compress': ''}\n compress_ext = {'gzip': '.gz', 'bzip2': '.bz2', 'xz': '.xz',\n 'compress': '.Z'}\n\n # flags for compression program, each element of list will be an argument\n if compress is not None and compress not in compress_ext.keys():\n raise ValueError(\n \"bad value for 'compress': must be None, 'gzip', 'bzip2', \"\n \"'xz' or 'compress'\")\n\n archive_name = base_name + '.tar'\n if compress != 'compress':\n archive_name += compress_ext.get(compress, '')\n\n mkpath(os.path.dirname(archive_name), dry_run=dry_run)\n\n # creating the tarball\n import tarfile # late import so Python build itself doesn't break\n\n log.info('Creating tar archive')\n\n uid = _get_uid(owner)\n gid = _get_gid(group)\n\n def _set_uid_gid(tarinfo):\n if gid is not None:\n tarinfo.gid = gid\n tarinfo.gname = group\n if uid is not None:\n tarinfo.uid = uid\n tarinfo.uname = owner\n return tarinfo\n\n if not dry_run:\n tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress])\n try:\n tar.add(base_dir, filter=_set_uid_gid)\n finally:\n tar.close()\n\n # compression using `compress`\n if compress == 'compress':\n warn(\"'compress' will be deprecated.\", PendingDeprecationWarning)\n # the option varies depending on the platform\n compressed_name = archive_name + compress_ext[compress]\n if sys.platform == 'win32':\n cmd = [compress, archive_name, compressed_name]\n else:\n cmd = [compress, '-f', archive_name]\n spawn(cmd, dry_run=dry_run)\n return compressed_name\n\n return archive_name", "def upload_nextcloud_zipfile(import_type: migration.Migration, archive: UploadFile = File(...)):\n dir = app_dirs.MIGRATION_DIR.joinpath(import_type.value)\n dir.mkdir(parents=True, exist_ok=True)\n dest = dir.joinpath(archive.filename)\n\n with dest.open(\"wb\") as buffer:\n shutil.copyfileobj(archive.file, buffer)\n\n if not dest.is_file:\n raise HTTPException(status.HTTP_400_BAD_REQUEST)", "def create(self, data):\n with TemporaryDirectory() as temp_directory:\n floyd_logger.info(\"Compressing data ...\")\n compressed_file_path = os.path.join(temp_directory, \"data.tar.gz\")\n\n # Create tarfile\n floyd_logger.debug(\"Creating tarfile with contents of current directory: {}\".format(compressed_file_path))\n create_tarfile(source_dir='.', filename=compressed_file_path)\n\n total_file_size = os.path.getsize(compressed_file_path)\n floyd_logger.info(\"Creating data source. 
Total upload size: {}\".format(sizeof_fmt(total_file_size)))\n floyd_logger.info(\"Uploading compressed data ...\")\n\n # Add request data\n request_data = []\n request_data.append((\"data\", ('data.tar', open(compressed_file_path, 'rb'), 'text/plain')))\n request_data.append((\"json\", json.dumps(data.to_dict())))\n\n multipart_encoder = MultipartEncoder(\n fields=request_data\n )\n\n # Attach progress bar\n progress_callback = create_progress_callback(multipart_encoder)\n multipart_encoder_monitor = MultipartEncoderMonitor(multipart_encoder, progress_callback)\n\n response = self.request(\"POST\",\n self.url,\n data=multipart_encoder_monitor,\n headers={\"Content-Type\": multipart_encoder.content_type},\n timeout=3600)\n\n floyd_logger.info(\"Done\")\n return response.json().get(\"id\")", "def do_pack():\n date = datetime.datetime.now()\n archive = 'versions/web_static_{}{}{}{}{}{}.tgz'.format(date.year,\n date.month,\n date.day,\n date.hour,\n date.minute,\n date.second)\n local('mkdir -p versions')\n check = local('tar -cvzf {} web_static'.format(archive))\n if check.failed:\n return None\n else:\n return archive", "def make_tar(self, package, input_dir, build_dir, add_args=None):\n tar = self.options.tar_command\n\n # Generate the .tar.gz file\n filename = package + '.tar.gz'\n out_file = open(os.path.join(build_dir, filename), \"w\")\n args = [tar, '--format=gnu', '--exclude-vcs', '-C', build_dir]\n if self.config.get('tar', {}).get('ignore', []):\n for patt in self.config['tar']['ignore']:\n args += ['--exclude', patt]\n if add_args:\n args += add_args\n args += ['-c', input_dir]\n logging.debug(\"Creating %s\", filename)\n tar_proc = subprocess.Popen(args, stdout=subprocess.PIPE)\n gzip_proc = subprocess.Popen(['gzip', '-9'], stdin=tar_proc.stdout,\n stdout=out_file)\n\n if tar_proc.wait() != 0 or gzip_proc.wait() != 0:\n logging.error(\"tar/gzip failed, exiting\")\n sys.exit(1)\n out_file.close()\n logging.info('%s written', filename)\n return filename", "def extractTar(tar_file):\r\n\r\n tfile = tarfile.open(tar_file, 'r')\r\n tar_members = tfile.getmembers()\r\n for tar_member in tar_members:\r\n tar_member.name = os.path.basename(tar_member.name) # Strip out the path and keep just the file name\r\n tfile.extract(tar_member, path=\"tempdata/\")\r\n tfile.close()\r\n print \"Finished extracting tar file\"\r\n return", "def tar(self, out=sys.stdout, config=None, *args, **kw):\r\n if config is not None:\r\n config = self.manifest.config_schema.validate(config)\r\n if self.manifest.get(\"templates\"):\r\n templates_dir = self.copy(templates=True)\r\n for template in self.find(templates=True):\r\n EJSTemplate(templates_dir + template).apply(templates_dir + template, config)\r\n tar = tarfile.open(\"\", mode=\"w|\", fileobj=out)\r\n templates = self.manifest.get(\"templates\")\r\n for path in self.find(*args, **kw):\r\n if config and path in templates:\r\n real_path = templates_dir + path\r\n EJSTemplate(real_path).apply(real_path, config)\r\n else:\r\n real_path = self.unchroot_path(path)\r\n tar.add(real_path, path, recursive=False)\r\n tar.close()", "def _decompress_tarball(*, in_fileobj, out_fileobj):\n with tarfile.open(fileobj=in_fileobj, mode=\"r\") as it, tarfile.open(\n fileobj=out_fileobj, mode=\"w|\"\n ) as ot:\n for member in it.getmembers():\n extracted = it.extractfile(member)\n ot.addfile(member, extracted)", "def untar(file_path, extract_folder=None):\n if extract_folder is None:\n extract_folder = os.path.dirname(file_path)\n tar = tarfile.open(file_path)\n 
tar.extractall(extract_folder)\n tar.close()", "def untgz(tgz_filename, out_dir):\r\n logging.info(\"Source: %s\" % tgz_filename)\r\n tgz = TgzHelper(tgz_filename, out_dir)\r\n tgz.extract()", "def remote_archiveUpload(self, talk_id, upload_id, role):\n source = yield self.getUpload(upload_id)\n extension = source.splitext()[1]\n\n # TODO: Check if the talk identified by talk_id exists and bind the\n # document to it.\n\n # TODO: Validate the given ``role`` argument (either strictly against a\n # list of known roles or loosely for sanity).\n\n # 2. Construct the final pathname\n version_id = ObjectId()\n basename = str(version_id) + extension\n destination = settings.data_root.child(talk_id).child(role)\n if not destination.exists():\n destination.makedirs()\n destination = destination.child(basename)\n\n # 3. move the file to its destination\n yield threads.deferToThread(source.moveTo, destination)\n\n # 2. Save the info to the database\n asset = Asset(\n _id=version_id,\n archiver_id=self.getID(),\n talk_id=talk_id,\n role=role\n )\n version = AssetVersion(\n version_id=version_id,\n filename=destination\n )\n asset.versions.append(version)\n\n yield asset.save()\n\n # 5. Start the upload triggers\n task = self.processAsset(asset)\n\n # TODO: Define the return value of this method. Shall it be the task,\n # the version_id/asset_id or both?\n defer.returnValue((str(version_id), task.id))", "def upload(self, request, pk=None):\n app = self.get_object()\n deployment = Revision()\n deployment.compressed_archive = request.FILES['file']\n deployment.app = app\n deployment.save()\n app.deploy()\n response = {}\n return Response(response)", "def update_rootfs_archive(self):\n logging.info(\"starting to update rootfs archive\")\n\n # Remove existing archive before generating the new one\n try:\n if os.path.isfile(self.project.archive_filename):\n logging.info(\"removing previous archive file : \" + self.project.archive_filename)\n os.remove(self.project.archive_filename)\n\n # Catch file removal exceptions\n except OSError as exception:\n logging.critical(\"Error: %s - %s.\", exception.filename, exception.strerror)\n self.cleanup_installation_files()\n exit(1)\n\n # Create the new archive\n cache_archive = tarfile.open(self.project.archive_filename)\n cache_archive.add(name=self.project.rootfs_mountpoint)\n cache_archive.close()", "def do_pack():\n now = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n local('mkdir -p versions')\n result = local('tar -czvf versions/web_static_{}.tgz web_static'\n .format(now))\n if result.failed:\n return None\n else:\n return result", "def extract_tarball(fileobj, path, callback=None):\n tar_cmd = [\"tar\", \"-zx\", \"--directory=%s\" % path]\n tar_proc = make_subprocess(tar_cmd, stderr=True, stdin=True)\n\n try:\n while True:\n chunk = fileobj.read(CHUNK_SIZE)\n if chunk == '':\n break\n\n if callback:\n callback(chunk)\n\n tar_proc.stdin.write(chunk)\n except Exception:\n try_kill_process(tar_proc)\n raise\n\n finish_subprocess(tar_proc, tar_cmd)", "def archive(filepath,archive_dir='archive'):\n\n # Make sure we have a directory to archive to\n try:\n mkdir(archive_dir)\n except:\n print(\"Error making archive directory\")\n return\n\n try:\n (dir, filename) = os.path.split(filepath)\n outfile = os.path.join(dir,archive_dir,filename)+'.gz'\n with open(filename, 'rb') as f_in, gzip.open(outfile, 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n except Exception as e:\n print(\"Error archiving \",filepath)\n print(e)\n else:\n try:\n os.remove(filepath)\n except:\n 
print(\"Error removing \",filepath)", "def _make_archive(file_list, archive, root):\n with zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED) as zipf:\n for f in file_list:\n zipf.write(f, os.path.relpath(f, root))", "def testTarBundling(self):\n try:\n tP = os.path.join(self.__workPath, \"t0.tar.gz\")\n dirPath = os.path.join(self.__inpDirPath, \"topdir\")\n\n ok = self.__fileU.bundleTarfile(tP, [dirPath], mode=\"w:gz\", recursive=True)\n self.assertTrue(ok)\n\n numBytes = self.__fileU.size(tP)\n self.assertGreaterEqual(numBytes, 250)\n #\n md5 = self.__fileU.hash(tP, hashType=\"md5\")\n self.assertTrue(md5 is not None)\n #\n ok = self.__fileU.unbundleTarfile(tP, dirPath=self.__workPath)\n self.assertTrue(ok)\n #\n tP = os.path.join(self.__workPath, \"t1.tar.gz\")\n dirPathList = [os.path.join(self.__inpDirPath, \"topdir\", \"subdirA\"), os.path.join(self.__inpDirPath, \"topdir\", \"subdirB\")]\n\n ok = self.__fileU.bundleTarfile(tP, dirPathList, mode=\"w:gz\", recursive=True)\n self.assertTrue(ok)\n #\n ok = self.__fileU.unbundleTarfile(tP, dirPath=self.__workPath)\n self.assertTrue(ok)\n\n tP = os.path.join(self.__workPath, \"t2.tar\")\n dirPathList = [os.path.join(self.__inpDirPath, \"topdir\", \"subdirA\"), os.path.join(self.__inpDirPath, \"topdir\", \"subdirB\")]\n\n ok = self.__fileU.bundleTarfile(tP, dirPathList, mode=\"w\", recursive=True)\n self.assertTrue(ok)\n #\n ok = self.__fileU.unbundleTarfile(tP, dirPath=self.__workPath)\n self.assertTrue(ok)\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def create_artifact(current_revision):\n archive_path = '/tmp/{revision}.tar.gz'.format(revision=current_revision)\n local('tar -czf {archive_path} --exclude=.git *'.format(archive_path=archive_path))", "def archive(self, files, name):\n self.log.debug(\"Putting files into archive: %s\" % \"\\n\".join(files))\n tar_name = \"%s%s\" % (name, self.extension)\n if os.path.exists(tar_name):\n raise RuntimeError (\"Tried to create an archive that already exists: %s\" % tar_name) \n else:\n self.log.info(\"Creating a new archive %s\" % tar_name)\n tar = tarfile.open(tar_name, 'w:gz');\n for name in files:\n tar.add(name)\n print '%s'% (name)\n tar.close()\n return tar_name", "def tar_dir(output_path, source_dir):\n with tarfile.open(output_path, \"w:gz\") as tar:\n tar.add(source_dir, arcname=os.path.basename(source_dir))", "def _outside_tar2(self):\r\n outside_tar = self.unsafe_common_dir / \"unsafe_file.tar.gz\"\r\n with tarfile.open(outside_tar, \"w:gz\") as tar:\r\n tar.addfile(tarfile.TarInfo(str(self.unsafe_common_dir / \"../a_file\")))\r\n\r\n return outside_tar", "def _unzip_archive(archive_path, target_directory, source_path=None, **_):\n\n # Create a temporary directory.\n # Create a zip archive object.\n # Extract the object.\n ctx.logger.debug('Unzipping {src} to {dst}.'.format(\n src=archive_path, dst=target_directory))\n\n src = unzip_archive(archive_path, skip_parent_directory=False)\n copy_directory(src, target_directory)\n remove_dir(src)\n return target_directory", "def _extract_tar_file(tar_path, buildspace_tree, unpack_dir, ignore_files, relative_to):\n\n class NoAppendList(list):\n \"\"\"Hack to workaround memory issues with large tar files\"\"\"\n def append(self, obj):\n pass\n\n # Simple hack to check if symlinks are supported\n try:\n os.symlink('', '')\n except FileNotFoundError:\n # Symlinks probably supported\n symlink_supported = True\n except OSError:\n # Symlinks probably not supported\n get_logger().info('System does 
not support symlinks. Ignoring them.')\n symlink_supported = False\n except BaseException:\n # Unexpected exception\n get_logger().exception('Unexpected exception during symlink support check.')\n raise BuildkitAbort()\n\n resolved_tree = buildspace_tree.resolve()\n\n with tarfile.open(str(tar_path)) as tar_file_obj:\n tar_file_obj.members = NoAppendList()\n for tarinfo in tar_file_obj:\n try:\n if relative_to is None:\n tree_relative_path = unpack_dir / PurePosixPath(tarinfo.name)\n else:\n tree_relative_path = unpack_dir / PurePosixPath(tarinfo.name).relative_to(\n relative_to) # pylint: disable=redefined-variable-type\n try:\n ignore_files.remove(tree_relative_path.as_posix())\n except KeyError:\n destination = resolved_tree / tree_relative_path\n if tarinfo.issym() and not symlink_supported:\n # In this situation, TarFile.makelink() will try to create a copy of the\n # target. But this fails because TarFile.members is empty\n # But if symlinks are not supported, it's safe to assume that symlinks\n # aren't needed. The only situation where this happens is on Windows.\n continue\n if tarinfo.islnk():\n # Derived from TarFile.extract()\n new_target = resolved_tree / unpack_dir / PurePosixPath(\n tarinfo.linkname).relative_to(relative_to)\n tarinfo._link_target = new_target.as_posix() # pylint: disable=protected-access\n if destination.is_symlink():\n destination.unlink()\n tar_file_obj._extract_member(tarinfo, str(destination)) # pylint: disable=protected-access\n except BaseException:\n get_logger().exception('Exception thrown for tar member: %s', tarinfo.name)\n raise BuildkitAbort()", "def archive(mongo_backup_file):\r\n filename = get_archive_filename()\r\n tar = tarfile.open(filename, \"w|gz\")\r\n tar.add(mongo_backup_file)\r\n tar.close()\r\n\r\n return filename", "def archive_directory(dir_: str, tar_path: str):\n with tarfile.open(tar_path, 'w', encoding='utf-8') as tar:\n tar.add(dir_, arcname=os.path.sep)", "def unpack(archive: Union[Path, str],\n cwd: Optional[Path] = None,\n files: Optional[List[Union[Path, str]]] = ()):\n archive = Path(archive)\n if cwd is None:\n cwd = Path.cwd()\n if files:\n files = ['--'] + list(files)\n else:\n files = []\n\n # Try to make symlink usage easier in Windows.\n extra_env = {\n 'MSYS': 'winsymlinks:nativestrict',\n }\n\n logging.info('Unpacking %s', archive.name)\n # We use relpath here to help out tar on platforms where it doesn't like\n # paths with colons in them (e.g. Windows). 
We have to construct the full\n # before running through relpath as relative archives will implicitly be\n # checked against os.getcwd rather than the explicit cwd.\n src = os.path.relpath(cwd / archive, cwd)\n run(['tar', '--no-same-owner', '-xf', src] + files, cwd=cwd,\n extra_env=extra_env)", "def upload(ctx: click.Context, **kwargs):\n root_commands.cmd_upload(ctx.obj, **kwargs)", "def __extract_tgz(self):\n tar_file = tarfile.open(self.archive)\n try:\n extract_dir = tempfile.mkdtemp()\n archive_binaries_dir = self.__create_extraction_dir(\n tar_file.getnames(), extract_dir, tar_file.extract)\n finally:\n tar_file.close()\n return archive_binaries_dir, extract_dir", "def _unpack_archive(self, dir, filters):\n ext = os.path.splitext(self.path)[1]\n if ext in [\".zip\", \".xpi\"]:\n if filters:\n raise GbpError(\"Can only filter tar archives: %s\", (ext, self.path))\n self._unpack_zip(dir)\n else:\n self._unpack_tar(dir, filters)", "def create_backup_file(self, source_dir, archive_file):\n tar_file = tarfile.open(archive_file, 'w|gz')\n try:\n tar_file.add(source_dir)\n finally:\n tar_file.close()", "def pack(self, newarchive, filters=None):\n if not self.unpacked:\n raise GbpError(\"Need an unpacked source tree to pack\")\n\n if not filters:\n filters = []\n\n if not isinstance(filters, list):\n raise GbpError(\"Filters must be a list\")\n\n try:\n unpacked = self.unpacked.rstrip('/')\n repackArchive = gbpc.PackTarArchive(newarchive,\n os.path.dirname(unpacked),\n os.path.basename(unpacked),\n filters)\n repackArchive()\n except gbpc.CommandExecFailed:\n # repackArchive already printed an error\n raise GbpError\n return type(self)(newarchive)", "def upload(args):\n osf = _setup_osf(args)\n if osf.username is None or osf.password is None:\n sys.exit('To upload a file you need to provide a username and'\n ' password.')\n\n project = osf.project(args.project)\n storage, remote_path = split_storage(args.destination)\n if remote_path == '':\n remote_path = os.path.split(args.source)[-1]\n\n store = project.storage(storage)\n if args.recursive:\n if not os.path.isdir(args.source):\n raise RuntimeError(\"Expected source ({}) to be a directory when \"\n \"using recursive mode.\".format(args.source))\n\n # local name of the directory that is being uploaded\n _, dir_name = os.path.split(args.source)\n\n for root, _, files in os.walk(args.source):\n subdir_path = os.path.relpath(root, args.source)\n for fname in files:\n local_path = os.path.join(root, fname)\n with open(local_path, 'rb') as fp:\n # build the remote path + fname\n name = os.path.join(remote_path, dir_name, subdir_path,\n fname)\n store.create_file(name, fp, force=args.force,\n update=args.update)\n\n else:\n with open(args.source, 'rb') as fp:\n store.create_file(remote_path, fp, force=args.force,\n update=args.update)", "def compress_directory(directory, filename):\r\n mode = 'w:gz'\r\n name = path(directory).name\r\n with tarfile.open(filename, mode) as tar_file:\r\n tar_file.add(directory, arcname=name)", "def process_archive(self, file):\n self.recursive_archive_depth += 1\n # LOG: write_log or somehow log the archive file here\n if self.recursive_archive_depth >= self.max_recursive_depth:\n file.make_dangerous('Archive bomb')\n else:\n tempdir_path = file.make_tempdir()\n # TODO: double check we are properly escaping file.src_path\n # otherwise we are running unvalidated user input directly in the shell\n command_str = '{} -p1 x \"{}\" -o\"{}\" -bd -aoa'\n unpack_command = command_str.format(SEVENZ_PATH,\n file.src_path, 
tempdir_path)\n self._run_process(unpack_command)\n self.process_dir(tempdir_path, file.dst_path)\n self.safe_rmtree(tempdir_path)\n self.recursive_archive_depth -= 1", "def __make_tree(self, wd, root=\"d1\", create=True):\n d1 = \"%s/%s\" % (wd, root)\n t1 = FSTree(d1)\n d2 = \"%s/d2\" % d1\n t2 = t1.add(d2)\n if create:\n hdfs.mkdir(d2)\n for t, d, bn in ((t1, d1, \"f1\"), (t2, d2, \"f2\")):\n f = \"%s/%s\" % (d, bn)\n if create:\n hdfs.dump(self.data, f, mode=\"wb\")\n t.add(f, 0)\n return t1", "def cli_upload(parser):\n subparser = argparse.ArgumentParser(description='Upload to vault',\n parents=[parser])\n\n subparser.add_argument('-l', '--local',\n required=True,\n type=str,\n help='Local path')\n subparser.add_argument('-f', '--force',\n action='store_true',\n dest='overwrite',\n default=False)\n subparser.add_argument('-s', '--storage',\n type=str,\n required=False,\n default='drop.jarvice.com',\n dest='storage',\n help='Vault address')\n subparser.add_argument('-d', '--drop_remote',\n type=str,\n required=False,\n dest='remote',\n help='Remote path')\n args = subparser.parse_args()\n\n local = args.local\n store = args.storage\n remote = args.remote\n overwrite = args.overwrite\n\n utils.upload(config['username'], config['apikey'],\n local, store, remote, overwrite=overwrite)", "def create_tarball(scratch_dir, tarball_filename, cleanup=True):\n dirname, tmpdir = os.path.split(scratch_dir)\n with cd(dirname):\n with tarfile.open(tarball_filename, \"w:gz\") as tar:\n tar.add(scratch_dir, arcname=ZIP_DIRNAME)\n if cleanup:\n shutil.rmtree(scratch_dir)\n return os.path.join(dirname, tarball_filename)", "def create_docker_context(self):\n\n self.tarfile = io.BytesIO()\n\n with tarfile.open(fileobj=self.tarfile, mode=\"w|\") as tar:\n for f in self.files:\n tarinfo = tarfile.TarInfo(f['name'])\n tarinfo.size = len(f['content'])\n if 'mode' in f:\n tarinfo.mode = f['mode']\n tar.addfile(tarinfo, io.BytesIO(f['content'].encode('utf-8')))\n self.tarfile.seek(0) # Reset from EOF", "def put_files(container, files):\n ts = io.BytesIO()\n t = tarfile.TarFile(mode='w', fileobj=ts)\n for ft in files:\n fp = None\n fc = None\n fm = None\n if len(ft) == 3:\n fp, fc, fm = ft\n else:\n fp, fc = ft\n fs = io.BytesIO()\n fs.write(fc)\n fs.seek(0)\n fi = tarfile.TarInfo(name=fp)\n fi.size = len(fs.getvalue())\n if fm:\n fi.mode = fm\n t.addfile(tarinfo=fi, fileobj=fs)\n t.close()\n ts.seek(0)\n return dockerClient.put_archive(container=container, path=\"/\", data=ts)", "def pack_tar(output_filename, sources, type='gz'):\n if type == 'tgz':\n type = 'gz'\n elif type == 'tar':\n type = ''\n tar_ds = tarfile.open(output_filename, 'w:' + type)\n if not isinstance(sources, (list, tuple)) and \\\n isinstance(sources, str):\n sources = [sources]\n for source in sources:\n tar_ds.add(source, arcname=os.path.basename(source))\n tar_ds.close()", "def _outside_tar(self):\r\n outside_tar = self.unsafe_common_dir / \"unsafe_file.tar.gz\"\r\n with tarfile.open(outside_tar, \"w:gz\") as tar:\r\n tar.addfile(tarfile.TarInfo(str(self.content_dir / \"a_file\")))\r\n\r\n return outside_tar", "def do_pack():\n time_test = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n file_name = \"versions/web_static_\" + time_test + \".tgz\"\n command1 = \"mkdir -p versions\"\n command2 = \"tar -czvf \" + file_name + \" web_static\"\n local(command1)\n com = local(command2)\n if com.return_code == 0:\n return file_name\n else:\n return None", "def do_pack():\n\n now = datetime.now()\n time_now = now.strftime(\"%Y%m%d%H%M%S\")\n 
archive_name = \"versions/web_static_\" + time_now + \".tgz\"\n local('mkdir -p versions')\n archive_command = local(\"tar -zcvf \" + archive_name + \" web_static\")\n\n if archive_command.succeeded:\n return archive_name\n\n return None", "def unzip_and_untar(item):\n print(\"Unpacking %s\" % item)\n\n f = tarfile.open(item, mode=\"r\")\n f.extractall(path=\"working\")\n f.close()", "def _send_tarstream_to_node_and_extract(\n tarstream: io.BytesIO,\n node: Node,\n remote_path: Path,\n sudo: bool,\n) -> None:\n tar_path = Path('/tmp/dcos_e2e_tmp.tar')\n with tempfile.NamedTemporaryFile() as tmp_file:\n tmp_file.write(tarstream.getvalue())\n tmp_file.flush()\n\n node.send_file(\n local_path=Path(tmp_file.name),\n remote_path=tar_path,\n sudo=sudo,\n )\n\n tar_args = ['tar', '-C', str(remote_path), '-xvf', str(tar_path)]\n node.run(args=tar_args, sudo=sudo)\n node.run(args=['rm', str(tar_path)], sudo=sudo)", "async def mkarchivefs(self):\n # create an empty archivefs--just has a root.\n modfs : arcfs.ArchiveFS = arcfs.ArchiveFS()\n\n # add path of each file in the archive; since intermediate\n # directories are created automatically, there's no need to do\n # mkdir--although this method DOES mean that, if the archive contains\n # any empty directories, they will not be present in the fs. Not sure\n # yet if this is going to be an issue.\n async for arc_entry in self.archive_contents(dirs=False):\n # add root anchor to all entries\n modfs.touch(\"/\"+arc_entry)\n\n return modfs", "def do_pack():\n files = 'versions/web_static_{}{}{}{}{}{}.tgz'\\\n .format(T.year, T.month, T.day, T.hour, T.minute, T.second)\n local('mkdir -p versions')\n execute = local(\"tar -cvzf \" + files + \" ./web_static/\")\n if execute.succeeded:\n return files\n return None", "def archive_folders(args, directory_list):\n # Archive each of the subfolders\n # If we haven't selected archive then we return immediately.\n if not args.archive:\n return\n\n # Otherwise a simple tar command should do\n tar_commands = []\n for directory in directory_list:\n tar_commands.append(\"tar -cf - %s --remove-files | pigz -9 -p 8 > %s.tar.gz\" %\n (directory, directory))\n\n # Multi-thread our tar command\n processes = (subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n for cmd in tar_commands)\n\n # We use the islice command to split our commands into five smaller lists.\n running_processes = list(itertools.islice(processes, args.num_threads))\n\n while running_processes:\n for i, process in enumerate(running_processes):\n if process.poll() is not None: # Means that the process is complete!\n stdout, stderr = process.communicate() # Get the output of the completed process\n if not stderr == \"\":\n print stderr\n running_processes[i] = next(processes, None)\n # Run the next number in the list.\n if running_processes[i] is None: # No more commands waiting to be processed.\n del running_processes[i] # Not a valid process.\n break", "def upload_tearup(request, monkeypatch):\n # creating tree\n upload_dir = os.path.join('.', 'test_upload_' + get_unique_string())\n os.makedirs(upload_dir)\n open(os.path.join(upload_dir, 'l0_1.txt'), 'w').close()\n open(os.path.join(upload_dir, 'l0_2.txt'), 'w').close()\n dir = os.path.join(upload_dir, 'level1_1')\n os.makedirs(dir)\n open(os.path.join(dir, 'l1_1_1.txt'), 'w').close()\n dir = os.path.join(dir, 'level2_1')\n os.makedirs(dir)\n open(os.path.join(dir, 'l2_1_1.txt'), 'w').close()\n dir = os.path.join(dir, 'level2_2/level3_1')\n os.makedirs(dir)\n 
open(os.path.join(dir, 'l3_1_1.txt'), 'w').close()\n dir = os.path.join(upload_dir, 'level1_2')\n os.makedirs(dir)\n open(os.path.join(dir, 'l1_2_1.txt'), 'w').close()\n open(os.path.join(dir, 'l1_2_2.txt'), 'w').close()\n # setting up config (writing a file? why? it will not be applied anyway)\n config_file = os.path.join('.', 'test_config_' + get_unique_string())\n uploaded_dir = os.path.join('.', 'test_uploaded_' + get_unique_string())\n with open(config_file, 'w') as f:\n f.write(\"\"\"[Credentials]\nEmail : [email protected]\nPassword : some_pass\n\n[Locations]\nUploadedPath : {}\nCloudPath : /backups\nUploadPath : {}\n\n[Behaviour]\nMoveUploaded : yes\nRemoveUploaded : yes\nArchiveFiles : no\nRemoveFolders: yes\"\"\".format(uploaded_dir, upload_dir))\n monkeypatch.setattr('upload.IS_CONFIG_PRESENT', True)\n monkeypatch.setattr('upload.CONFIG_FILE', config_file)\n monkeypatch.setattr('upload.UPLOAD_PATH', upload_dir)\n monkeypatch.setattr('upload.UPLOADED_PATH', uploaded_dir)\n # setup uploader behaviour here to test desired combination (default: True, True, False, True)\n monkeypatch.setattr('upload.ARCHIVE_FILES', True)\n monkeypatch.setattr('upload.REMOVE_UPLOADED', True)\n monkeypatch.setattr('upload.MOVE_UPLOADED', False)\n monkeypatch.setattr('upload.REMOVE_FOLDERS', True)\n # faking cloud functions responses\n def cloud_auth(session, login=None, password=None):\n return True\n monkeypatch.setattr('upload.cloud_auth', cloud_auth)\n def get_csrf(session):\n return 'fake_csrf'\n monkeypatch.setattr('upload.get_csrf', get_csrf)\n def get_upload_domain(session, csrf=''):\n return 'fake_upload_domain'\n monkeypatch.setattr('upload.get_upload_domain', get_upload_domain)\n def get_cloud_space(session, csrf=''):\n return 1*1024*1024*1024\n monkeypatch.setattr('upload.get_cloud_space', get_cloud_space)\n def post_file(session, domain='', file=''):\n return ('1234567890123456789012345678901234567890', 100)\n monkeypatch.setattr('upload.post_file', post_file)\n def add_file(session, file='', hash='', size=0, csrf=''):\n return True\n monkeypatch.setattr('upload.add_file', add_file)\n def create_folder(session, folder='', csrf=''):\n return True\n monkeypatch.setattr('upload.create_folder', create_folder)\n # setting up logging\n log_file = os.path.join('.', 'test_log_' + get_unique_string())\n monkeypatch.setattr('upload.LOG_FILE', log_file)\n def upload_teardown():\n shutil.rmtree(upload_dir)\n os.unlink(config_file)\n shutil.rmtree(uploaded_dir, ignore_errors=True)\n os.unlink(log_file)\n request.addfinalizer(upload_teardown)", "async def create_tarball(output_path: str) -> bytes:\n file_paths = []\n for root, _, filenames in os.walk(output_path):\n for filename in filenames:\n file_path = os.path.join(root, filename)\n log.info(f'Adding {file_path} to tarball.')\n file_paths.append(file_path)\n\n with ByteStream() as stream:\n with tarfile.TarFile.open(fileobj=stream, mode='w:gz',\n compresslevel=1) as tar:\n for file_path in file_paths:\n tar_info = tar.gettarinfo(name=file_path)\n tar.addfile(tar_info)\n # Yield the header for the tarinfo file.\n yield stream.pop()\n\n with open(file_path, 'rb') as in_fp:\n # Read the input file in chunks of stream.block_size bytes.\n while True:\n data = in_fp.read(stream.block_size)\n if len(data) > 0:\n # Write the data to the buffer.\n tar.fileobj.write(data)\n # Yield a compressed file chunk so the client can receive it.\n yield stream.pop()\n # Write padding if necessary.\n if len(data) < stream.block_size:\n blocks, remainder = 
divmod(tar_info.size, tarfile.BLOCKSIZE)\n if remainder > 0:\n tar.fileobj.write(tarfile.NUL * (tarfile.BLOCKSIZE - remainder))\n yield stream.pop()\n blocks += 1\n tar.offset += blocks * tarfile.BLOCKSIZE\n break\n\n # Yield end-of-archive marker.\n yield stream.pop()", "def save_tar(self, target_dir):\n # type: (Text) -> None\n raise NotImplementedError(\"\")", "def load(self):\n self.cleanup()\n \n \"\"\" create the working directory \"\"\"\n try:\n os.makedirs(self.paths['workspace'])\n except OSError:\n pass\n \n files = [ \"base.tar.gz\" ]\n \n for f in files:\n (dirname, extension) = f.split(\".\", 1)\n \n \"\"\" download tar archive \"\"\"\n self.download(self.sessionurl + \"/\" + f)\n \n \"\"\" create directory for content of the archive \"\"\"\n destdir = os.path.join(self.paths['workspace'], dirname)\n \n try:\n os.makedirs(destdir)\n except OSError:\n pass\n \n if extension == \"tar.gz\":\n \"\"\" extract the tar archive \"\"\"\n tar = tarfile.open(os.path.join(self.paths['workspace'], f))\n tar.extractall(destdir)\n tar.close()\n \n logging.info(self.log_format((\"done\")))", "def archive_log(self, f_in, filename):\n if not os.path.isdir('archived'):\n os.makedirs('archived')\n f_out = gzip.open('archived/'+filename+'.gz', 'wb')\n f_out.writelines(f_in)\n f_out.close()\n f_in.close()", "def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,\n dry_run=0, owner=None, group=None):\n save_cwd = os.getcwd()\n if root_dir is not None:\n log.debug(\"changing into '%s'\", root_dir)\n base_name = os.path.abspath(base_name)\n if not dry_run:\n os.chdir(root_dir)\n\n if base_dir is None:\n base_dir = os.curdir\n\n kwargs = {'dry_run': dry_run}\n\n try:\n format_info = ARCHIVE_FORMATS[format]\n except KeyError:\n raise ValueError(\"unknown archive format '%s'\" % format)\n\n func = format_info[0]\n for arg, val in format_info[1]:\n kwargs[arg] = val\n\n if format != 'zip':\n kwargs['owner'] = owner\n kwargs['group'] = group\n\n try:\n filename = func(base_name, base_dir, **kwargs)\n finally:\n if root_dir is not None:\n log.debug(\"changing back to '%s'\", save_cwd)\n os.chdir(save_cwd)\n\n return filename", "def _archive_logs(self, logdir, files):\n cwd = os.getcwd()\n archive_wd = os.path.dirname(logdir)\n archive_file = os.path.basename(logdir) + \".tgz\"\n\n # move files into logdir for archive\n for f in files:\n self.logger.info(\"moving '%s' to archive folder\" % f)\n shutil.move(f, logdir)\n\n # move to logdir parent folder\n self.logger.info(\"archiving profile logs into '%s'\" % archive_file)\n os.chdir(archive_wd)\n archive = tarfile.open(archive_file, \"w:gz\")\n archive.add(os.path.basename(logdir))\n archive.close()\n\n # go back to current working dir and remove logdir\n os.chdir(cwd)\n shutil.rmtree(logdir)", "def put_archive(self, path, data):\n return self.client.api.put_archive(self.id, path, data)", "def test_unsafe_tar(self):\r\n\r\n def try_tar(tarpath):\r\n with open(tarpath) as tar:\r\n args = {\"name\": tarpath, \"course-data\": [tar]}\r\n resp = self.client.post(self.url, args)\r\n self.assertEquals(resp.status_code, 400)\r\n self.assertTrue(\"SuspiciousFileOperation\" in resp.content)\r\n\r\n try_tar(self._fifo_tar())\r\n try_tar(self._symlink_tar())\r\n try_tar(self._outside_tar())\r\n try_tar(self._outside_tar2())\r\n # Check that `import_status` returns the appropriate stage (i.e.,\r\n # either 3, indicating all previous steps are completed, or 0,\r\n # indicating no upload in progress)\r\n resp_status = self.client.get(\r\n 
reverse_course_url(\r\n 'import_status_handler',\r\n self.course.id,\r\n kwargs={'filename': os.path.split(self.good_tar)[1]}\r\n )\r\n )\r\n import_status = json.loads(resp_status.content)[\"ImportStatus\"]\r\n self.assertIn(import_status, (0, 3))", "def unpack(tarball, dst, verbose=False, match=None):\n print(\"extracting\", tarball)\n fname = os.path.basename(tarball).replace(\".tar.gz\", \"\")\n with contextlib.closing(tarfile.open(tarball)) as tar:\n for member in tar.getnames():\n if \"/\" not in member:\n continue\n name = member.replace(fname + \"/\", \"\", 1)\n if match is not None and not name.startswith(match):\n continue\n name = name[len(match) + 1:]\n\n dst_path = os.path.join(dst, name)\n if verbose:\n print(\" extracting\", member)\n tar.extract(member, dst)\n src_path = os.path.join(dst, member)\n if os.path.isdir(src_path) and os.path.exists(dst_path):\n continue\n shutil.move(src_path, dst_path)\n shutil.rmtree(os.path.join(dst, fname))", "def do_pack():\n d = datetime.now()\n local(\"mkdir -p versions\")\n file_name = 'versions/web_static_{}{}{}{}{}{}.tgz\\\n'.format(d.year, d.month, d.day, d.hour, d.minute, d.second)\n status = local(\"tar -cvzf\" + file_name + \" ./web_static/\", capture=True)\n if status.succeeded:\n return file_name\n return None", "def _tar_with_filter(\n path: Path,\n tar_filter: Callable[[tarfile.TarInfo], Optional[tarfile.TarInfo]],\n) -> io.BytesIO:\n tarstream = io.BytesIO()\n with tarfile.TarFile(fileobj=tarstream, mode='w') as tar:\n tar.add(name=str(path), arcname='/', filter=tar_filter)\n tarstream.seek(0)\n\n return tarstream", "async def main():\n \n # workflow status\n global status\n\n # Mode says which objects must be archived: DB dump, source files or both.\n try:\n mode=sys.argv[1]\n except IndexError:\n mode = 'all'\n\n # queue of files to be archived\n files_to_upload = deque()\n \n logger.trace(\"Archiving ...\")\n # Tasks to archive files and database dump\n list_of_threads = get_list_of_threads(mode=mode)\n\n tar_names = await asyncio.gather(*list_of_threads)\n\n # Clear names list, removing None elements if exist\n tar_names = [name for name in tar_names if name]\n\n files_to_upload.extend(tar_names)\n logger.trace(\"Ok.\")\n\n logger.trace(\"Uploading ...\")\n\n # Connect to the ftp-server and upload the archived files.\n await upload_to_ftp_server(host=FTP.SERVER.value,\n port=FTP.PORT.value,\n login=FTP.LOGIN.value,\n password=FTP.PASSWORD.value,\n files=files_to_upload)\n\n # Remove archived and dump files on the server site.\n clear_garbage(mode=mode, files=tar_names)\n\n # Check the workflow status. 
If it's not empty, send an error email.\n if len(status) > 0 and ERROR_NOTIFICATION_BY_EMAIL:\n backup_email()", "def archive_writeup(syn, evaluation, stat=\"VALIDATED\", reArchive=False):\n if type(evaluation) != synapseclient.Evaluation:\n evaluation = syn.getEvaluation(evaluation)\n\n print(\"\\n\\nArchiving\", evaluation.id, evaluation.name)\n print(\"-\" * 60)\n\n for sub, status in syn.getSubmissionBundles(evaluation, status=stat):\n # retrieve file into cache and copy it to destination\n checkIfArchived = filter(\n lambda x: x.get(\"key\") == \"archived\",\n status.annotations['stringAnnos'])\n if len(list(checkIfArchived)) == 0 or reArchive:\n projectEntity = synapseclient.Project(\n 'Archived {} {} {} {}'.format(\n sub.name.replace(\"&\", \"+\").replace(\"'\", \"\"),\n int(round(time.time() * 1000)),\n sub.id,\n sub.entityId))\n entity = syn.store(projectEntity)\n adminPriv = [\n 'DELETE', 'DOWNLOAD', 'CREATE', 'READ', 'CHANGE_PERMISSIONS',\n 'UPDATE', 'MODERATE', 'CHANGE_SETTINGS']\n syn.setPermissions(entity, \"3324230\", adminPriv)\n synapseutils.copy(syn, sub.entityId, entity.id)\n archived = {\"archived\": entity.id}\n status = utils.update_single_submission_status(status, archived)\n syn.store(status)", "def create_fs_tree(self, tree, base=None):\n\n def _create_path(name):\n return name if base is None else os.path.join(base, name)\n\n if 'files' in tree:\n for f in tree['files']:\n path = self.complete_path(_create_path(f))\n dummy_file(path)\n if 'links' in tree:\n for l in tree['links']:\n path = self.complete_path(_create_path(l))\n dummy_link(path)\n if 'dirs' in tree:\n for d in tree['dirs'].keys():\n path = self.complete_path(_create_path(d))\n os.mkdir(path)\n self.create_fs_tree(tree['dirs'][d], path)", "def upload(filename):\n client = connect()\n for _ in range(RETRIES):\n description = str(time.asctime()).replace(' ', '_')\n data = client.upload_archive(\n vaultName=VAULT_NAME,\n archiveDescription=description,\n body=open(filename))\n print(\"Success at \" + time.asctime() + \": ID \" + data['archiveId'])\n return", "def extract_tar(tar_path, target_folder):\n with tarfile.open(tar_path, 'r') as archive:\n archive.extractall(target_folder)", "def _setup_input(self, g):\n tarbytes = io.BytesIO()\n with tempfile.NamedTemporaryFile() as f:\n g.serialize(f.name, format=\"turtle\")\n tar = tarfile.open(name=\"out.tar\", mode=\"w\", fileobj=tarbytes)\n tar.add(f.name, arcname=\"input.ttl\")\n tar.close()\n # seek to beginning so our file is not empty when docker sees it\n tarbytes.seek(0)\n return tarbytes", "def tar(self, folders, tarfile):\n if not folders:\n raise ValueError('folders must be set')\n if not tarfile:\n raise ValueError('tarfile must be set')\n with OpenShell(self._conn) as os:\n if log.isEnabledFor(logging.DEBUG):\n RemoteOperation.run(os, 'tar -cvf %s %s' % (tarfile, ' '.join(folders)))\n else:\n RemoteOperation.run(os, 'tar -cf %s %s' % (tarfile, ' '.join(folders)))", "def do_pack():\n\n datenow = datetime.now()\n full_date = datenow.strftime(\"%Y%m%d%H%M%S\")\n\n try:\n if not os.path.isdir(\"versions\"):\n local(\"mkdir versions\")\n local_command = local(\"tar -cvzf versions/web_static_{}.tgz web_static\"\n .format(full_date))\n return local_command\n except Exception:\n return None", "def compress(repo, location):\r\n os.chdir(location)\r\n debug(\"Compressing repositories in [%s]...\" % (location), True)\r\n exec_cmd(\"tar -zcvf bitbucket-backup-%s-%s.tar.gz `ls -d *`\" % (repo.get('owner'), 
datetime.datetime.now().strftime('%Y%m%d%H%m%s')))\r\n debug(\"Cleaning up...\", True)\r\n for d in os.listdir(location):\r\n path = os.path.join(location, d)\r\n if os.path.isdir(path):\r\n exec_cmd(\"rm -rfv %s\" % path)", "def put_file(self, path, contents):\n data = io.BytesIO()\n with tarfile.open(fileobj=data, mode='w') as tarfile_:\n file_contents = contents.encode() if isinstance(contents, str) else contents\n tarinfo = tarfile.TarInfo(path)\n\n # We set the modification time to now because some systems (e.g. logging) rely upon\n # timestamps to determine whether to read config files.\n tarinfo.mtime = time.time()\n tarinfo.size = len(file_contents)\n tarfile_.addfile(tarinfo, io.BytesIO(file_contents))\n data.seek(0)\n\n self.container.put_archive(path='/', data=data)" ]
[ "0.6552709", "0.6145302", "0.59912825", "0.590548", "0.5857982", "0.5824896", "0.5788129", "0.57454973", "0.5727448", "0.5701735", "0.5637337", "0.5634134", "0.5598457", "0.55868477", "0.5579905", "0.5539658", "0.5536656", "0.5533471", "0.5514017", "0.5374604", "0.53565294", "0.5340064", "0.53372705", "0.5326481", "0.5326471", "0.5321925", "0.53051096", "0.5304542", "0.5304428", "0.5285305", "0.5258572", "0.52570444", "0.52488565", "0.5243083", "0.5242437", "0.52383196", "0.52371883", "0.5232421", "0.5228025", "0.52243936", "0.5205819", "0.5196649", "0.5190336", "0.51884484", "0.51813287", "0.51756704", "0.5166441", "0.51403844", "0.51321346", "0.5130623", "0.5096009", "0.5094113", "0.5089888", "0.50866497", "0.5084439", "0.5082352", "0.5072536", "0.50722533", "0.5067777", "0.5064893", "0.5063517", "0.50570035", "0.50545394", "0.5051109", "0.50267124", "0.5022791", "0.50120986", "0.50076187", "0.5006971", "0.50053746", "0.50018287", "0.4996386", "0.49897563", "0.49876285", "0.49850982", "0.49838585", "0.49745575", "0.49647722", "0.49622095", "0.49586678", "0.49416858", "0.49399912", "0.4930288", "0.49272537", "0.49254116", "0.49247465", "0.49160352", "0.4909445", "0.49003014", "0.4897731", "0.4893684", "0.48933652", "0.48885998", "0.4882586", "0.48767698", "0.48717877", "0.4865319", "0.48630032", "0.4858402", "0.48499927" ]
0.6242641
1
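The record above drives a pack-upload-untar flow through Fabric's local/put/sudo calls. Below is a minimal local sketch of the same idea using only the Python standard library, assuming it is run from inside a git repository with git on the PATH; the tree-ish, release name, and temporary paths are illustrative, and the remote upload step is intentionally left out.

import subprocess
import tarfile
import tempfile
from pathlib import Path

def pack_and_unpack(treeish="master", release="demo"):
    """Archive a git tree-ish into a gzipped tarball, then untar it into a release dir."""
    workdir = Path(tempfile.mkdtemp())
    tarball = workdir / f"{release}.tar.gz"
    # Equivalent of: git archive --format=tar <treeish> | gzip > <release>.tar.gz
    subprocess.run(
        ["git", "archive", "--format=tar.gz", "-o", str(tarball), treeish],
        check=True,
    )
    # Equivalent of: mkdir -p releases/<release> && tar zxf <release>.tar.gz,
    # performed locally instead of on a remote host via put() + sudo().
    dest = workdir / "releases" / release
    dest.mkdir(parents=True)
    with tarfile.open(tarball, "r:gz") as tar:
        tar.extractall(dest)
    return dest

Calling pack_and_unpack("master", "demo") mirrors the shell pipeline in the record: git archive produces the tarball and tarfile.extractall plays the role of tar zxf on the target host.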
Install the required Python packages inside the virtualenv.
def install_requirements():
    require("release", provided_by=[deploy])
    with cd("%(path)s" % env):
        sudo("./bin/pip install -r ./releases/%(release)s/requirements.txt" % env)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sub_install_python_requirements():\n # Activate the virtualenv\n activate = 'source {0}/{1}/bin/activate'.format(\n env.virtualenv['dir'], env.virtualenv['name'])\n run(activate)\n\n # Install Python requirements\n install = 'pip install -r /vagrant/Flask_app/requirements.txt'\n\n # Join and execute the commands\n run(activate + '; ' + install)", "def setup_virtualenv():\n run('virtualenv -p %(python)s --no-site-packages %(env_path)s;' % env)\n run('source %(env_path)s/bin/activate; easy_install -U setuptools; easy_install pip;' % env)", "def install_packages():\n\n require('environment', provided_by=env.environments)\n packages_file = os.path.join(PROJECT_ROOT, 'requirements', 'packages.txt')\n system.install_packages_from_file(packages_file)", "def install_deps():\n pipenv_dev = run('pipenv install --dev'.split(), check=True)\n print('Installed dependencies and virtual environment. Type `pipenv shell` to activate later.')", "def sync_virtualenv(ctx):\n if not path.isfile(\"./pyenv/bin/pip\"):\n ctx.run(\"virtualenv --no-site-packages --python=/usr/bin/python2.7 pyenv\")\n ctx.run(\"PIP_DOWNLOAD_CACHE=/var/tmp/ ./pyenv/bin/pip install -r requirements.txt\")\n print(\"\"\"\n Installation completed. Please check any error messages above.\n\n If you are going to use `openstack` or ansible directly on the command line, run\n\n . ./pyenv/bin/activate\n\n or even add it to your ~/.bashrc\n \"\"\")", "def install_packages():\n with open(\"requirements.txt\", \"w\") as requirements_file:\n subprocess.run([\"pipenv\", \"lock\", \"-r\"], stdout=requirements_file)\n\n subprocess.run(\n [\"pip\", \"install\", \"-r\", \"requirements.txt\", \"--no-deps\", \"-t\", BUILD_DIR]\n )", "def install_requirements():\n with cd(env.code_dir):\n with _virtualenv():\n sudo('pip install -r requirements.txt', pty=True)", "def bootstrap():\n sub_install_packages()\n sub_install_virtualenv()\n sub_create_virtualenv()\n sub_install_python_requirements()", "def build_virtualenv():\n\n puts(yellow(\"Install dependencies from requirements.txt\"))\n with cd(env.source_dir):\n with prefix('source %s' % in_rwd('bin/activate')):\n sudo('pip install -r %s' % env.requirements_file,\n user=env.app_user)\n sudo('python setup.py develop', user=env.app_user)", "def install_requirements():\n local('. fabric_factory/ve/bin/activate; easy_install pip')\n local('. 
fabric_factory/ve/bin/activate; pip install -r requirements.txt')", "def sub_install_virtualenv():\n sudo('pip install virtualenv') # Need sudo b/c installing to system Python", "def install_virtualenv():\n from .project import sudo_project, virtualenv_path\n\n with sudo():\n virtualenv.install()\n\n with sudo_project():\n virtualenv.create(virtualenv_path())", "def install_requirements():\n run('source %(env_path)s/bin/activate; pip install -U -r %(repo_path)s/requirements.txt' % env)", "def install(name):\n base = '/home/{}/venvs/{}/base.txt'.format(env.user, name)\n prod = '/home/{}/venvs/{}/prod.txt'.format(env.user, name)\n\n # Upload requirements file.\n put(utils.file_path('requirements', 'base.txt'), base)\n put(utils.file_path('requirements', 'prod.txt'), prod)\n\n # Activate the virtual environment.\n with prefix('source /home/{}/venvs/{}/bin/activate'.format(env.user, name)):\n run('pip install -r {}'.format(prod))", "def install():\n verun('pip install -r {0}'.format(requirements))", "def install_requirements():\r\n if env.hosts:\r\n run ('cd %(path)s %(command_join)s env/bin/pip install -r current-release/requirements.txt' % env)\r\n else:\r\n local('%spip install -r requirements.txt' % virtualenv_bin, capture=False)", "def pip_install():\n _require_environment()\n remote(PIP_INSTALL_PREFIX)", "def install_python():\n _require_environment()\n # TODO: find a better criteria for when to use apt-get update\n if not files.exists('/usr/bin/python'):\n apt_get_update()\n # TODO: Install Python 2.7.3 from source, regardless of Linux distribution\n sudo('apt-get -y -qq install python python2.6 python2.6-dev pkg-config gcc')\n sudo('apt-get -y -qq install python-setuptools')\n sudo('easy_install virtualenv')\n sudo('easy_install pip')\n sudo('pip install virtualenvwrapper')\n with settings(warn_only=True):\n sudo(_interpolate('mkdir %(workon)s'))\n sudo(_interpolate('chmod g+w %(workon)s'))\n sudo(_interpolate('chown %%(user)s:%%(user)s %(workon)s') % env)", "def setup(ctx):\r\n ctx.run('pip3 install -r requirements.txt')", "def install_requirements():\n req_path = os.path.join(vlogger_dir, \"requirements.txt\")\n subprocess.call([\"pip\", \"install\", \"-r\", req_path])", "def pip_install(*args):\n call(WITH_VENV, '.venv', 'pip', 'install', *args)", "def YumInstall(vm):\n vm.Install('build_tools')\n vm.InstallEpelRepo()\n vm.InstallPackages(YUM_PACKAGES)", "def install():\n\n if (Path.cwd() / \"src\" / \"environment.yml\").is_file():\n call([\"conda\", \"install\", \"--file\", \"src/environment.yml\", \"--yes\"])\n\n pip_command = [\"install\", \"-U\", \"-r\", \"src/requirements.txt\"]\n\n if os.name == \"posix\":\n python_call(\"pip\", pip_command)\n else:\n command = [sys.executable, \"-m\", \"pip\"] + pip_command\n subprocess.Popen(command, creationflags=subprocess.CREATE_NEW_CONSOLE)", "def venv():\n path = '/srv/addok/venv/'\n if not exists(path):\n with sudo(user='addok'):\n run(f'python3 -m venv {path}')\n pip('install pip -U')", "def install_testutils(virtual_env):\n subprocess.call(['easy_install', 'nose'], env=virtual_env)\n subprocess.call(['easy_install', 'coverage'], env=virtual_env)\n subprocess.call(['easy_install', 'MySQL-python'], env=virtual_env)", "def _setup_venv(self):\n python.setup_virtualenv(\n self.venv_path, sudo_user=self.user, python_version=3)\n packages = [\n \"future\", \"lxml\", \"ipaddress\", \"sqlalchemy < 2.0\", \"python-memcached\",\n \"python-dateutil\", \"configparser\"\n ]\n if self.dbengine == \"postgres\":\n 
packages.append(\"psycopg2-binary\")\n else:\n packages.append(\"mysqlclient\")\n python.install_packages(packages, self.venv_path, sudo_user=self.user)\n target = \"{}/master.zip\".format(self.home_dir)\n if os.path.exists(target):\n os.unlink(target)\n utils.exec_cmd(\n \"wget https://github.com/sys4/automx/archive/master.zip\",\n sudo_user=self.user, cwd=self.home_dir)\n self.repo_dir = \"{}/automx-master\".format(self.home_dir)\n if os.path.exists(self.repo_dir):\n shutil.rmtree(self.repo_dir)\n utils.exec_cmd(\n \"unzip master.zip\", sudo_user=self.user, cwd=self.home_dir)\n utils.exec_cmd(\n \"{} setup.py install\".format(\n python.get_path(\"python\", self.venv_path)),\n cwd=self.repo_dir)", "def install():\n build()\n sh(\"%s setup.py develop\" % PYTHON)", "def install_deps():\n dist = check_distribution()\n if dist == Distribution.TEXLIVE:\n texlive_install_deps()\n elif dist == Distribution.MIKTEX:\n miktex_install_deps()\n\n install_pygments()", "def install_dependencies():\n\n # check python version and verify we are using Python 3\n if sys.version[0] < '3':\n print(\"ERROR: python version 3 required. You are using version \"\n \"{}\".format(sys.version))\n print(\"You must install python 3 from https://www.python.org\")\n print(\"Make sure to check the 'pip' package manager option when\")\n print(\"installing python\")\n return\n try:\n import pip\n except ModuleNotFoundError:\n print(\"The python 'pip' package manager is required.\")\n print(\"Go to https://www.python.org and download Python 3\")\n print(\"When re-installing, select 'modify' and make sure\")\n print(\"to check the 'pip' option\")\n return\n\n print(\"Python 3 and pip is installed\")\n\n # upgrade/install dependencies such as robot framework\n subprocess.run([\"python\", \"-m\", \"pip\", \"install\", \"-q\", \"--user\",\n \"--no-warn-script-location\", \"-r\",\n os.path.join(os.path.curdir, \"requirements.txt\")],\n shell=True, check=True)\n print(\"Robot framework is installed and up to date\")\n print(\"PyQT5 is installed and up to date\")", "def sub_install_python_requirements_aws():\n # Activate the virtualenv\n activate = 'source {0}/{1}/bin/activate'.format(\n env.virtualenv['dir'], env.virtualenv['name'])\n run(activate)\n\n # make sure the directory is there\n run('mkdir -p /home/ubuntu')\n\n # put the local directory '/Users/jenniferchen/github/HS698-project'\n # - it contains files or subdirectories\n # to the ubuntu server\n put('/Users/jenniferchen/github/HS698-project',\n '/home/ubuntu')\n\n # Install Python requirements\n install = 'pip install -r ' \\\n '/home/ubuntu/HS698-project/Flask_app/requirements.txt'\n\n # Join and execute the commands\n sudo(install)\n # Run the file app.py to start the Flask app\n dev_server = 'python HS698-project/Flask_app/app.py'\n run(dev_server)", "def install_system_packages():\n print(\"Installiere notwendige Pakete...\")\n _run('sudo apt update')\n _run(\n \"sudo apt install \"\n \"apache2 apache2-dev python3-dev python3-venv python3-pip postgresql-contrib libpq-dev\"\n )\n print(\"Fertig!\", end=\"\\n\\n\")", "def update_dependencies():\n pip = env.virtualenv.child('bin', 'pip')\n reqs = env.code_dir.child('deploy-requirements.txt')\n sudo('%s -q install -U pip' % pip)\n sudo('%s -q install -r %s' % (pip, reqs))", "def setup():\n global venvs\n \n try:\n os.mkdir(basedir)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n os.chdir(basedir)\n \n # Delete virtualenvs and recreate\n for venv in glob('venv-*'):\n shutil.rmtree(venv)\n for py in 
available_python_versions():\n check_call(['virtualenv', '-p', py, '--system-site-packages', 'venv-%s' % py])\n venvs.append((py, 'venv-%s' % py))\n \n # Check out and update the repository\n if not os.path.exists('ipython'):\n try :\n check_call(['git', 'clone', ipy_repository])\n except CalledProcessError :\n check_call(['git', 'clone', ipy_http_repository])\n os.chdir(repodir)\n check_call(['git', 'checkout', 'master'])\n try :\n check_call(['git', 'pull', ipy_repository, 'master'])\n except CalledProcessError :\n check_call(['git', 'pull', ipy_http_repository, 'master'])\n os.chdir(basedir)", "def install_requirements():\n run_commands('pip install -r ./requirements/dev.txt')", "def update_requirements():\n\n with virtualenv(VIRTUALENV_PATH):\n cmd = ['pip install']\n cmd += ['--requirement %s' % os.path.join(CODE_DIR,'requirements.txt')]\n run(' '.join(cmd))", "def setup():\r\n global venvs\r\n\r\n try:\r\n os.mkdir(basedir)\r\n except OSError, e:\r\n if e.errno != errno.EEXIST:\r\n raise\r\n os.chdir(basedir)\r\n\r\n # Delete virtualenvs and recreate\r\n for venv in glob('venv-*'):\r\n shutil.rmtree(venv)\r\n for py in available_python_versions():\r\n check_call(['virtualenv', '-p', py,\r\n '--system-site-packages', 'venv-%s' % py])\r\n venvs.append((py, 'venv-%s' % py))\r\n\r\n # Check out and update the repository\r\n if not os.path.exists('Theano'):\r\n try:\r\n check_call(['git', 'clone', ipy_repository])\r\n except CalledProcessError:\r\n check_call(['git', 'clone', ipy_http_repository])\r\n os.chdir(repodir)\r\n check_call(['git', 'checkout', 'master'])\r\n try:\r\n check_call(['git', 'pull', ipy_repository, 'master'])\r\n except CalledProcessError:\r\n check_call(['git', 'pull', ipy_http_repository, 'master'])\r\n os.chdir(basedir)", "def set_installed_packages():\n global INSTALLED_PACKAGES, REQUIRED_VERSION\n if INSTALLED_PACKAGES:\n return\n\n if os.path.exists(BIN_PYTHON):\n pip = subprocess.Popen(\n (BIN_PYTHON, '-m', 'pip', 'freeze'),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n (stdout, stderr) = pip.communicate()\n pip.wait()\n\n INSTALLED_PACKAGES = [normalize_package_name(r.decode().split('==')[0].lower()) for r in stdout.split()]\n REQUIRED_VERSION = next((package for package in INSTALLED_PACKAGES if re.match(r'^lore[!<>=]', package)), None)\n if REQUIRED_VERSION:\n REQUIRED_VERSION = re.split(r'[!<>=]', REQUIRED_VERSION)[-1]", "def requires(*requirements, **kwargs):\n if '/.tox/' in sys.executable:\n venv = os.path.dirname(os.path.dirname(sys.executable))\n elif env.virtual_env: # pragma: no cover\n venv = env.chut_virtualenv = env.virtual_env\n else: # pragma: no cover\n venv = os.path.expanduser(kwargs.get('venv', '~/.chut/venv'))\n if not env.pip_download_cache: # pragma: no cover\n env.pip_download_cache = os.path.expanduser('~/.chut/cache')\n sh.mkdir('-p', env.pip_download_cache)\n bin_dir = os.path.join(venv, 'bin')\n if bin_dir not in env.path: # pragma: no cover\n env.path = [bin_dir] + env.path\n requirements = list(requirements)\n if 'chut' not in requirements:\n requirements.insert(0, 'chut')\n if not test.d(venv): # pragma: no cover\n import urllib\n url = 'https://raw.github.com/pypa/virtualenv/master/virtualenv.py'\n urllib.urlretrieve(url, '/tmp/_virtualenv.py')\n sh[sys.executable]('-S /tmp/_virtualenv.py', venv) > 1\n sh.rm('/tmp/_virtualenv*', shell=True)\n info('Installing %s...' 
% ', '.join(requirements))\n sh.pip('install -qM', *requirements) > 1\n elif env.chut_virtualenv:\n upgrade = '--upgrade' in sys.argv\n if (env.chut_upgrade or upgrade): # pragma: no cover\n installed = ''\n else:\n installed = str(sh.pip('freeze')).lower()\n requirements = [r for r in requirements if r.lower() not in installed]\n if requirements: # pragma: no cover\n info('Updating %s...' % ', '.join(requirements))\n sh.pip('install -qM --upgrade', *requirements) > 1\n executable = os.path.join(bin_dir, 'python')\n if not env.chut_virtualenv: # pragma: no cover\n env.chut_virtualenv = venv\n os.execve(executable, [executable] + sys.argv, env)", "def install_environment(root):\n sys.stdout.write('Installing virtualenv into %s \\n' % root)\n try:\n import virtualenv\n except ImportError:\n sys.stdout.write('Installing virtualenv into global interpreter \\n')\n subprocess.call([VE_GLOBAL_SCRIPT, PROJECT_ROOT])\n import virtualenv\n\n if path.exists(root):\n shutil.rmtree(root)\n virtualenv.logger = virtualenv.Logger(consumers=[])\n virtualenv.create_environment(root, site_packages=False)\n ret_code = subprocess.call([VE_SCRIPT, PROJECT_ROOT, root])\n sys.exit(ret_code)", "def install_requirements():\n _git_pull()\n _install_requirements()\n _syncdb()\n _migrate()\n _restart_webserver()", "def install_backend_deps():\n with lcd(BACKENDDIR):\n cmd = '%(pip)s install -r %(requirements_file)s' % {\n 'pip': get_pip(),\n 'requirements_file': requirements_file\n }\n local(cmd)\n # Install Pandoc\n local(\"sudo apt-get install pandoc\")\n # Install Pyandoc\n with lcd(HOMEDIR):\n if not os.path.isdir(os.path.join(HOMEDIR, 'pyandoc')):\n local(\"git clone [email protected]:kennethreitz/pyandoc.git\")\n with lcd(\"pyandoc\"):\n if not env.local:\n\t with prefix('. 
/home/ubuntu/virtualenvs/venv-system/bin/activate'):\n local(\"python setup.py install\")\n else:\n local(\"python setup.py install\")", "def AptInstall(vm):\n for package in APT_PACKAGES:\n vm.InstallPackages(package)", "def sub_install_packages():\n sudo('apt-get update') # Update repository links\n sudo('apt-get -y upgrade') # Upgrade the system\n package_str = ' '.join(INSTALL_PACKAGES)\n sudo('apt-get -y install ' + package_str) # Install the packages", "def bootstrap():\n _require_environment()\n\n adduser()\n install_python()\n install_git()\n install_apache()\n install_mysql()\n setup_project()", "def setup():\n\n debs = (\"python-setuptools\", \"apache2\", \"libapache2-mod-wsgi\")\n\n require(\"hosts\", provided_by=[production, staging])\n sudo(\"apt-get install %s\" % \" \".join(debs))\n sudo(\"easy_install virtualenv pip\")\n sudo(\"mkdir -p %(path)s\" % env)\n with cd(\"%(path)s\" % env):\n sudo(\"mkdir -p releases; mkdir -p packages\")\n sudo(\"virtualenv --no-site-packages .\")\n sudo(\"mkdir -p /var/log/twit-demo; chown www-data:www-data /var/log/twit-demo\")", "def AptInstall(vm):\n vm.Install('build_tools')\n vm.InstallPackages(APT_PACKAGES)", "def install_packages(self):\n for package in self.packages:\n utils.exec_cmd('yum install -v -y {0}'.format(package))", "def _setup_virtualenv():\n if files.exists(_interpolate(VIRTUALENV_DIR)):\n print _interpolate('virtualenv %(virtualenv)s already exists')\n else:\n with prefix(_virtualenvwrapper_prefix()):\n run(_interpolate('mkvirtualenv --no-site-packages %(virtualenv)s'))\n with hide('commands'):\n print 'virtualenv %s created with python %s\\n' % (env.project['virtualenv'], run(GET_PYTHON_VERSION))", "def pip_install_requirements(virtualenv_path, requirements_path, cache_path, log_path):\n\n requirements_file = os.path.join(requirements_path, 'requirements.txt')\n log_file = os.path.join(log_path, 'pip.log')\n\n if not exists(requirements_file) or not exists(virtualenv_path):\n abort(red('Could not install packages. 
Virtual environment or requirements.txt not found.'))\n\n args = (virtualenv_path, requirements_file, cache_path, log_file)\n run('%s/bin/pip install -r %s --download-cache=%s --use-mirrors --quiet --log=%s' % args)", "def install():\n deploy()\n configure()", "def set_up(dev=False):\n _install_dependencies()", "def setup():\n require('hosts', provided_by=[prod])\n require('code_root')\n sudo('apt-get update')\n sudo('apt-get install -y python-setuptools')\n sudo('easy_install pip')\n sudo('pip install virtualenv')\n sudo('aptitude install -y apache2')\n sudo('aptitude install -y libapache2-mod-wsgi')\n sudo('apt-get install -y nginx')\n update_webserver_config()\n sudo('mkdir -p %s; cd %s; virtualenv .;' % (env.code_root, env.code_root))\n sudo('cd %s;mkdir releases; mkdir shared; mkdir packages; mkdir shared/media; mkdir shared/media/file;' % (env.code_root))\n deploy()", "def install_deps():\n click.echo(\"install_deps\")", "def YumInstall(vm):\n vm.InstallPackages(YUM_PACKAGES)\n _Install(vm)", "def conda_install_requirements(venv):\n # Upload the requirements file.\n put(utils.files('requirements', 'base.txt'), utils.home('base.txt'))\n put(utils.files('requirements', 'prod.txt'), utils.home('prod.txt'))\n\n # Activate the virtual environment.\n activate = '{0}/bin/activate'.format(utils.home('apps', 'miniconda'))\n\n with prefix('source {activate} {venv}'.format(venv=venv, activate=activate)):\n run('pip install -r {0}'.format(utils.home('prod.txt')))\n\n # Remove the uploaded files.\n with cd(utils.home()):\n run('rm {0}'.format(utils.home('base.txt')))\n run('rm {0}'.format(utils.home('prod.txt')))", "def _install_packages(packages):\n for package in packages:\n cuisine.package_ensure(package)", "def pip_installs():\n pip = r'pip-2.7 install --install-option=\"--install-scripts=$PWD/bin\" --install-option=\"--install-lib=$PWD/lib/python2.7\" '\n with settings(warn_only=True):\n run(\"mkdir $HOME/tmp\")\n with cd(remote_dir):\n for installation in install_list:\n run(\"export TEMP=$HOME/tmp && %s %s\" % (pip, installation))\n run(\"echo '#%s' >> $HOME/.bash_profile\" % python_add_str)", "def setup_virtual_env(self):\n\n venv(\"{0}_env\".format(self.app_name), self.install_django_project)", "def pip_requirements():\n\n require(\n \"virtualenv_path\",\n \"requirements_path\",\n \"http_proxy\",\n \"https_proxy\",\n \"sudo_user\",\n )\n cmd = \"pip install --quiet --requirement %s\" % env.requirements_path\n\n # append packages url if specified\n if env.get(\"packages_url\") is not None:\n cmd += \" -f %s\" % env.get(\"packages_url\")\n\n with context_managers.proxy(env.http_proxy, env.https_proxy):\n with context_managers.virtualenv(env.virtualenv_path):\n sudo(cmd, user=env.sudo_user)", "def dev():\n\n # Python build headers.\n packages = [\n 'python3-setuptools',\n 'python3-dev',\n 'python3-tk',\n 'python-setuptools',\n 'python-dev',\n 'python-tk',\n ]\n\n sudo('apt-get -y install {}'.format(' '.join(packages)))", "def install():\n PackCommandExecutor().pack()\n InstallCommandExecutor().install()", "def develop():\n dev_packages = [\n 'pytest', 'pytest-xdist', 'pytest-pep8', 'tox', 'httpie'\n ]\n if not path.exists(\"env\"):\n fab.local(\"virtualenv -p /usr/bin/python3 env\")\n fab.local(\"env/bin/pip install --upgrade pip setuptools\")\n fab.local(\"env/bin/python setup.py develop\")\n fab.local(\"env/bin/pip install {}\".format(\" \".join(dev_packages)))", "def install(self):\n\n self.clean_git_checkout(self.git_repo, '/src')\n\n self.__copy_config_templates();\n\n 
self.local(\"sudo pip install -r src/requirements.txt --upgrade\")\n\n if not self.is_local():\n PiService.install(self) #copy to remote\n\n self.sudo(\"pip install -r src/requirements.txt --upgrade\")", "def pip_packages():\n packages = reduce(lambda a, x: \"%s %s\" % (a, x), PIP_PACKAGES, '')\n sudo(\"pip install %s &> /dev/null\" % packages)", "def install_pip():\n pip_install_txt = os.path.join(os.path.abspath(os.path.join(__file__, os.pardir)), \"build_test_dependencies.txt\")\n call_subprocess(\"python3 -m pip install -r %s\" % pip_install_txt)\n print(\"Stage install dependencies -- COMPLETED --\")", "def install(self) -> None:\n if self.local_packages:\n self.prepare_install_local()\n self.install_local()\n if self.remote_packages:\n self.install_from_url()\n if self.repository_packages:\n self.install_from_repository()\n if self.debuginfo_packages:\n self.install_debuginfo()", "def create_virtualenv():\n\n require(\n \"virtualenv_path\",\n \"python_bin\",\n \"http_proxy\",\n \"https_proxy\",\n \"sudo_user\",\n )\n # Added system-site-packages as environment\n # uses global packages like MySQLdb\n cmd = \"virtualenv --python %s %s --system-site-packages\" % (env.python_bin, env.virtualenv_path)\n\n with context_managers.proxy(env.http_proxy, env.https_proxy):\n # Needs to cd into a directory that the sudo user can temporarily write\n # to.\n with cd(\"/tmp\"):\n sudo(cmd, user=env.sudo_user)", "def setup_cappa():\n with cd('/vagrant'):\n sudo('python setup.py install')", "def install():\n remote_egg_path = os.path.join(remote_egg_dir, get_egg_name())\n sudo('easy_install -U %s' % remote_egg_path)\n sudo('rm %s' % remote_egg_path)", "def create_virtualenv(virtualenv_path):\n\n run('virtualenv %s --no-site-packages' % virtualenv_path)", "def setUp(self):\n self.tempdir = tempfile.TemporaryDirectory()\n self.tempdir_path = pathlib.Path(self.tempdir.name)\n self.python = self.tempdir_path / \"bin\" / \"python\"\n venv.create(\n env_dir=self.tempdir_path, system_site_packages=False, with_pip=True\n )\n\n # Ensure the virtual environment has a recent version of pip which\n # has support for PEP 517.\n checked_subprocess_run(f\"{self.python} -m pip install --upgrade pip\")", "def install_test_deps():\n workon = '.'\n if VENVWRAPPER:\n workon=os.getenv(\"WORKON_HOME\")\n cmd = '{workon}/{env}/bin/pip install nose-cov webtest mock'.format(\n envs=ENVS, env=VENV, workon=workon)\n print(cmd)\n subprocess.call(cmd.split())", "def install(force, packages):\n setup_audit_log()\n for pspec in CFG.package_specs(packages):\n perform_install(pspec, is_upgrade=False, force=force, quiet=False)", "def AptInstall(vm):\n vm.InstallPackages(APT_PACKAGES)\n _Install(vm)", "def required():\n pip = path(\"bin/pip\")\n if not pip.exists():\n sh('%s install -E tg2env -r normal-reqs.txt --extra-index-url=http://www.turbogears.org/2.0/downloads/current/index' % pip)\n call_pavement('pavement.py', 'develop')", "def install_frontend_deps():\n\n with lcd(FRONTENDDIR):\n cmd = '%(npm)s install' % {'npm': get_npm()}\n local(cmd)\n cmd = '%(bower)s install' % {'bower': get_bower()}\n local(cmd)", "def texlive_install_deps():\n print('Installing dependencies...')\n subprocess.run([\"tlmgr\", \"install\"] + read_deps())\n print('Dependencies installed')", "def install_requirements(requirements_json, ext):\n\n @task\n def requirements_json_install_virtualenv():\n with cd('/home/vagrant'):\n fname = '/home/vagrant/requirements.' 
+ ext\n put(StringIO(requirements_json), fname)\n run('virtualenv venv')\n run('source venv/bin/activate; cappa install -r ' + fname)\n\n return requirements_json_install_virtualenv", "def init():\n print(\"Installed everything under {0} \"\n \"virtual environment\".format(package_name()))", "def quickstart():\n if not os.path.exists(\"./fabric_factory/ve\"):\n bootstrap()\n else:\n print \"No need to create virtualenv, 've' already exists\"\n install_requirements()\n project_linkage()", "def YumInstall(vm):\n _Install(vm)", "def YumInstall(vm):\n _Install(vm)", "def install(env, requirements, args, quiet=False):\n if os.path.isfile(requirements):\n args += ('-r', requirements)\n label = 'project'\n else:\n args += ('-U', '-e', '.')\n label = 'library'\n\n if not quiet:\n print('== Step 2. Install {0} =='.format(label))\n\n pip_cmd(env, ('install', ) + args, echo=not quiet)\n\n if not quiet:\n print()\n\n return True", "def installDevelopmentPackageDependencies():\n sudo('DEBIAN_FRONTEND=noninteractive '\n 'apt-get install -y gcc python-all-dev')", "def install_requirements(installation_files=None, update_pip=False):\n from .project import sudo_project, virtualenv_path, requirements_txt, \\\n git_repository_path\n\n if not installation_files:\n installation_files = requirements_txt()\n\n if isinstance(installation_files, basestring):\n installation_files = [installation_files]\n\n with sudo_project():\n path = virtualenv_path()\n\n for installation_file in installation_files:\n info('Installing requirements from file {}', installation_file)\n\n with virtualenv.activate(path), cd(git_repository_path()):\n installation_method = get_installation_method(installation_file)\n if installation_method == 'pip':\n if update_pip:\n python.update_pip()\n python.pip('install', '-r', installation_file)\n elif installation_method == 'setuptools':\n with cd(git_repository_path()):\n run('python {} develop'.format(installation_file))\n else:\n raise ValueError(\n '\"{}\" is not a valid installation file'.format(\n installation_file))", "def test_in_virtualenv(self):\n new_executor = self.executor.in_virtualenv('/appenv')\n output, _err = new_executor.pip.install('a-local-package').batch()\n self.assertEqual(output, 'a-local-package installed')\n new_executor_one = self.executor.patch_env(PATH='/bin')\n new_executor_two = new_executor_one.in_virtualenv('/appenv')\n output, _err = new_executor_two.pip.install('a-local-package').batch()\n self.assertEqual(output, 'a-local-package installed')", "def update_requirements():\n\n require('code_root', provided_by=env.environments)\n requirements = os.path.join(env.code_root, 'requirements')\n sdists = os.path.join(requirements, 'sdists')\n base_cmd = ['pip install']\n base_cmd += ['-q -E %(virtualenv_root)s' % env]\n base_cmd += ['--no-index --find-links=file://%s' % sdists]\n # install GDAL by hand, before anything else that might depend on it\n cmd = base_cmd + ['--no-install \"GDAL==1.6.1\"']\n sudo(' '.join(cmd), user=env.deploy_user)\n # this directory won't exist if GDAL was already installed\n if files.exists('%(virtualenv_root)s/build/GDAL' % env):\n sudo('rm -f %(virtualenv_root)s/build/GDAL/setup.cfg' % env, user=env.deploy_user)\n with cd('%(virtualenv_root)s/build/GDAL' % env):\n sudo('%(virtualenv_root)s/bin/python setup.py build_ext '\n '--gdal-config=gdal-config '\n '--library-dirs=/usr/lib '\n '--libraries=gdal1.6.0 '\n '--include-dirs=/usr/include/gdal '\n 'install' % env, user=env.deploy_user)\n # force reinstallation of OpenBlock every time\n 
with settings(warn_only=True):\n sudo('pip uninstall -y -E %(virtualenv_root)s ebpub ebdata obadmin' % env)\n for file_name in ['ebpub.txt', 'ebdata.txt', 'obadmin.txt', 'openrural.txt']:\n apps = os.path.join(requirements, file_name)\n cmd = base_cmd + ['--requirement %s' % apps]\n sudo(' '.join(cmd), user=env.deploy_user)", "def venv(session):\n # Install dependencies.\n session.install(\"--upgrade\", \"pip\", \"setuptools\")\n session.install(\"-r\", \"requirements-dev.txt\")\n session.install(\"-e\", \".\")\n\n # Customize the venv.\n env_dir = Path(session.bin)\n activate = env_dir / 'activate'\n with activate.open('a') as f:\n f.write(f'\\n[ -f {activate.resolve()}/postactivate ] && . {activate.resolve()}/postactivate\\n')\n\n {{ cookiecutter.project_name }}_complete = nox_file / 'contrib/{{ cookiecutter.project_name }}-complete.sh'\n postactivate = env_dir / 'postactivate'\n with postactivate.open('a') as f:\n f.write('export PYTHONBREAKPOINT=bpdb.set_trace\\n')\n f.write(f'source { {{ cookiecutter.project_name }}_complete.resolve() }\\n')\n\n predeactivate = env_dir / 'predeactivate'\n with predeactivate.open('a') as f:\n f.write('unset PYTHONBREAKPOINT\\n')", "def install_django_project(self):\n\n from django.conf import settings as django_settings\n\n with cd(\"{0}\".format(self.app_remote_dir)):\n\n pip(\"install -r requirements.txt\")\n\n with cd(\"{0}\".format(self.app_package)):\n self.setup_settings_local()\n\n self.syncdb(django_settings)\n self.setup_gunicorn_supervisor()", "def install(package):\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", package])", "def test_require_python_package(venv):\n\n from fabtools import require\n import fabtools\n\n with fabtools.python.virtualenv(venv):\n require.python.package('fabric')\n\n assert is_file(posixpath.join(venv, 'bin/fab'))", "def install():\n sudo('apt-get install python')", "def _install_dependencies(self):\n\n requirements_file = self.app_directory.joinpath('requirements.txt')\n\n package_copy_required = False\n if requirements_file.exists():\n cmd = [\n sys.executable,\n '-m',\n 'pip',\n 'install',\n '-r',\n str(requirements_file),\n '-t',\n str(self.build_directory),\n ]\n package_copy_required = True\n else:\n cmd = [\n sys.executable,\n '-m',\n 'pip',\n 'install',\n '.',\n '-t',\n str(self.build_directory),\n ]\n\n logger.debug('Running subprocess cmds: %s', cmd)\n\n try:\n _ = subprocess.run(cmd, check=True)\n except Exception:\n logger.error('Pip failed to install the app using cmd=[%s].', cmd)\n raise\n\n if package_copy_required:\n shutil.copytree(\n self.package_dir, self.build_directory.joinpath(self.package_name)\n )", "def install():\n # update apt index\n deb.update_index(quiet=False)\n\n print(green('Installing PostgreSQL and its development packages.'))\n utils.deb.install('postgresql')\n utils.deb.install('postgresql-contrib')\n utils.deb.install('libpq-dev')", "def install(self, egg, dir_path):", "def deps(ctx):\n header(deps.__doc__)\n with ctx.cd(ROOT):\n ctx.run(\n \"pip install -r requirements/develop.pip -r requirements/doc.pip\", pty=True\n )", "def miktex_install_deps():\n raise NotImplementedError", "def _install_packages(prefix, net_install):\n prefix = os.path.join(os.path.realpath(prefix), \"miniconda\")\n directory = os.path.join(sys._MEIPASS, \"packages\")\n tmp_dir = os.path.join(sys._MEIPASS, \"tmp\")\n if not os.path.exists(tmp_dir):\n os.mkdir(tmp_dir)\n\n if \"Windows\" in platform.system():\n python = os.path.join(prefix, \"python\")\n elif \"Linux\" in 
platform.system():\n bin_dir = os.path.join(prefix, \"bin\")\n lib_dir = os.path.join(prefix, \"lib\")\n os.putenv('PYTHONPATH', '{}:{}'.format(bin_dir, lib_dir))\n\n # Fix for the SELinux issue on the 32 bit installer\n if \"32bit\" in platform.architecture() and \"armv7l\" not in platform.machine():\n system_call(\n \" \".join([\n \"execstack\",\n \"-c\",\n os.path.join(\n lib_dir,\n \"python2.7\",\n \"lib-dynload\",\n \"_ctypes.so\"\n )\n ]),\n )\n\n python = os.path.join(prefix, \"bin\", \"python\")\n\n print(\"\\tEnsuring pip is installed\")\n system_call(\n \" \".join([python, \"-m\", \"ensurepip\"]),\n )\n\n if net_install:\n for dependency in conda_dependencies[platform.system()][platform.architecture()[0]].keys():\n print(\"### installing: {}\".format(dependency))\n system_call(\n \" \".join([\n python,\n \"-m\",\n \"conda\",\n \"install\",\n dependency,\n ]),\n )\n for dependency in pip_dependencies:\n print(\"### installing: {}\".format(dependency))\n\n system_call(\n \" \".join([\n python,\n \"-m\",\n \"pip\",\n \"install\",\n '\"{}\"'.format(dependency),\n ]),\n )\n else:\n for dependency in conda_dependencies[platform.system()][platform.architecture()[0]].keys():\n print(\"### installing: {}\".format(dependency))\n _dependency = list(\n filter(\n lambda filename: (dependency.lower() in filename.lower()) and (\".tar.bz2\" in filename),\n os.listdir(directory)\n )\n )\n _dependency = os.path.join(directory, _dependency[0])\n\n system_call(\n \" \".join([\n python,\n \"-m\",\n \"conda\",\n \"install\",\n \"--offline\",\n _dependency,\n ]),\n )\n for dependency in pip_dependencies:\n print(\"### installing: {}\".format(dependency))\n _dependency = dependency\n if \"git+\" in dependency:\n _dependency = dependency.split(\"/\")[-1].split(\"#\")[0]\n if \"mast\" in dependency:\n system_call(\n \" \".join([\n python,\n \"-m\",\n \"pip\",\n \"install\",\n \"--no-index\",\n \"--force-reinstall\",\n \"--find-links\",\n directory,\n '\"{}\"'.format(_dependency),\n ]),\n )\n else:\n system_call(\n \" \".join([\n python,\n \"-m\",\n \"pip\",\n \"install\",\n \"--upgrade\",\n \"--no-index\",\n \"--find-links\",\n directory,\n '\"{}\"'.format(_dependency),\n ]),\n )", "def install_requirements_file_with_virtualenv(self, requirements_json, ext='json'):\n self.run_fabric_task(install_requirements(requirements_json, ext))", "def test_require_virtualenv():\n\n from fabtools.require.python import virtualenv\n\n try:\n virtualenv('/tmp/venv')\n\n assert is_dir('/tmp/venv')\n assert is_file('/tmp/venv/bin/python')\n\n finally:\n run('rm -rf /tmp/venv')" ]
[ "0.79861814", "0.7786814", "0.7778329", "0.77555937", "0.76573926", "0.7628999", "0.7594286", "0.75698286", "0.744025", "0.74060243", "0.73893565", "0.7277837", "0.72283304", "0.72203624", "0.72196406", "0.7206115", "0.71840227", "0.7166442", "0.7125818", "0.706724", "0.7015596", "0.6973395", "0.6963707", "0.693457", "0.6929783", "0.6928907", "0.6926851", "0.69253856", "0.69164616", "0.6912666", "0.6911833", "0.6903974", "0.689336", "0.68730915", "0.68710166", "0.684497", "0.6844033", "0.68405926", "0.6824948", "0.6818305", "0.6808315", "0.680488", "0.677007", "0.6741363", "0.6720722", "0.6684959", "0.6684809", "0.66668725", "0.6662978", "0.66500133", "0.6643046", "0.6640292", "0.6639873", "0.6634974", "0.6625488", "0.66110426", "0.6606076", "0.6604533", "0.6579682", "0.65517884", "0.6533562", "0.6531501", "0.6463633", "0.6463266", "0.6452515", "0.6431358", "0.64256245", "0.63762665", "0.6372112", "0.6364552", "0.63619024", "0.6331171", "0.6322727", "0.6321856", "0.6321778", "0.63136697", "0.63079876", "0.63060915", "0.6303605", "0.6295062", "0.6287004", "0.6287004", "0.6281671", "0.62716466", "0.62700146", "0.6258537", "0.623327", "0.6227447", "0.62237513", "0.6222441", "0.61937606", "0.61825067", "0.61663437", "0.61625904", "0.61560017", "0.6112615", "0.60879654", "0.6086519", "0.6069949", "0.6067464" ]
0.6828054
38
Symlink to the new current release.
def symlink_current_release():
    require("release", provided_by=[deploy])
    with cd("%(path)s/releases" % env):
        sudo("ln -s %(release)s current_tmp && mv -Tf current_tmp current" % env)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def symlink():\n releases()\n env.current_path = '/root/your_project/current'\n run('rm %(current_path)s' % env)\n run('ln -s %(current_release)s %(current_path)s' % env)", "def symlink(timestamp):\n if exists(env.current_dir):\n run('rm -r %(current_dir)s' % env)\n run('ln -s %s %s' % (os.path.join(env.releases_dir, timestamp), env.current_dir))", "def createLink(self):\n \n if( self.useLink ):\n trymakedir( self.parent.installPath + \"/\" + self.alias )\n\n os.chdir( self.parent.installPath + \"/\" + self.alias )\n \n # check for already existing symlinks or dirs \n if( os.path.islink( self.version )):\n os.unlink( self.version )\n elif( os.path.isdir( self.version )):\n self.abort( \"could not create link to [ \" + self.linkPath + \" ]\\nin [ \" \\\n + os.path.basename( self.installPath ) + \" ]!!!\" )\n\n os.symlink( self.linkPath , self.version )\n print \"+ Linking \" + self.parent.installPath + \"/\" + self.alias + \"/\" + self.version \\\n + \" -> \" + self.linkPath", "def make_active(revision):\n run('ln -sfn {base}/{revision}/ {base}/newest'.format(base=BASE_PATH,\n revision=revision))", "def mklinkto(self, oldname):\n error.checked_call(os.link, str(oldname), str(self))", "def make_symlink(dbconfig, targ):\n if \"latest\" in dbconfig and not dbconfig[\"latest\"]:\n return\n link = re.sub(r'[0-9]+', 'latest', targ)\n try:\n os.symlink(targ, link)\n info(\"create link \" + link + \" --> \" + targ)\n except OSError as e:\n if e.errno == errno.EEXIST:\n os.remove(link)\n os.symlink(targ, link)\n info(\"move link \" + link + \" --> \" + targ)", "def update_current_link(self, name: str):\n lnk = self.ws_current_link\n if lnk.is_symlink():\n lnk.unlink()\n if name is not None:\n lnk.symlink_to(name)\n self.ws_config_file.touch(exist_ok=True)", "def link(self):\n\n if self.path_source is not None:\n full_source_path = os.path.join(\n os.path.expandvars(self.path_source), self.name\n )\n full_destination_path = os.path.join(\n os.path.expandvars(self.path_destination), self.name\n )\n\n try:\n if self.sudo:\n spawn.process(\n f'ln -sfv \"{full_source_path}\" \"{full_destination_path}\"',\n sudo=True,\n )\n else:\n os.symlink(full_source_path, full_destination_path)\n except FileExistsError:\n message.error(\n \"Can't symlink, file already exists at destination. 
Attempting fix.\"\n )\n os.remove(full_destination_path)\n message.info(f\"Removed: '{full_destination_path}'\")\n os.symlink(full_source_path, full_destination_path)\n finally:\n message.info(\n f\"Symlink created: '{full_source_path}' <--> '{full_destination_path}'\"\n )\n else:\n message.error(\n f\"'{self.name}' has no source from which to create a link from.\"\n )", "def switchRevision(deploymentPath, revision):\n with cd(deploymentPath):\n sudo('rm -f current')\n sudo('ln -s %s current' % revision)", "def _activate_new_source(self, source_dir, active_version_symlinks):\n # Switch the symlink and use our new project\n logger.info(\"Activating new source via symlinks\")\n for symlink in active_version_symlinks:\n logger.info(\"Symlinking %s\", symlink)\n symlink_dir, _ = os.path.split(symlink)\n with hide(*fab_output_hides):\n sudo('mkdir -p %s' % symlink_dir)\n sudo('rm -f %s' % symlink)\n sudo('ln -s %s %s' % (source_dir, symlink))\n\n # Clean out any stale pycs that may have been generated by queued\n # up processes that were using the old symlink\n with hide(*fab_output_hides):\n sudo('find %s -name \"*.pyc\" -delete' % source_dir)", "def update_link(self):\n try:\n relpath = os.path.relpath(self.path, os.path.dirname(self.link_path))\n os.symlink(relpath, self.link_path)\n except OSError as e:\n if e.errno == errno.EEXIST:\n os.unlink(self.link_path)\n os.symlink(self.path, self.link_path)", "def symlink(path, v=False):\r\n if not os.path.exists(path):\r\n err(path + ' : no such file or directory')\r\n elif not os.path.isdir(path):\r\n err(path + ' : not a directory')\r\n else:\r\n theme_name = os.path.basename(os.path.normpath(path))\r\n theme_path = os.path.join(_THEMES_PATH, theme_name)\r\n if os.path.exists(theme_path):\r\n err(path + ' : already exists')\r\n else:\r\n if v:\r\n print(\"Linking `{p}' to `{t}' ...\".format(p=path, t=theme_path))\r\n try:\r\n os.symlink(path, theme_path)\r\n except Exception as e:\r\n err(\"Cannot link `{p}' to `{t}':\\n{e}\".format(p=path, t=theme_path, e=str(e)))", "def ln(src, dst):\n os.symlink(src, dst)", "def symlink(self, filen, link):\n src = os.path.abspath(filen)\n cwd = self.getWorkingDirectory()\n dest = os.path.join(cwd, link)\n os.symlink(os.path.relpath(src, cwd), dest)", "def symlink(self, filen, link):\n src = os.path.abspath(filen)\n cwd = self.getWorkingDirectory()\n dest = os.path.join(cwd, link)\n os.symlink(os.path.relpath(src, cwd), dest)", "def create_symlink(src, dest):\n sudo('ln -s {} {}'.format(src, dest))", "def _create_symlink(self, source_path, main):\n main_file = os.path.realpath(os.path.join(source_path, main))\n if not os.path.isfile(main_file):\n main_file += '.js'\n if not os.path.isfile(main_file):\n print('\\tWARNING: Could not create symlink for {}, no such file.'.format(main_file))\n return\n main_file_name = os.path.basename(main_file)\n with change_working_directory(os.path.realpath(self.symlink_dir)) as cd:\n file_path = os.path.join(cd, main_file_name)\n self.created(file_path)\n if os.path.islink(file_path):\n os.remove(file_path)\n symlink(main_file, main_file_name)", "def link(self):\n \n self.__enter__()\n return self.stable_path", "def create_soft_link():\n vlogger_path = os.path.join(vlogger_dir, \"vlogger.py\")\n dir_path = os.path.expanduser(\"~\")\n bin_dir = os.path.join(dir_path, \"bin\")\n if not os.path.exists(bin_dir):\n os.mkdir(bin_dir)\n\n soft_path = os.path.join(bin_dir, \"vlogger\")\n\n if not os.path.exists(soft_path):\n command = [\"ln\", \"-s\", vlogger_path, soft_path]\n 
cmd_str = \" \".join(command)\n print(\"Soft link command for easy execution: {}\".format(cmd_str))\n subprocess.call([\"ln\", \"-s\", vlogger_path, soft_path])\n else:\n print(\"Soft link already created: {}\".format(soft_path))", "def command_new_version(self):\n repoinit.new_version(*self.args())", "def link(path, service_name, branch, username):\n slab_logger.log(15, 'Setting the current service to %s' % service_name)\n if service_name == \"current\":\n if os.path.isfile(os.path.join(path, \"current\")):\n currentf = open(os.path.join(path, \"current\"), 'r')\n currentf.seek(0)\n service_name = currentf.readline()\n else:\n slab_logger.error('Unable to determine the current service. '\n 'Please enter a service to work on.')\n return 1\n\n returncode = set_current_service(path, service_name)\n if not returncode == 0:\n slab_logger.error('Unable to write to \"current\" file')\n return 1\n\n if not os.path.islink(os.path.join(path, \"current_service\")):\n # Note: What to link is first arg, where to link is second aka src dest\n if os.path.isdir(os.path.join(path, \"services\", service_name)):\n os.symlink(os.path.join(path, \"services\", service_name),\n os.path.join(path, \"current_service\"))\n slab_logger.debug('Made symlink for %s' % service_name)\n return 0\n else:\n slab_logger.debug('Could not find source for symlink. '\n 'Attempting re-clone of %s.' % service_name)\n returncode = sync_service(path, branch, username, service_name)\n if returncode:\n os.symlink(os.path.join(path, \"services\", service_name),\n os.path.join(path, \"current_service\"))\n slab_logger.debug('Made symlink for %s' % service_name)\n return 0\n else:\n slab_logger.error(\"Failed to find source for symlink: \" +\n os.path.join(path, \"services\", service_name))\n return 1\n else:\n slab_logger.debug(\"Link already exists.\")\n return 0", "def version_link(self):\n release_link = url_for('data.data', selected_release=self.DATASET_RELEASE)\n return Markup(f\"<a href='{release_link}'>{self.DATASET_RELEASE}</a>\")", "def _makeSymlink ( target, source, env ) :\n if len(target) != 1 :\n fail ( \"unexpected number of targets for symlink: \"+str(target) )\n if len(source) != 1 :\n fail ( \"unexpected number of sources for symlink: \"+str(source) )\n\n target = str(target[0])\n source = str(source[0].abspath)\n trace ( \"Executing symlink `%s' -> `%s'\" % ( target, source ), \"makeSymlink\", 3 )\n\n os.symlink ( source, target )", "def redirect_version():\n return redirect(url_for(\"base_blueprint.version\"), code=301)", "def _new_release_dir(self, connection):\n release_dir_timestamp = datetime.datetime.utcnow().strftime('%Y%m%d-%H%M%S')\n commit_hash = self._get_commit_hash(connection)\n\n release_dir = f'{release_dir_timestamp}-{self.config.deployment_user}-{commit_hash}-{self.project_version}'\n print(blue(f\"Release directory set to {release_dir}\"))\n\n return release_dir", "def makeLinks(self, source, target):\n\n if os.path.exists(target): os.unlink(target)\n os.symlink(source, target)", "def promote_release(self, release):\n logger.info(\"Updating production alias with revision '{0}'\".format(\n release))\n if release.isdigit() or release == '$LATEST':\n version = release\n else:\n try:\n response = self.aws_lambda.get_alias(\n FunctionName=self.function_selected,\n Name=release\n )\n version = response['FunctionVersion']\n except self.aws_lambda.exceptions.ResourceNotFoundException:\n logger.error(\"Can't found the qualifier {0} for {1}\".format(\n release,\n self.function_selected\n ))\n return\n\n 
self.update_or_create_alias(version, 'production')", "def _link(filename, existing_filename):\n CreateHardLinkW(filename, existing_filename, 0)", "def __enter__(self):\n if self.stable_path:\n return self.stable_path\n \n _, file_name = os.path.split(self._source_path)\n stable_dir = tempfile.mkdtemp(prefix=\"%s-\" % file_name)\n assert self._source_path.startswith(\"/\")\n stable_path = os.path.join(stable_dir, self._source_path[1:])\n \n self.log.debug(\"Linking %s to point to %s\", stable_path, \n self._source_path)\n ensure_dir(os.path.dirname(stable_path))\n try:\n os.link(self._source_path, stable_path)\n except (EnvironmentError) as e:\n if e.errno == errno.ENOENT:\n return None\n raise\n \n self._stable_dir = stable_dir\n self.stable_path = stable_path\n return self", "def symlink(self, req, link, parent, name):\r\n self.reply_err(req, EROFS)", "def new_version(self, latest_version_id: uplink.Path(name=\"id\")):\n pass", "def symlink(self, src, dst):\n return os.symlink(src, dst)", "def update_link(self, target, dest):\n if not target:\n self.remove_link(dest)\n return\n\n reltarget = os.path.relpath(\n target, os.path.join(self.dirname, os.path.dirname(dest)))\n\n for link in self.runscript.links:\n if link[1] == dest:\n link[0] = reltarget\n break\n else:\n self.runscript.add_link(reltarget, dest)", "def _symlink(source, link_name):\n flags = 0\n\n if source is not None and os.path.isdir(source):\n flags = 1\n\n CreateSymbolicLinkW(link_name, source, flags)", "def _link_existing_install(env_path, existing_install):\n # Check that the expected S2E installation directories exist\n for dir_ in ('bin', os.path.join('share', 'libs2e')):\n if not os.path.isdir(os.path.join(existing_install, dir_)):\n raise CommandError(f'Invalid S2E installation - ``{dir_}`` does not '\n 'exist. Are you sure that this directory '\n 'contains a valid S2E installation?')\n\n logger.info('Using existing S2E installation at %s', existing_install)\n\n # Clear out anything that may exist in the new environment's install dir\n new_install = os.path.join(env_path, 'install')\n shutil.rmtree(new_install, True)\n\n # We must use an absolute path for symlinks\n os.symlink(os.path.abspath(existing_install), new_install)\n\n # We still need to clone guest-images repo, because it contains info about\n # the location of images\n guest_images_repo = CONSTANTS['repos']['images']['build']\n repos.git_clone_to_source(env_path, guest_images_repo)", "def copy_and_link(file_name):\n if os.path.normpath(output_path) != os.getcwd():\n write_to_runner(f\"mv {file_name} {output_path} \\n\")\n write_to_runner(f\"ln -s {output_path}/{file_name} . 
\\n\")", "def switch_to_version(self, version):\n self.current_version = version\n self.save()", "def _sync_symlink(self, binary_name, link_to):\n\n # The symlink we are creating:\n link_path = os.path.join(self.bin_dir, binary_name)\n\n # The expected file we should be linking to:\n link_dest = os.path.join(self.bin_dir, link_to)\n\n if not os.path.exists(link_path) or \\\n not os.path.islink(link_path) or \\\n os.path.realpath(link_path) != os.path.realpath(link_dest):\n if os.path.exists(link_path):\n os.remove(link_path)\n os.symlink(link_to, os.path.join(self.bin_dir, binary_name))\n self.output.append(\"Symlinked %s to %s.\" % (link_path, link_dest))\n self.changed = True", "def _symlink(conf, devname, label, remove=False):\n return\n\n linkpath = conf.get('symlink')\n if linkpath:\n linkpath = expanduser(linkpath)\n if lexists(linkpath):\n os.unlink(linkpath)\n if not remove:\n # TODO: handle path errors\n os.symlink(get_mount_target(devname, label), linkpath)", "def svn_fs_revision_link(*args):\r\n return _fs.svn_fs_revision_link(*args)", "def deploy(version):\n toolkit.readmegen(version)", "def _post_src_install_soname_symlinks(mysettings, out):\n\n\timage_dir = mysettings[\"D\"]\n\tneeded_filename = os.path.join(mysettings[\"PORTAGE_BUILDDIR\"],\n\t\t\"build-info\", \"NEEDED.ELF.2\")\n\n\tf = None\n\ttry:\n\t\tf = io.open(_unicode_encode(needed_filename,\n\t\t\tencoding=_encodings['fs'], errors='strict'),\n\t\t\tmode='r', encoding=_encodings['repo.content'],\n\t\t\terrors='replace')\n\t\tlines = f.readlines()\n\texcept IOError as e:\n\t\tif e.errno not in (errno.ENOENT, errno.ESTALE):\n\t\t\traise\n\t\treturn\n\tfinally:\n\t\tif f is not None:\n\t\t\tf.close()\n\n\tqa_no_symlink = \"\"\n\tf = None\n\ttry:\n\t\tf = io.open(_unicode_encode(os.path.join(\n\t\t\tmysettings[\"PORTAGE_BUILDDIR\"],\n\t\t\t\"build-info\", \"QA_SONAME_NO_SYMLINK\"),\n\t\t\tencoding=_encodings['fs'], errors='strict'),\n\t\t\tmode='r', encoding=_encodings['repo.content'],\n\t\t\terrors='replace')\n\t\tqa_no_symlink = f.read()\n\texcept IOError as e:\n\t\tif e.errno not in (errno.ENOENT, errno.ESTALE):\n\t\t\traise\n\tfinally:\n\t\tif f is not None:\n\t\t\tf.close()\n\n\tqa_no_symlink = qa_no_symlink.split()\n\tif qa_no_symlink:\n\t\tif len(qa_no_symlink) > 1:\n\t\t\tqa_no_symlink = \"|\".join(\"(%s)\" % x for x in qa_no_symlink)\n\t\t\tqa_no_symlink = \"^(%s)$\" % qa_no_symlink\n\t\telse:\n\t\t\tqa_no_symlink = \"^%s$\" % qa_no_symlink[0]\n\t\tqa_no_symlink = re.compile(qa_no_symlink)\n\n\tlibpaths = set(portage.util.getlibpaths(\n\t\tmysettings[\"ROOT\"], env=mysettings))\n\tlibpath_inodes = set()\n\tfor libpath in libpaths:\n\t\tlibdir = os.path.join(mysettings[\"ROOT\"], libpath.lstrip(os.sep))\n\t\ttry:\n\t\t\ts = os.stat(libdir)\n\t\texcept OSError:\n\t\t\tcontinue\n\t\telse:\n\t\t\tlibpath_inodes.add((s.st_dev, s.st_ino))\n\n\tis_libdir_cache = {}\n\n\tdef is_libdir(obj_parent):\n\t\ttry:\n\t\t\treturn is_libdir_cache[obj_parent]\n\t\texcept KeyError:\n\t\t\tpass\n\n\t\trval = False\n\t\tif obj_parent in libpaths:\n\t\t\trval = True\n\t\telse:\n\t\t\tparent_path = os.path.join(mysettings[\"ROOT\"],\n\t\t\t\tobj_parent.lstrip(os.sep))\n\t\t\ttry:\n\t\t\t\ts = os.stat(parent_path)\n\t\t\texcept OSError:\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tif (s.st_dev, s.st_ino) in libpath_inodes:\n\t\t\t\t\trval = True\n\n\t\tis_libdir_cache[obj_parent] = rval\n\t\treturn rval\n\n\tmissing_symlinks = []\n\n\t# Parse NEEDED.ELF.2 like LinkageMapELF.rebuild() does.\n\tfor l in lines:\n\t\tl = l.rstrip(\"\\n\")\n\t\tif 
not l:\n\t\t\tcontinue\n\t\tfields = l.split(\";\")\n\t\tif len(fields) < 5:\n\t\t\tportage.util.writemsg_level(_(\"\\nWrong number of fields \" \\\n\t\t\t\t\"in %s: %s\\n\\n\") % (needed_filename, l),\n\t\t\t\tlevel=logging.ERROR, noiselevel=-1)\n\t\t\tcontinue\n\n\t\tobj, soname = fields[1:3]\n\t\tif not soname:\n\t\t\tcontinue\n\t\tif not is_libdir(os.path.dirname(obj)):\n\t\t\tcontinue\n\t\tif qa_no_symlink and qa_no_symlink.match(obj.strip(os.sep)) is not None:\n\t\t\tcontinue\n\n\t\tobj_file_path = os.path.join(image_dir, obj.lstrip(os.sep))\n\t\tsym_file_path = os.path.join(os.path.dirname(obj_file_path), soname)\n\t\ttry:\n\t\t\tos.lstat(sym_file_path)\n\t\texcept OSError as e:\n\t\t\tif e.errno not in (errno.ENOENT, errno.ESTALE):\n\t\t\t\traise\n\t\telse:\n\t\t\tcontinue\n\n\t\tmissing_symlinks.append((obj, soname))\n\n\tif not missing_symlinks:\n\t\treturn\n\n\tqa_msg = [\"QA Notice: Missing soname symlink(s):\"]\n\tqa_msg.append(\"\")\n\tqa_msg.extend(\"\\t%s -> %s\" % (os.path.join(\n\t\tos.path.dirname(obj).lstrip(os.sep), soname),\n\t\tos.path.basename(obj))\n\t\tfor obj, soname in missing_symlinks)\n\tqa_msg.append(\"\")\n\tfor line in qa_msg:\n\t\teqawarn(line, key=mysettings.mycpv, out=out)", "def link(self, req, ino, newparent, newname):\r\n self.reply_err(req, EROFS)", "def source_release(request, new_package):\n\n new_module, pkg_root = new_package\n source_label = random_str(40)\n source_url = \"http://{}.com/{}\".format(random_str(7), random_str(12))\n with open(os.path.join(new_module, META_NAME), \"w\") as openmeta:\n openmeta.write((\n '{{\"packages\": [\"find_packages()\"], \"source_label\": \"{}\", '\n '\"source_url\": \"{}\"}}'\n ).format(source_label, source_url))\n\n request.addfinalizer(module_cleanup)\n return new_module, source_label, source_url", "def _make_release_branch(self):\n user = getpass.getuser()\n if not user == self._user:\n raise Error('the command should only be run as user %s' % self._user)\n branch = self._branch\n # get the latest master updates\n subprocess.check_call('git remote update', shell=True)\n subprocess.check_call('git checkout master', shell=True)\n # does a git pull and updates the submodules\n GitUtil.update_submodules()\n # get the latest commit before the release is cut\n self._latest_commit = GitUtil.get_latest_commit()\n print 'Making release branch %s' % branch\n # create the new release branch\n GitUtil.create_branch(branch)\n print TermColor.ColorStr('Created remote branch %s' % branch, 'GREEN')", "def relink(f):\n if os.path.islink(f):\n linkto = os.path.join(NEW_LINK_BASE, os.path.basename(os.readlink(f)))\n #print 'Relinking %s-> %s from \\n %s' % (f, linkto, os.readlink(f))\n #print 'removing %s' % f\n os.remove(f)\n os.symlink(linkto, f)", "def fix_link(hook, target_link):\n if os.path.exists(hook):\n os.unlink(hook)\n os.symlink(target_link, hook)", "def symlink(source, target):\n source, target = map(os.path.expanduser, (source, target))\n print(\"Will symlink %s to %s\" % (source, target))\n\n if os.path.exists(target):\n if os.path.islink(target) and os.path.realpath(target) == source:\n logging.info(\"%s exists\" % target)\n return\n\n backup = target + \".old\"\n\n if os.path.exists(backup):\n raise Exception(\"Can't backup to %s: file already exists.\" % backup)\n\n shutil.move(target, backup)\n\n else:\n os.symlink(source, target)\n logging.info(\"%s symlinked to %s\" % (source, target))", "def rollback():\n current_timestamp = current()\n previous_timestamp = previous()\n\n if previous_timestamp:\n 
execute(symlink, *(previous_timestamp, ))\n run('rm -rf %s' % os.path.join(env.releases_dir, current_timestamp))", "def project_linkage():\n current_dir = os.getcwd()\n ve_lib = os.path.join(current_dir, 'fabric_factory', 've', 'lib')\n \n python_version = os.listdir(ve_lib).pop()\n for target_dir in [\"project\", \"worker\", \"factory\"]:\n if not os.path.islink(\n os.path.join(ve_lib, python_version,\n \"site-packages\", target_dir)):\n local('ln -s %s %s' %\n (\n os.path.join(current_dir,\"fabric_factory\", \"src\", target_dir),\n os.path.join(ve_lib, python_version,\n \"site-packages\", target_dir)\n )\n )\n else:\n print 'link to %s already exists' %target_dir", "def create_release(self, alias=\"devel\"):\n self.create_package(alias)\n\n self.upload_package()\n\n logger.info(\"Creating release {0}\".format(self.hash_release))\n\n response_code = self.aws_lambda.update_function_code(\n FunctionName=self.function_selected,\n S3Bucket=self.function_config['Code']['S3Bucket'],\n S3Key=self.s3_filename,\n Publish=True\n )\n\n logger.info(\"Created revision {0}\".format(response_code['Version']))\n\n self.update_or_create_alias(response_code['Version'], self.hash_release)\n self.update_or_create_alias(response_code['Version'], alias)\n\n logger.info(\"If config wash changed, remember to update function \"\n \"configuration\")", "def link(self):\n\t\tadiff = ApplicationDifferencer()\n\n\t\t# Determine the differences between what's in the\n\t\t# application's directory and what's currently\n\t\t# available from the root filesystem (in relation\n\t\t# to this application).\n\t\tresults = adiff.scan(\n\t\t\t\tos.path.join(\n\t\t\t\t\tAppFolders.get(self.type),\n\t\t\t\t\tself.name + \"/\" + self.version\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\n\t\t# Now go through the results, creating directories and\n\t\t# symlinking files where required.\n\t\tattempt_successes = list()\n\t\tattempt_failures = list()\n\t\tattempt_exists = list()\n\t\ttotal_files = 0\n\t\tfor i in results:\n\t\t\ttotal_files += 1\n\n\t\t\t# Normalize (replace version number with\n\t\t\t# \"Current\") the application path.\n\t\t\tsv = self.getNormalizedApplicationPath(self.version, i[1])\n\t\t\ti = (i[0], sv, i[2])\n\n\t\t\tif (i[0] == \"directory\"):\n\t\t\t\ttry:\n\t\t\t\t\tself.oper_mkdir(i[2], 0755)\n\t\t\t\t\tattempt_successes.append(i[2])\n\t\t\t\texcept:\n\t\t\t\t\tlog.showErrorW(\"Unable to create directory \" + i[2])\n\t\t\t\t\tattempt_failures.append(i[2])\n\t\t\telif (i[0] == \"file\"):\n\t\t\t\ttry:\n\t\t\t\t\tself.oper_symlink(i[1], i[2])\n\t\t\t\t\tattempt_successes.append(i[2])\n\t\t\t\texcept:\n\t\t\t\t\tlog.showErrorW(\"Unable to symlink file \" + i[2])\n\t\t\t\t\tattempt_failures.append(i[2])\n\t\t\telif (i[0] == \"exists\"):\n\t\t\t\tattempt_exists.append(i[2])\n\t\t\telse:\n\t\t\t\tlog.showWarningW(\"Unknown operation for \" + i[1])\n\t\t\n\t\treturn attempt_successes, attempt_failures, total_files", "def repo_link(repo):\n return \"https://github.com/\" + repo", "def __upgrade_install__(path, release):\n install = Popen([\"freebsd-update\", \"-b\", path, \"-d\",\n \"{}/var/db/freebsd-update/\".format(path), \"-f\",\n \"{}/etc/freebsd-update.conf\".format(path), \"-r\",\n release, \"install\"], stderr=PIPE)\n install.communicate()\n\n return install.returncode", "def release(ctx, sdist=True, wheel=True, sign=True, dry_run=False):\n # Build docs first. 
Use terribad workaround pending invoke #146\n ctx.run(\"inv docs\", pty=True, hide=False)\n # Move the built docs into where Epydocs used to live\n target = 'docs'\n rmtree(target, ignore_errors=True)\n # TODO: make it easier to yank out this config val from the docs coll\n copytree('sites/docs/_build', target)\n # Publish\n publish(ctx, sdist=sdist, wheel=wheel, sign=sign, dry_run=dry_run)\n # Remind\n print(\"\\n\\nDon't forget to update RTD's versions page for new minor \"\n \"releases!\")", "def make_release_folders(dirname):\n require('hosts', 'project_path', provided_by=envs.ENVS)\n with cd(env.project_path):\n with cd(dirname):\n run('mkdir -p logs releases packages')\n with cd('releases'):\n run('touch none')\n run('test ! -e current && ln -s none current', quiet=True)\n run('test ! -e previous && ln -s none previous', quiet=True)", "def _switchBranch(self, release):\n if release is None:\n self.branch = None\n self.branch_dir = None\n log.info('No release branch available')\n else:\n self.wc.update()\n assert self.wc.exists('branches/' + release)\n io.linesToFile(self.path(self.BRANCH_FILE), [release])\n self.branch = release\n self.branch_dir = 'branches/' + release\n self.wc.update(self.branch_dir, depth='infinity')\n log.info('Working on branch ' + self.branch)", "def symlink(origin, target):\n # Skip anything in the home directory if the user is admin\n if user_is_admin() and not args.root and check_contain_home_dir(target):\n print(highlight_colour(\"'%s'\") % str(target) +\n warning_colour(\" is inside of home folder. Skipping...\"))\n raise StopTraversing(\"Skipping.\")\n\n # Check for a broken symlink, if true: prompt for replacement.\n # This is done to avoid having any broken symlinks lingering.\n if is_broken_symlink(target):\n if args.yes or prompt(origin, target, \"remove\"):\n target.unlink()\n else:\n return\n\n if args.replace:\n replace_symlink(origin, target)\n elif args.remove:\n remove_symlink(origin, target)\n else:\n create_symlink(origin, target)", "def __set_release(self, project):\r\n release = project.session.create(self._config['release'])\r\n _logger.info(\"Current release: '%s'\" % project.release)\r\n _logger.info(\"Configuration release: '%s'\" % release)\r\n if project.release != release:\r\n _logger.info(\"Updating release on the project hierarchy.\")\r\n for subp in [project] + project.subprojects:\r\n subp.release = release", "def update_latest_branch (product, which, main_branch):\n\n name = \"Latest_ACE7TAO3_\" + which\n\n vprint ('Fast-forwarding', name, 'to', main_branch)\n ex (\"cd $DOC_ROOT/\" + product + \" && git fetch . \" + main_branch + \":\" + name)", "def do_release(self, version):\n build_dir = self.options.buildroot\n patch_dir = self.options.patch_dir\n\n # variables related to the version\n prev_version = version.prev_version\n\n # If we're operating in the same repo as this script, kindly make it\n # in a subdirectory to avoid polluting things\n if build_dir == os.path.dirname(os.path.abspath(__file__)):\n build_dir = os.path.join(build_dir, 'build')\n\n if not os.path.exists(build_dir):\n logging.debug('Creating build dir: %s', build_dir)\n os.mkdir(build_dir)\n\n os.chdir(build_dir)\n\n package = 'mediawiki-' + version.raw\n package_dir = os.path.join(build_dir, package)\n\n # Export the target. 
If we're going to patch later, use the branch\n if patch_dir:\n get_git(package_dir, version.branch)\n else:\n get_git(package_dir, version.tag)\n\n if patch_dir:\n maybe_apply_patches(\n package,\n get_patches_for_repo(patch_dir, 'core', version.branch))\n maybe_apply_patches(\n os.path.join(package, 'vendor'),\n get_patches_for_repo(patch_dir, 'vendor', version.branch))\n\n ext_exclude = []\n for ext in get_skins_and_extensions(package_dir):\n if patch_dir:\n maybe_apply_patches(\n os.path.join(package, ext),\n get_patches_for_repo(patch_dir, ext, version.branch))\n ext_exclude.append(\"--exclude\")\n ext_exclude.append(ext)\n\n # Generate the .tar.gz files\n out_files = [\n self.make_tar(\n package=package,\n input_dir=package,\n build_dir=build_dir),\n self.make_tar(\n package='mediawiki-core-' + version.raw,\n input_dir=package,\n build_dir=build_dir,\n add_args=ext_exclude)\n ]\n\n # Patch\n if not self.options.no_previous and prev_version is not None:\n prev_dir = 'mediawiki-' + prev_version\n get_git(os.path.join(build_dir, prev_dir),\n MwVersion(prev_version).tag)\n\n self.make_patch(\n build_dir, package + '.patch.gz', prev_dir, package, 'normal')\n out_files.append(package + '.patch.gz')\n logging.debug('%s.patch.gz written', package)\n if os.path.exists(os.path.join(package, 'languages', 'messages')):\n i18n_patch = 'mediawiki-i18n-' + version.raw + '.patch.gz'\n if (self.make_patch(\n build_dir, i18n_patch, prev_dir, package, 'i18n')):\n out_files.append(i18n_patch)\n logging.info('%s written', i18n_patch)\n else:\n i18n_patch = None\n\n # Sign\n for file_name in out_files:\n if self.options.sign:\n try:\n proc = subprocess.Popen([\n 'gpg', '--detach-sign',\n os.path.join(build_dir, file_name)])\n except OSError as ose:\n logging.error(\"gpg failed, does it exist? 
Skip with \" +\n \"--dont-sign.\")\n logging.error(\"Error %s: %s\", ose.errno, ose.strerror)\n sys.exit(1)\n if proc.wait() != 0:\n logging.error(\"gpg failed, exiting\")\n sys.exit(1)\n output(version, out_files)\n return 0", "def publish_release(ctx):\n rel = _get_release()\n rel.update_release(rel.title, rel.raw_data[\"body\"], draft=False)", "def test_create_symlink_file(self):\n pass", "def copyAndLinkConfig(config):\n\n basename = os.path.basename(config)\n new_config_path = os.path.join(basedefs.DIR_CONFIG, basename)\n\n # Verify destination dir exists, create it if necessary\n if not os.path.isdir(basedefs.DIR_CONFIG):\n try:\n logging.debug(\"Creating ovirt-engine config directory\")\n os.makedirs(basedefs.DIR_CONFIG)\n except:\n logging.error(traceback.format_exc())\n raise Exception(output_messages.ERR_EXP_FAILED_CREATE_RHEVM_CONFIG_DIR % basedefs.DIR_CONFIG)\n\n # Verify original config is not already linked\n if os.path.islink(config):\n if (os.readlink(config) == new_config_path):\n logging.debug(\"%s is already linked to %s\"%(config, new_config_path))\n return(os.path.join(basedefs.DIR_CONFIG, basename))\n else:\n raise Exception(output_messages.ERR_EXP_LINK_EXISTS%(config, new_config_path))\n\n # Verify original config is a normal file, and copy it to the new location\n elif os.path.isfile(config):\n try:\n utils.copyFile(config, basedefs.DIR_CONFIG)\n\n # Remove old file\n logging.debug(\"Removing %s\" %(config))\n os.remove(config)\n\n # Linking\n logging.debug(\"Linking %s to %s/%s\" %(config, basedefs.DIR_CONFIG, config))\n os.symlink(new_config_path, config)\n except:\n logging.error(traceback.format_exc())\n raise Exception(output_messages.ERR_EXP_CPY_RHEVM_CFG % (config, \"%s/%s\" % (basedefs.DIR_CONFIG, config)))\n # return new path\n return new_config_path", "def releases():\n r = run('ls -x %(releases_path)s' % env)\n env.releases = sorted(r.split(\"\\t\"))\n if len(env.releases) >= 1:\n env.current_revision = env.releases[-1]\n env.current_release = '%(releases_path)s/%(current_revision)s' % env\n if len(env.releases) > 1:\n env.previous_revision = env.releases[-2]\n env.previous_release = '%(releases_path)s/%(previous_revision)s' % env\n\n #cleanup old releases. max 3 allowed.\n cleanup()", "def states(c):\n release_dir = utils.new_release(\n conn, deploy_root=SALT_DEPLOY_PATH, in_repo_path=\"root\", branch=SALT_BRANCH\n )\n utils.promote_release_to_current(\n conn, deploy_root=SALT_DEPLOY_PATH, release_dir=release_dir\n )", "def create_release(ctx):\n # Get the head of master\n r = _get_repo()\n b = r.get_branch(branch=\"master\")\n head = b.commit\n\n faasm_ver = get_faasm_version()\n\n # Create a tag from the head\n tag_name = _tag_name(faasm_ver)\n r.create_git_tag(\n tag_name,\n \"Release {}\\n\".format(faasm_ver),\n head.sha,\n \"commit\",\n )\n\n r.create_git_release(\n tag_name,\n \"Faasm {}\".format(faasm_ver),\n \"Release {}\\n\".format(faasm_ver),\n draft=True\n )", "def release(filepath, github_account, force=False):\n repo = _git.clone_from_github(\n _REPO_PATH, join(filepath, _REPO_NAME), github_account=github_account)\n latest_tag = repo.latest_tag()\n version = _Version(latest_tag)\n if not _common.check_prerelease(repo, latest_tag, github_account, force):\n return\n _run_tests(repo)\n version.bump_minor()\n new_version = str(version)\n repo.tag(new_version)\n repo.push(tags=True)", "def link_snapshot(argstr):\n pass", "def upgrade_project(ctx, path):\n with ctx.cd(path):\n ctx.run(\"newt upgrade\")", "def relink():\n _intro()\n from . 
import crosslink as cr\n\n cr.relink()", "def ln_overwrite(src, dest):\n if exists(dest, use_sudo=True):\n sudo(\"rm %s && ln -s %s %s\" % (dest, src, dest))\n else:\n sudo(\"ln -s %s %s\" % (src, dest))", "def link(target, link_name):\n src = os.path.abspath(target)\n dst = os.path.abspath(link_name)\n os.symlink(src, dst)", "def create_symbolic_link(file, target):\n try:\n os.symlink(file, target)\n except NotImplementedError:\n logger.critical(\"Symbolic links not supported on this platform\")\n raise\n except OSError:\n logger.critical(\"Not sufficient permissions\")\n raise", "def install_baseos(self):\n\n # Check that DFT path is valid\n if not os.path.isdir(self.project.project_definition[\"configuration\"][\"dft-base\"]):\n logging.critical(\"Path to DFT installation is not valid : %s\",\n self.project.project_definition[\"configuration\"][\"dft-base\"])\n exit(1)\n\n # Ensure target rootfs mountpoint exists and is a dir\n if not os.path.isdir(self.project.rootfs_mountpoint):\n os.makedirs(self.project.rootfs_mountpoint)\n else:\n if (\"keep_rootfs_history\" in self.project.project_definition[\"configuration\"] and\n self.project.project_definition[\"configuration\"][\"keep_rootfs_history\"]):\n logging.warn(\"target rootfs mount point already exists : \" + self.project.rootfs_mountpoint)\n# TODO\n logging.critical(\"TODO : handle history : \" + self.project.rootfs_mountpoint)\n exit(1)\n# It looks like i need to add a symlink from history to current\n# It should be optional with overwrite on factory_setup_definition\n# Depending on keeping history or not. So far not available\n# default behavior is not to keep history\n else:\n\n# TODO security hole !!!!!\n# Protect path generation to avoid to remove / !!!\n sudo_command = 'sudo rm -fr \"' + self.project.rootfs_mountpoint +'\"'\n self.execute_command(sudo_command)\n os.makedirs(self.project.rootfs_mountpoint)\n\n # Check if the archive has to be used instead of doing a debootstraping\n # for real. Only if the archive exist...\n if self.project.dft.use_cache_archive and self.cache_archive_is_available:\n self.fake_generate_debootstrap_rootfs()\n else:\n # In any other cases, do a real debootstrap call\n self.generate_debootstrap_rootfs()\n\n # Test if the archive has to be updated\n if self.project.dft.update_cache_archive:\n # But only do it if we haven't bee using the cache, or it\n # would be extracted, then archived again.\n if self.project.dft.use_cache_archive:\n self.update_rootfs_archive()\n\n # Launch Ansible to install roles identified in configuration file\n self.install_packages()\n\n # Once installation has been played, we need to do some cleanup\n # like ensute that no mount bind is still mounted, or delete the\n # DFT ansible files\n self.cleanup_installation_files()\n\n # Remove QEMU if it has been isntalled. 
It has to be done in the end\n # since some cleanup tasks could need QEMU\n if self.use_qemu_static:\n self.cleanup_qemu()", "def deploy_go_app(app_name, uri):\n execute(local_fetch_s3_artifact, uri)\n execute(deploy_artifact, app_name, uri)\n execute(create_symlink,\n '{}/config/config.yaml'.format(get_app_basedir(app_name)),\n '{}/etc/config.yaml'.format(get_current_release_dir(app_name)))", "def postreleaser_before(data):\n\n data['dev_version_template'] = '%(new_version)s.dev'", "def set_note_version(cls, version):\n #Change current working directory to root sdk directory\n Utility.pushd(Settings.rootSdkPath)\n cls.init()\n notes_file = 'releases.txt'\n note = cls.get_note(notes_file)\n if note is not False:\n new_note = '---------------------------------------------------------------------\\n' + \\\n 'Version: ' + version + '\\n' + \\\n '---------------------------------------------------------------------\\n'\n if os.path.isfile(notes_file):\n with open(notes_file,\"r\") as src:\n all_notes=src.readlines()\n if '--------------------------------------------' not in all_notes[0]:\n all_notes.insert(0,new_note)\n else:\n all_notes = new_note\n\n with open(notes_file, 'w') as release_notes:\n release_notes.writelines(all_notes)\n cls.logger.info(\"Release notes vesion set: \" + version)\n # return to the base directory\n Utility.popd()", "def main(self):\n logging.info(\"Doing release for %s\", self.version.raw)\n\n if self.version.branch is None:\n logging.debug(\"No branch, assuming '%s'. Override with --branch.\",\n self.options.branch)\n self.version.branch = self.options.branch\n\n # No version specified, assuming a snapshot release\n if self.options.version is None:\n self.do_release(\n version=MwVersion.new_snapshot(self.options.branch))\n return 0\n\n if self.options.previousversion:\n # Given the previous version on the command line\n self.do_release(version=self.version)\n return 0\n\n no_previous = False\n if self.version.prev_version is None:\n no_previous = True\n if not self.ask(\"No previous release found. 
Do you want to make a \"\n \"release with no patch?\"):\n logging.error('Please specify the correct previous release ' +\n 'on the command line')\n return 1\n if no_previous or self.options.no_previous:\n self.do_release(version=self.version)\n else:\n if not self.ask(\"Was %s the previous release?\" %\n self.version.prev_version):\n logging.error('Please specify the correct previous release ' +\n 'on the command line')\n return 1\n\n self.do_release(version=self.version)\n return 0", "def test_changeVersionsWithPrerelease(self):\n self._testVersionChanging(9, 2, 7, 38)", "def set_current_instance(vhost_path, instance_path):\n\n with cd(vhost_path):\n commands.delete('./previous_instance')\n\n if exists('./current_instance'):\n commands.rename('./current_instance', './previous_instance')\n\n commands.create_symbolic_link(instance_path, './current_instance')", "def test_history_import_symlink():\n with HistoryArchive() as history_archive:\n history_archive.write_metafiles()\n history_archive.write_link('datasets/Pasted_Entry_1.txt', '../target.txt')\n history_archive.write_file('target.txt', 'insecure')\n _run_jihaw_cleanup_check_secure(history_archive, 'Symlink dataset in import archive allowed')", "def menu_python_package_index(self, event=None):\n self.link('http://www.python.org/pypi')", "def _rollback_releaseinfo_file(projname):\n dirs = projname.split('.')\n os.chdir(os.path.join(*dirs))\n print 'rolling back releaseinfo.py for %s' % projname\n os.system('git checkout -- releaseinfo.py')", "def update_snapshot(distribution: str):\n snapshot = f'distribution-{VERSION}'\n run_cmd(['aptly', 'snapshot', 'create',\n snapshot, 'from', 'repo', distribution])\n run_cmd(['aptly', 'publish', 'switch', '-batch', distribution, snapshot])", "def link(self, src, dst, label=None):\r\n self._tag(dst, label)\r\n self._mkdir_for(dst)\r\n abs_src = self._rootjoin(src)\r\n abs_dst = os.path.join(self.chroot, dst)\r\n try:\r\n os.link(abs_src, abs_dst)\r\n except OSError as e:\r\n if e.errno == errno.EEXIST:\r\n # File already exists, skip\r\n pass\r\n elif e.errno == errno.EXDEV:\r\n # Hard link across devices, fall back on copying\r\n shutil.copyfile(abs_src, abs_dst)\r\n else:\r\n raise", "def link(self, src, dst, label=None):\r\n self._tag(dst, label)\r\n self._mkdir_for(dst)\r\n abs_src = self._rootjoin(src)\r\n abs_dst = os.path.join(self.chroot, dst)\r\n try:\r\n os.link(abs_src, abs_dst)\r\n except OSError as e:\r\n if e.errno == errno.EEXIST:\r\n # File already exists, skip\r\n pass\r\n elif e.errno == errno.EXDEV:\r\n # Hard link across devices, fall back on copying\r\n shutil.copyfile(abs_src, abs_dst)\r\n else:\r\n raise", "def get_timestamped_dir(path, name=None, link_to_latest=False):\n current_time = strftime(\"%y-%m-%d/%H-%M-%S\", gmtime())\n dir = path + \"/\" + current_time + \"/\"\n if not os.path.exists(dir):\n os.makedirs(dir)\n if name is not None:\n if os.path.exists(path + \"/\" + name):\n os.remove(path + \"/\" + name)\n os.symlink(current_time, path + \"/\" + name, target_is_directory=True)\n if link_to_latest:\n if os.path.exists(path + \"/latest\"):\n os.remove(path + \"/latest\")\n os.symlink(current_time, path + \"/latest\", target_is_directory=True)\n return dir", "def bump_oa_release_number(**kwargs):\n\n oa_folder = kwargs['workdir'] + '/openstack-ansible/'\n fpth, cver = get_oa_version(oa_folder)\n LOGGER.info(\"Current version {} in {}\".format(cver, fpth))\n\n if cver == \"master\":\n click.confirm(\"Master should only changed when necessary. 
Sure?\")\n\n if kwargs['version'] == \"auto\":\n LOGGER.info(\"Guessing next version\")\n cver_l = cver.split(\".\")\n try:\n cver_l[-1] = str(int(cver_l[-1]) + 1)\n except ValueError as vee:\n LOGGER.error(\"Cannot up the version: {}\".format(vee))\n nver = click.prompt(\"New version?\")\n else:\n nver = \".\".join(cver_l)\n else:\n nver = kwargs['version']\n\n for line in fileinput.input(\"{}/{}\".format(oa_folder, fpth),\n inplace=True):\n print(line.replace(\n \"openstack_release: {}\".format(cver),\n \"openstack_release: {}\".format(nver))),\n LOGGER.info(\"Updated the version in repo to {}\".format(nver))\n\n msg = (\"Here is a commit message you could use:\\n\"\n \"Update all SHAs for {new_version}\\n\\n\"\n \"This patch updates all the roles to the latest available stable \\n\"\n \"SHA's, copies the release notes from the updated roles into the \\n\"\n \"integrated repo, updates all the OpenStack Service SHA's, and \\n\"\n \"updates the appropriate python requirements pins. \\n\\n\"\n \"Depends-On: {release_changeid}\").format(\n new_version=os.environ.get('new_version', nver),\n release_changeid=os.environ.get('release_changeid', '<TODO>'))\n\n if kwargs['commit']:\n repo = Repo(oa_folder)\n repo.git.add('.')\n repo.index.commit(msg)\n click.echo(\"Commit done. Please verify before review.\")\n else:\n click.echo(msg)", "def test_patch_pci_link(self):\n pass", "def symlink(target, path):\n unlink(path)\n path = os.path.realpath(path)\n target = os.path.relpath(os.path.realpath(target), os.path.dirname(path))\n logging.info('Symlinking %s -> %s', path, target)\n os.symlink(target, path)", "def new_realpath(name):\n if name.startswith('link-to-ham'):\n return name[len('link-to-'):]\n else:\n return name", "def link(self, src, dst, label=None):\n self._tag(dst, label)\n self._mkdir_for(dst)\n abs_src = self._rootjoin(src)\n abs_dst = os.path.join(self.chroot, dst)\n try:\n os.link(abs_src, abs_dst)\n except OSError as e:\n if e.errno == errno.EEXIST:\n # File already exists, skip\n pass\n elif e.errno == errno.EXDEV:\n # Hard link across devices, fall back on copying\n shutil.copyfile(abs_src, abs_dst)\n else:\n raise", "def switch_to_latest_version(self):\n self.current_version = Version.objects.filter(is_published=True).latest()\n self.save()", "def create_link(repository, project_name):\n beginning_url = \"https://api.github.com/repos/\"\n separator_url = \"/\"\n end_url = \"/commits\"\n\n base_url = beginning_url+repository+separator_url+project_name+end_url\n return base_url", "def switch(self, url):\r\n self._authsvn('switch', [url])", "def _symlink_tar(self):\r\n outsidep = self.unsafe_common_dir / \"unsafe_file.txt\"\r\n symlinkp = self.unsafe_common_dir / \"symlink.txt\"\r\n symlink_tar = self.unsafe_common_dir / \"symlink.tar.gz\"\r\n outsidep.symlink(symlinkp)\r\n with tarfile.open(symlink_tar, \"w:gz\") as tar:\r\n tar.add(symlinkp)\r\n\r\n return symlink_tar", "def do_deploy(archive_path):\n if not archive_path:\n return False\n if not os.path.exists(archive_path):\n return False\n\n filename = archive_path.split(\"/\")[-1]\n put(archive_path, \"/tmp/{}\".format(filename))\n\n run(\"sudo mkdir -p /data/web_static/releases/{}\".format(filename))\n run(\"sudo tar -xzf /tmp/{} -C /data/web_static/releases/{}\"\n .format(filename, filename))\n run(\"sudo rm /tmp/{}\".format(filename))\n run(\"sudo mv /data/web_static/releases/{}/web_static/*\"\n \" /data/web_static/releases/{}\"\n .format(filename, filename))\n run(\"sudo rm -rf /data/web_static/releases/{}/web_static\"\n 
.format(filename))\n run(\"sudo rm -rf /data/web_static/current\")\n run(\"sudo ln -s /data/web_static/releases/{}/ /data/web_static/current\"\n .format(filename))\n print(\"New version successfully deployed!\")", "def create_symlink(source_file, dest_file, sudo=True):\n LOG.info(\"Creating symlink to {} called {}\".format(source_file, dest_file))\n cmd = \"ln -sf {} {}\".format(source_file, dest_file)\n _exec_cmd(cmd=cmd, sudo=sudo, fail_ok=False)", "def mark_explicit(\n run_directory: Union[str, Path],\n version_root: Union[str, Path],\n link_name: Union[str, Path],\n) -> None:\n run_directory = Path(run_directory).resolve()\n version_root = Path(version_root).resolve()\n link_file = version_root / link_name\n move_link(link_file, run_directory)" ]
[ "0.85483825", "0.7888362", "0.6896718", "0.6580014", "0.640067", "0.6382023", "0.609916", "0.59879214", "0.598062", "0.58782184", "0.5851017", "0.5815926", "0.5792104", "0.57686156", "0.57686156", "0.57261956", "0.57167125", "0.5602009", "0.55953795", "0.55733645", "0.5570685", "0.5562101", "0.5553313", "0.55281484", "0.5523766", "0.5522215", "0.54879534", "0.54795873", "0.54761964", "0.54414445", "0.5437991", "0.54291445", "0.5413189", "0.54090744", "0.53984374", "0.5389665", "0.53641504", "0.5361378", "0.5356297", "0.53495", "0.5345681", "0.53140235", "0.5311181", "0.5293252", "0.5253514", "0.52471614", "0.52404857", "0.5239085", "0.5237054", "0.52060974", "0.5196984", "0.5192382", "0.51742226", "0.5163958", "0.5149868", "0.51376784", "0.5132464", "0.5128551", "0.5128479", "0.51203513", "0.5116557", "0.5115112", "0.50902104", "0.50866467", "0.50853956", "0.5080756", "0.5073846", "0.5072117", "0.503898", "0.5037433", "0.5030075", "0.5026537", "0.50205934", "0.5012392", "0.5010416", "0.50096655", "0.5008956", "0.5006425", "0.50049925", "0.49995002", "0.4999434", "0.49914098", "0.49901676", "0.49853137", "0.4983465", "0.4980041", "0.4980041", "0.49790722", "0.4958174", "0.49504828", "0.49477333", "0.49447346", "0.49438816", "0.4943283", "0.49318382", "0.49304545", "0.49280867", "0.49077722", "0.48984453", "0.48983532" ]
0.8182266
1
Remove older releases, keeping the last `keep_num` intact.
def cleanup(keep_num=5):
    keep_num = int(keep_num)
    assert keep_num > 0, "[ERROR] keep_num must be > 0; refusing to proceed."

    with cd("%(path)s/packages" % env):
        package_files = sorted(run("ls -1").split())
        package_files = [_.replace(".tar.gz", "") for _ in package_files]

    with cd("%(path)s/releases" % env):
        release_files = sorted(run("ls -1").split())
        release_files.remove('current')

    diff = set(package_files).symmetric_difference(set(release_files))
    if diff:
        raise Exception("[ERROR]: Package and release directories are out of sync;"
                        " refusing to proceed. Please fix this difference manually: %s" % diff)

    package_files = package_files[:-keep_num]
    release_files = release_files[:-keep_num]

    with cd("%(path)s/packages" % env):
        [sudo("rm %s.tar.gz" % _) for _ in package_files]

    with cd("%(path)s/releases" % env):
        [sudo("rm -r %s" % _) for _ in release_files]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _deleteOldVersionsByAge(self, model, max_age, number_to_keep=None):\r\n adapter = getVersionManagementAdapter(model)\r\n\r\n version_ids = self._getOldVersionIds(adapter)\r\n if number_to_keep is not None:\r\n if len(version_ids) < number_to_keep:\r\n return\r\n version_ids = version_ids[:-number_to_keep]\r\n\r\n then = datetime.now() - timedelta(days=max_age)\r\n oldest_time = DateTime(then.isoformat())\r\n\r\n index = None\r\n for id in version_ids:\r\n if adapter.getVersionModificationTime(id) >= oldest_time:\r\n break\r\n index = version_ids.index(id)\r\n\r\n delete_ids = []\r\n if index is not None:\r\n delete_ids = version_ids[:index]\r\n self._removed += len(delete_ids)\r\n model.manage_delObjects(delete_ids)", "def do_clean(number=0):\n res = run(\"ls /data/web_static/releases\")\n\n number = int(number)\n list_names = str(res).split()\n date_list = []\n delete_list = []\n patt1 = re.compile(r'web_static_\\d{14}')\n for name in list_names:\n if re.fullmatch(patt1, name):\n date_list.append(int(name[11:]))\n else:\n delete_list.append(name)\n\n for elem in delete_list:\n run(\"rm -Rf /data/web_static/releases/\" + elem)\n\n if number == 0:\n list_names.remove(\"web_static_\" + str(max(date_list)))\n else:\n for _ in range(0, number):\n newer = max(date_list)\n list_names.remove(\"web_static_\" + str(newer))\n date_list.remove(newer)\n\n for names in list_names:\n run(\"rm -Rf /data/web_static/releases/\" + names)\n\n res = local(\"ls versions\")\n version_names = str(res).split()\n delete_list = []\n patt2 = re.compile(r'web_static_\\d{14}\\.tgz')\n for name in version_names:\n if re.fullmatch(patt2, name) is None:\n delete_list.append(name)\n for names in delete_list:\n local(\"rm -Rf versions/\" + names)\n for names in list_names:\n local(\"rm -Rf versions/\" + names + \".tgz\")", "def prune(c):\n with conn.cd(utils.join(SALT_DEPLOY_PATH, utils.DEPLOY_RELEASES_DIR)):\n releases = [\n d.replace(\"./\", \"\").strip()\n for d in conn.run(\"find . 
-maxdepth 1 -mindepth 1 -type d\", pty=True)\n .stdout.strip()\n .split(\"\\n\")\n ]\n releases.sort()\n\n diff = len(releases) - int(SALT_KEEP_RELEASES)\n print(\n f\"Found {len(releases)} current releases; set to keep {SALT_KEEP_RELEASES}\"\n )\n if diff > 0:\n to_delete = releases[:diff]\n print(f\"Cleaning up {len(to_delete)} old release(s)\")\n conn.run(f\"rm -rf {' '.join(to_delete)}\")\n else:\n print(\"Nothing to do\")", "def Remove(self, version_number):\n self.dict.pop(str(version_number))", "def purge_old() -> None:\n conn = sqlite3.connect('rss.db')\n c = conn.cursor()\n c.execute(\n \"\"\"delete from entries where title not in\n (select title from entries order by year desc, month desc, day desc\n limit 20)\n \"\"\"\n )\n conn.commit()\n conn.close()", "def removed_pkgs():\n name_versions = defaultdict(set)\n fedoras = py2_pkgs()\n last_fedoras = defaultdict(set)\n new = {pkg.name for pkg in repoquery(all=True)}\n for version in fedoras:\n for name_evr in set(fedoras[version]):\n name, _, evr = name_evr.partition(' ')\n if name not in new:\n name_versions[name].add(evr)\n last_fedoras[version].add(name)\n max_versions = {name: max(versions, key=SortableEVR)\n for name, versions in name_versions.items()}\n return last_fedoras, max_versions", "def _cleanup_removed_versions(self, consumer, versions):\n prev_resource_types = set(\n self._versions_by_consumer[consumer].keys())\n cur_resource_types = set(versions.keys())\n removed_resource_types = prev_resource_types - cur_resource_types\n if removed_resource_types:\n LOG.debug(\"Removing stale tracked versions: %s\",\n removed_resource_types)\n for resource_type in removed_resource_types:\n self._set_version(consumer, resource_type, None)", "def releases(releaser, count):\n releases = sorted(\n releaser.get_releases().values(),\n key=lambda rel: rel[\"end_timestamp\"],\n reverse=True,\n )\n click.echo(f\"Latest {count} releases:\")\n for release in releases[:count]:\n click.echo(f'{release[\"end_timestamp\"]} {release[\"commit\"]}')", "def get_outdated_containers(prefix, num_to_keep=2):\n most_recent = []\n\n for container_name in pyrax.cloudfiles.list_containers():\n if container_name.startswith(prefix):\n container = pyrax.cloudfiles.get_container(container_name)\n last_modified = get_container_last_modified(container)\n\n if last_modified:\n most_recent.append((last_modified, container))\n\n most_recent.sort()\n most_recent.reverse()\n\n if len(most_recent) > num_to_keep:\n yield most_recent.pop()", "def delete_version(self):\n pass", "def manage_addVersionCleanUp(self, id, max_age, number_to_keep=None, pub_path=None, REQUEST=None):\r\n if not Id(self, id).isValid():\r\n return\r\n object = VersionCleanUp(id, max_age, number_to_keep, pub_path)\r\n self._setObject(id, object)\r\n add_and_edit(self, id, REQUEST, 'manage_workspace')\r\n return ''", "def clean_old_backups(self, encrypted=None, compressed=None,\n content_type=None, database=None,\n keep_number=None):\n if keep_number is None:\n keep_number = settings.CLEANUP_KEEP if content_type == 'db' \\\n else settings.MEDIA_FILENAME_TEMPLATE\n files = self.list_backups(encrypted=encrypted, compressed=compressed,\n content_type=content_type, database=database)\n files = sorted(files, key=utils.filename_to_date, reverse=True)\n files_to_delete = [fi for i, fi in enumerate(files) if i >= keep_number]\n for filename in files_to_delete:\n self.delete_file(filename)", "def gc_deploys(n = 10):\n for deploypath in [env.basepath, env.nodejs]:\n with cd(\"%s/releases\" % deploypath):\n 
files = run(\"ls -1t\").splitlines()\n older_files = files[n:]\n if len(older_files) > 0:\n puts(yellow(\"Removing older deploys: %s\" % \", \".join(older_files)))\n for file in older_files:\n run(\"rm -fr %s\" % file)", "def unkeep(self, *args):\n self.__execute(self.pkgin_bin, \"unkeep\", *args)", "def get_releases(is_vertebrate: bool):\n url = \"http://ftp.ensemblgenomes.org/pub?\"\n if is_vertebrate:\n url = \"http://ftp.ensembl.org/pub?\"\n ret = retry(requests.get, 3, url)\n # sort releases new to old\n releases = sorted(\n [int(i) for i in re.findall(r'\"release-(\\d+)/\"', ret.text)],\n reverse=True,\n )\n if is_vertebrate:\n # ignore immature releases\n releases = [r for r in releases if r > 46]\n return releases", "def keep(self, *args):\n self.__execute(self.pkgin_bin, \"keep\", *args)", "def remove_training_reserves():\n reserves = TrainingReserve.objects.all()\n now = timezone.now()\n for reserve in reserves:\n if reserve.date < now:\n reserve.delete()", "def _gc(self):\n remove_before = time.time() - self._keep_for\n for item in self._queue:\n # Time for the sequence to be removed?\n if item[1] < remove_before:\n # Sequence data is old, so remove it\n self._queue.remove(item)\n else:\n # Sequence number was added recently, so don't remove it. Also\n # stop processing the queue because all later items will be\n # newer\n break", "def release(self, number: int) -> None:\n if number not in self.numbers_set:\n self.numbers_q.append(number)\n self.numbers_set.add(number)", "def delete_old():\n objs = (Snapshot\n .objects\n .filter(timestamp__lte=(datetime.now() - timedelta(days=35)))\n )\n objs.delete()", "def get_files_to_delete(all_files, keep_copies):\n LOG.debug(\"Retain %d files\", keep_copies)\n if keep_copies == 0:\n return all_files\n else:\n return all_files[:-keep_copies]", "def keep_old(ver: str) -> bool:\n ver = travis_normalize_py_version(ver)\n if ver == 'PyPy':\n return any(v.startswith('2') for v in new_versions)\n if ver == 'PyPy3':\n return any(v.startswith('3') for v in new_versions)\n return not is_important(ver)", "def cleanup():\n if len(env.releases) > 3:\n directories = env.releases\n directories.reverse()\n del directories[:3]\n env.directories = ' '.join([ '%(releases_path)s/%(release)s' % { 'releases_path':env.releases_path, 'release':release } for release in directories ])\n run('rm -rf %(directories)s' % env)", "def remove_old_songs(df, too_old=1970):\n drop_indices = df.index[df['year'] < too_old].tolist()\n df = df.drop(drop_indices)\n return df", "def _purge_old(self):\n now = dt_util.utcnow()\n\n _LOGGER.debug(\n \"%s: purging records older then %s(%s)\",\n self.entity_id,\n dt_util.as_local(now - self._samples_max_age),\n self._samples_max_age,\n )\n\n while self.ages and (now - self.ages[0]) > self._samples_max_age:\n _LOGGER.debug(\n \"%s: purging record with datetime %s(%s)\",\n self.entity_id,\n dt_util.as_local(self.ages[0]),\n (now - self.ages[0]),\n )\n self.ages.popleft()\n self.states.popleft()", "def releases():\n r = run('ls -x %(releases_path)s' % env)\n env.releases = sorted(r.split(\"\\t\"))\n if len(env.releases) >= 1:\n env.current_revision = env.releases[-1]\n env.current_release = '%(releases_path)s/%(current_revision)s' % env\n if len(env.releases) > 1:\n env.previous_revision = env.releases[-2]\n env.previous_release = '%(releases_path)s/%(previous_revision)s' % env\n\n #cleanup old releases. 
max 3 allowed.\n cleanup()", "def purge_outdated(self):\n todelete = []\n sql = \"select rowid, path, mtime from pictures\"\n cur = self.con.execute(sql)\n for rowid, path_str, mtime in cur:\n if mtime and op.exists(path_str):\n picture_mtime = os.stat(path_str).st_mtime\n if int(picture_mtime) <= mtime:\n # not outdated\n continue\n todelete.append(rowid)\n if todelete:\n sql = \"delete from pictures where rowid in (%s)\" % ','.join(map(str, todelete))\n self.con.execute(sql)", "def remove_n_nos(self, num_nos):\n for i in range(num_nos):\n elem = random.randint(1, 11 ** 4)\n self.remove(elem)", "def cleanup(self):\n results = run_command(\"gppkg -q --all\")\n gppkgs = results.split('\\n')[self.start_output:self.end_output] #The first line is 'Starting gppkg with args', which we want to ignore.\n\n for gppkg in gppkgs:\n run_command(\"gppkg --remove \" + gppkg)", "def delete_old_backup(self):\n print \"### Info ### Delete redundant backups\"\n for i in range(len(self.date_list)-20):\n os.remove(os.path.abspath(self.backup_path + U'/voc2brain_backup_' + str(self.date_list[0])+ \".sdb3\") )", "def removeOldCars(self):\n self.cars = [car for car in self.cars if (self.currentFrame - car.updatedInFrame) < DROP_AFTER_N_FRAMES]\n for i, car in enumerate(self.cars): # update id's\n car.id = i + 1", "def remove_old_logs():\r\n three_days_old = dt.date.today() - dt.timedelta(days=2)\r\n three_days_ago = three_days_old.strftime('%Y%m%d')\r\n\r\n for f in os.listdir(ANCILS_DIR):\r\n if not f.startswith(('model_configs_latest.txt', 'model_configs-2019-11-02.txt')):\r\n file_date = f.strip('.txt').split('_')[2].replace(\"-\",\"\")\r\n\r\n if not file_date.endswith('01'):\r\n if int(file_date) < int(three_days_ago):\r\n cmd1 = \"git add {}\".format(os.path.join(ANCILS_DIR, f))\r\n subprocess.run(cmd1, shell=True)\r\n cmd = \"git rm -f {}\".format(os.path.join(ANCILS_DIR, f))\r\n subprocess.run(cmd, shell=True)", "def delete_old_records(\n cls,\n task_id: str,\n dag_id: str,\n num_to_keep=conf.getint(\"core\", \"max_num_rendered_ti_fields_per_task\", fallback=0),\n session: Session = None,\n ):\n if num_to_keep <= 0:\n return\n\n tis_to_keep_query = (\n session.query(cls.dag_id, cls.task_id, cls.execution_date)\n .filter(cls.dag_id == dag_id, cls.task_id == task_id)\n .order_by(cls.execution_date.desc())\n .limit(num_to_keep)\n )\n\n if session.bind.dialect.name in [\"postgresql\", \"sqlite\"]:\n # Fetch Top X records given dag_id & task_id ordered by Execution Date\n subq1 = tis_to_keep_query.subquery('subq1')\n\n session.query(cls).filter(\n cls.dag_id == dag_id,\n cls.task_id == task_id,\n tuple_(cls.dag_id, cls.task_id, cls.execution_date).notin_(subq1),\n ).delete(synchronize_session=False)\n elif session.bind.dialect.name in [\"mysql\"]:\n # Fetch Top X records given dag_id & task_id ordered by Execution Date\n subq1 = tis_to_keep_query.subquery('subq1')\n\n # Second Subquery\n # Workaround for MySQL Limitation (https://stackoverflow.com/a/19344141/5691525)\n # Limitation: This version of MySQL does not yet support\n # LIMIT & IN/ALL/ANY/SOME subquery\n subq2 = session.query(subq1.c.dag_id, subq1.c.task_id, subq1.c.execution_date).subquery('subq2')\n\n session.query(cls).filter(\n cls.dag_id == dag_id,\n cls.task_id == task_id,\n tuple_(cls.dag_id, cls.task_id, cls.execution_date).notin_(subq2),\n ).delete(synchronize_session=False)\n else:\n # Fetch Top X records given dag_id & task_id ordered by Execution Date\n tis_to_keep = tis_to_keep_query.all()\n\n filter_tis = [\n not_(\n and_(\n cls.dag_id 
== ti.dag_id,\n cls.task_id == ti.task_id,\n cls.execution_date == ti.execution_date,\n )\n )\n for ti in tis_to_keep\n ]\n\n session.query(cls).filter(and_(*filter_tis)).delete(synchronize_session=False)", "def previous():\n releases_list = releases()\n try:\n return releases_list[-2]\n except IndexError:\n return None", "def delete_old(self):\n retention_time = Host().get_retention_time()\n snapshots = self._list_snapshots()\n\n for snap in snapshots['Snapshots']:\n snapshot_id = snap['SnapshotId']\n start_time = snap['StartTime']\n if start_time <= retention_time:\n self.resource.delete_snapshot(\n SnapshotId=snapshot_id,\n DryRun=DRY_RUN\n )\n self.deleted_ids.append(snapshot_id)\n\n notify = Notifier()\n notify.send(self.created_id, self.deleted_ids)", "def _remove_lost_detections(self, age_threshold=8):\n\n keep_detections = []\n for detection in self.detections:\n if detection.frames_undetected < self.delete_after and \\\n not (detection.is_hidden and detection.age < age_threshold):\n keep_detections.append(detection)\n self.detections = keep_detections", "def select_latest_micro_versions(versions):\n seen_minors = set()\n res = []\n\n for ver, _ in sorted(\n versions.items(),\n # Sort by (minor_version, upload_time) in descending order\n key=lambda x: (Version(x[0]).release[:2], x[1]),\n reverse=True,\n ):\n minor_ver = Version(ver).release[:2]\n\n if minor_ver not in seen_minors:\n seen_minors.add(minor_ver)\n res.insert(0, ver)\n\n return res", "def _remove_old_items(self):\n if self.size_limit is not None:\n while len(self) > self.size_limit:\n self.popitem(last=False)", "def remove_retired_generators(gen_assoc):\n existing = gen_assoc.loc[\n (gen_assoc.operational_status == 'existing')\n ]\n # keep the gens that retired mid-report-year that have generator\n # specific data\n retiring = gen_assoc.loc[\n (gen_assoc.operational_status == 'retired')\n & (gen_assoc.retirement_date.dt.year == gen_assoc.report_date.dt.year)\n & (gen_assoc.net_generation_mwh.notnull())\n ]\n\n # check how many generators are retiring mid-year that don't have\n # gen-specific data.\n retiring_removing = gen_assoc.loc[\n (gen_assoc.operational_status == 'retired')\n & (gen_assoc.retirement_date.dt.year == gen_assoc.report_date.dt.year)\n & (gen_assoc.net_generation_mwh.isnull())\n ]\n logger.info(\n f'Removing {len(retiring_removing.drop_duplicates(IDX_GENS))} '\n 'generators that retired mid-year out of '\n f'{len(gen_assoc.drop_duplicates(IDX_GENS))}'\n )\n\n gen_assoc_removed = pd.concat([existing, retiring])\n return gen_assoc_removed", "def delete_oldest_uploads(client: discovery.Resource,\n dataimport_ref: DataImportReference,\n max_to_keep: Optional[int] = None) -> list[str]:\n if max_to_keep is not None and max_to_keep <= 0:\n raise ValueError(f'Invalid value for argument `max_to_keep`. '\n f'Expected a strictly positive value. 
'\n f'Received max_to_keep={max_to_keep}.')\n request = client.management().uploads().list(\n accountId=dataimport_ref.account_id,\n webPropertyId=dataimport_ref.property_id,\n customDataSourceId=dataimport_ref.dataset_id)\n response = request.execute()\n sorted_uploads = sorted(response['items'], key=lambda x: x['uploadTime'])\n if max_to_keep is not None:\n uploads_to_delete = sorted_uploads[:-max_to_keep] # pylint: disable=invalid-unary-operand-type\n else:\n uploads_to_delete = sorted_uploads\n ids_to_delete = [x['id'] for x in uploads_to_delete]\n if ids_to_delete:\n request = client.management().uploads().deleteUploadData(\n accountId=dataimport_ref.account_id,\n webPropertyId=dataimport_ref.property_id,\n customDataSourceId=dataimport_ref.dataset_id,\n body={'customDataImportUids': ids_to_delete})\n request.execute()\n return ids_to_delete", "def cleanup(self):\n files = self.nlst()\n latest = self.latest_filename\n for filename in files:\n if filename != latest:\n result = self.delete(filename)\n logger.info(f\"Deleted old export from FTP: {result}\")", "def prune(self) -> None:\n\n deletion_count = self._delete(\n \"DELETE FROM bookmarks WHERE deleted IS NOT NULL\"\n )\n\n unit = \"row\" if deletion_count == 1 else \"rows\"\n\n cherrypy.engine.publish(\n \"applog:add\",\n \"bookmarks\",\n f\"{deletion_count} {unit} deleted\"\n )\n\n if deletion_count > 0:\n cherrypy.engine.publish(\n \"cache:clear\",\n \"bookmarks:all_tags\"\n )", "def remove_older_backups(days=30):\n oldest = arrow.now().shift(days=-30).timestamp\n files = [os.path.join(app.config['BACKUP_DIR'], x) for x in os.listdir(app.config['BACKUP_DIR']) if re.search(r'backup-(\\d{4}).zip', x, re.IGNORECASE)]\n for fpath in files:\n s = os.stat(fpath)\n if s.st_ctime < oldest:\n print(\"deleting\", fpath)\n os.unlink(fpath)", "def prune_obsolete_instances():\n\n removed_instances = []\n\n for instance in get_obsolete_instances(env.vhost_path):\n is_current = bool(get_instance_stamp(env.current_instance_path) == instance)\n is_previous = bool(get_instance_stamp(env.previous_instance_path) == instance)\n\n if not (is_current or is_previous):\n commands.delete(os.path.join(env.vhost_path, instance))\n removed_instances.append(instance)\n\n if removed_instances:\n print(green('\\nThese old instances were removed from remote filesystem:'))\n print(removed_instances)", "def _cleanup(self, fnum):\n while os.path.exists('%s.%s' % (self.name, fnum)):\n try:\n fname = '%s.%s' % (self.name, fnum)\n os.unlink(fname)\n # self.log.debug(\"Cleaned up file: %s\", fname)\n except:\n pass\n fnum -= 1", "def purge(self):\n if not self.index:\n return\n now = time()\n \n while self.expiry[0].orig_expires <= now or len(self.index) > MAX_ASSOCS:\n self.remove_one()\n if not self.expiry:\n if not self.index:\n return\n self.rotate_lists()\n return", "def delete_release(ctx, name):\n\n try:\n\n gh = ctx.obj.github\n\n log.echo('Deleting release...', break_line=False)\n gh.delete_release(name=name)\n log.checkmark()\n except BaseException as _:\n log.xmark()\n raise", "def prune(self):\n self.sort(key=lambda chunk: chunk.probability)\n del self[:-self.model.num_parses]", "def clear_unversioned(self, min_age=None):\r\n if min_age is None:\r\n min_age = self.age_thresh_del_unversioned\r\n\r\n compilelock.get_lock()\r\n all_key_datas = self.module_hash_to_key_data.values()\r\n try:\r\n for key_data in all_key_datas:\r\n if not key_data.keys:\r\n # May happen for broken versioned keys.\r\n continue\r\n for key_idx, key in enumerate(key_data.keys):\r\n 
version, rest = key\r\n if version:\r\n # Since the version is included in the module hash,\r\n # it should not be possible to mix versioned and\r\n # unversioned keys in the same KeyData object.\r\n assert key_idx == 0\r\n break\r\n if not version:\r\n # Note that unversioned keys cannot be broken, so we can\r\n # set do_manual_check to False to speed things up.\r\n key_data.delete_keys_from(self.entry_from_key,\r\n do_manual_check=False)\r\n entry = key_data.get_entry()\r\n # Entry is guaranteed to be in this dictionary, because\r\n # an unversioned entry should never have been loaded via\r\n # refresh.\r\n assert entry in self.module_from_name\r\n\r\n del self.module_from_name[entry]\r\n del self.module_hash_to_key_data[key_data.module_hash]\r\n\r\n parent = os.path.dirname(entry)\r\n assert parent.startswith(os.path.join(self.dirname, 'tmp'))\r\n _rmtree(parent, msg='unversioned', level=logging.INFO,\r\n ignore_nocleanup=True)\r\n\r\n # Sanity check: all unversioned keys should have been removed at\r\n # this point.\r\n for key in self.entry_from_key:\r\n assert key[0]\r\n\r\n time_now = time.time()\r\n for filename in os.listdir(self.dirname):\r\n if filename.startswith('tmp'):\r\n try:\r\n open(os.path.join(self.dirname, filename, 'key.pkl')\r\n ).close()\r\n has_key = True\r\n except IOError:\r\n has_key = False\r\n if not has_key:\r\n # Use the compiled file by default\r\n path = module_name_from_dir(os.path.join(self.dirname,\r\n filename),\r\n False)\r\n # If it don't exist, use any file in the directory.\r\n if path is None:\r\n path = os.path.join(self.dirname, filename)\r\n files = os.listdir(path)\r\n if files:\r\n path = os.path.join(path, files[0])\r\n else:\r\n # If the directory is empty skip it.\r\n # They are deleted elsewhere.\r\n continue\r\n age = time_now - last_access_time(path)\r\n\r\n # In normal case, the processus that created this\r\n # directory will delete it. 
However, if this processus\r\n # crashes, it will not be cleaned up.\r\n # As we don't know if this directory is still used,\r\n # we wait one week and suppose that the processus\r\n # crashed, and we take care of the clean-up.\r\n if age > min_age:\r\n _rmtree(os.path.join(self.dirname, filename),\r\n msg='old unversioned', level=logging.INFO,\r\n ignore_nocleanup=True)\r\n finally:\r\n compilelock.release_lock()", "def test_version_remove_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('version remove 1.0')\n rv, output = self._execute('version list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def _revert(self):\n self.release_from_output(\"data\")\n # delete ONA submissions on ONA", "def remove_unimproved_species(self):\n for spec_num, spec in list(self.species.items()):\n if self.gen_num - spec.gen_last_improved > self.species_dropoff_age:\n self.species.pop(spec_num)", "def evaluate(self, new_filename) -> None:\n self.files_to_keep.append(new_filename)\n self.files_to_keep.sort()\n # If we have more files than we desire:\n if len(self.files_to_keep) > self.desired_backups:\n prev_timestamp = None\n for file in self.files_to_keep:\n timestamp = extract_datetime(file)\n if prev_timestamp is None:\n prev_timestamp = timestamp\n continue\n # If the time difference between two files is less than the desired interval:\n if timestamp - prev_timestamp < self.interval:\n self.files_to_keep.remove(file)\n break\n # If the time difference between two files is greater than or equal to the interval:\n if timestamp - prev_timestamp >= self.interval:\n prev_timestamp = timestamp\n # If we have too many files, get rid of the oldest one:\n if len(self.files_to_keep) > self.desired_backups:\n self.files_to_keep.pop(0)", "def clear_old(self, age_thresh_del=None, delete_if_problem=False):\r\n if age_thresh_del is None:\r\n age_thresh_del = self.age_thresh_del\r\n\r\n # Ensure that the too_old_to_use list return by refresh() will\r\n # contain all modules older than age_thresh_del.\r\n if age_thresh_del < self.age_thresh_use:\r\n if age_thresh_del > 0:\r\n _logger.warning(\"Clearing modules that were not deemed \"\r\n \"too old to use: age_thresh_del=%d, \"\r\n \"self.age_thresh_use=%d\",\r\n age_thresh_del,\r\n self.age_thresh_use)\r\n else:\r\n _logger.info(\"Clearing all modules.\")\r\n age_thresh_use = age_thresh_del\r\n else:\r\n age_thresh_use = None\r\n\r\n compilelock.get_lock()\r\n try:\r\n # Update the age of modules that have been accessed by other\r\n # processes and get all module that are too old to use\r\n # (not loaded in self.entry_from_key).\r\n too_old_to_use = self.refresh(\r\n age_thresh_use=age_thresh_use,\r\n delete_if_problem=delete_if_problem)\r\n\r\n for entry in too_old_to_use:\r\n # TODO: we are assuming that modules that haven't been\r\n # accessed in over age_thresh_del are not currently in\r\n # use by other processes, but that could be false for\r\n # long-running jobs, or if age_thresh_del < 0.\r\n assert entry not in self.module_from_name\r\n parent = os.path.dirname(entry)\r\n assert parent.startswith(os.path.join(self.dirname, 'tmp'))\r\n _rmtree(parent, msg='old cache directory', level=logging.INFO,\r\n ignore_nocleanup=True)\r\n\r\n finally:\r\n compilelock.release_lock()", "def remove_one(self):\n item = self.expiry.pop(0)\n if item.updated:\n self.new_expiry.append(item)\n return\n del self.index[item.target]\n return", "def keep_last_lines(self, num_lines):\n self.data = self.data[-num_lines:]", 
"def delete_old_news():\n # Configure the connection to the database\n client = MongoClient(os.environ['MongoDB_URI'])\n # client = MongoClient('localhost', 27017)\n db = client['kenya-news'] # Select the database\n collection = db.news\n time_boundary = datetime.now() - timedelta(hours=48)\n print(time_boundary.isoformat())\n collection.remove({'$or': [\n {'date': {'$lt': time_boundary.isoformat()}},\n {'date': {'$eq': 0}}\n ]})", "def purge(self, older_than=60*60*24, n_last=100):\n now = time.time()\n\n # on ticks\n m = 0\n for n, tick in enumerate(self._ticks):\n if now - tick[0] < older_than:\n m = n\n break\n\n if m > 0:\n # keep the m+1 recents\n self._ticks = self._ticks[m:]\n\n # per tf of candles\n for tf, candles in self._candles.items():\n m = 0\n\n for n, candle in enumerate(candles):\n if now - candle.timestamp < older_than:\n m = n\n break\n\n if m > 0:\n # keep the m+1 recents\n self._candles[tf] = candles[m:]\n\n # per tf of buy/sell signals\n for tf, buy_sells in self._buy_sells.items():\n m = 0\n\n for n, buy_sell in enumerate(buy_sells):\n if now - buy_sell.timestamp < older_than:\n m = n\n break\n\n if m > 0:\n # keep the m+1 recents\n self._buy_sells[tf] = buy_sells[m:]", "def trim_decreasing_digits(self):\n vals_to_del = defaultdict(list)\n for key in self.Poss_Tree:\n for choice in self.Poss_Tree[key]:\n if choice < int(str(key)[-1]):\n vals_to_del[key].append(choice)\n for key in vals_to_del:\n for val in vals_to_del[key]:\n self.Poss_Tree[key].remove(val)", "def trim_snapshots(self, delete=False):\r\n snaps = self.get_snapshots()\r\n # Always keep the oldest and the newest\r\n if len(snaps) <= 2:\r\n return snaps\r\n snaps = snaps[1:-1]\r\n now = datetime.datetime.now(snaps[0].date.tzinfo)\r\n midnight = datetime.datetime(year=now.year, month=now.month,\r\n day=now.day, tzinfo=now.tzinfo)\r\n # Keep the first snapshot from each day of the previous week\r\n one_week = datetime.timedelta(days=7, seconds=60*60)\r\n print midnight-one_week, midnight\r\n previous_week = self.get_snapshot_range(snaps, midnight-one_week, midnight)\r\n print previous_week\r\n if not previous_week:\r\n return snaps\r\n current_day = None\r\n for snap in previous_week:\r\n if current_day and current_day == snap.date.day:\r\n snap.keep = False\r\n else:\r\n current_day = snap.date.day\r\n # Get ourselves onto the next full week boundary\r\n if previous_week:\r\n week_boundary = previous_week[0].date\r\n if week_boundary.weekday() != 0:\r\n delta = datetime.timedelta(days=week_boundary.weekday())\r\n week_boundary = week_boundary - delta\r\n # Keep one within this partial week\r\n partial_week = self.get_snapshot_range(snaps, week_boundary, previous_week[0].date)\r\n if len(partial_week) > 1:\r\n for snap in partial_week[1:]:\r\n snap.keep = False\r\n # Keep the first snapshot of each week for the previous 4 weeks\r\n for i in range(0,4):\r\n weeks_worth = self.get_snapshot_range(snaps, week_boundary-one_week, week_boundary)\r\n if len(weeks_worth) > 1:\r\n for snap in weeks_worth[1:]:\r\n snap.keep = False\r\n week_boundary = week_boundary - one_week\r\n # Now look through all remaining snaps and keep one per month\r\n remainder = self.get_snapshot_range(snaps, end_date=week_boundary)\r\n current_month = None\r\n for snap in remainder:\r\n if current_month and current_month == snap.date.month:\r\n snap.keep = False\r\n else:\r\n current_month = snap.date.month\r\n if delete:\r\n for snap in snaps:\r\n if not snap.keep:\r\n boto.log.info('Deleting %s(%s) for %s' % (snap, snap.date, 
self.name))\r\n snap.delete()\r\n return snaps", "def prune(args):\n keep = []\n for path in args.cache:\n if os.path.exists(path):\n keep.append(path)\n else:\n sys.stderr.write('Removing: {}'.format(path) + '\\n')\n args.cache = keep\n args.update = True\n return", "def greater(data, version):\n result = list()\n given_version = parse(version)\n for release, info in data.items():\n python_version = Pypi._get_python_version(info)\n ver = parse(release)\n if not ver.is_prerelease and ver > given_version:\n result.append(dict(version=str(ver), python_version=python_version))\n\n return result", "def RemoveOldSnapshots(desktop):\n # Compute the file prefix of a snapshot created one day ago.\n yesterday = datetime.datetime.now() - datetime.timedelta(1)\n old_snapshot = yesterday.strftime('ChromiumSnapshot%Y%m%d%H%M%S')\n # Collect snapshots at least as old as that one created a day ago.\n to_delete = []\n for snapshot in glob.iglob(os.path.join(desktop, 'ChromiumSnapshot*.png')):\n if os.path.basename(snapshot) < old_snapshot:\n to_delete.append(snapshot)\n # Delete the collected snapshots.\n for snapshot in to_delete:\n print 'Removing old snapshot: %s' % snapshot\n try:\n os.remove(snapshot)\n except OSError, e:\n print >> sys.stderr, e", "def release(self, number):\n self.numbers.add(number)", "async def _maybe_release_last_part(self) -> None:\n ...", "def _remove_pub(pub):\n # counting publisher instance per topic name\n TopicBack.pub_instance_count[pub.name] -= 1\n\n # Be aware of https://github.com/ros/ros_comm/issues/111\n return pub.unregister()", "def delete_last_record():\n\tnewRcrds = list()\n\twith jsonlines.open('tempRecords.jsonl', mode='r') as readerOp:\n\t\tfor obj in readerOp:\n\t\t\tnewRcrds.append(obj)\n\twith jsonlines.open('tempRecords.jsonl', mode='w') as writerOp:\n\t\tif len(newRcrds) != 1:\n\t\t\t# checking if the record being removed is the last record which has file names.\n\t\t\tfor obji in newRcrds[:len(newRcrds)-1]:\n\t\t\t\twriterOp.write(obji)\n\t\telse:\n\t\t\t# if its the last record then do not delet it, as it is required for annotation data\n\t\t\tfor obji in newRcrds[:len(newRcrds)]:\n\t\t\t\twriterOp.write(obji)", "def purge(self,\r\n noterange=None):\r\n\r\n if noterange is None:\r\n noterange = [str(Index(a_temp))\r\n for a_temp\r\n in self.indexes()]\r\n for i_temp in [str(Index(n))\r\n for n in self.indexes()\r\n if Index(n) > Index(str(0))\r\n and str(Index(n)) in noterange]:\r\n if (len(str(self.get_keys_from_note(i_temp))) < 5\r\n and self.get_text_from_note(i_temp).replace(EOL,\r\n EMPTYCHAR).strip() == EMPTYCHAR):\r\n\r\n self.softdelete(i_temp)", "def maintain():\n restic = Restic(config.env, options=config.options)\n\n if restic.need_init():\n logger.info(\"Repository has not been initialized\")\n return\n\n restic.run(\n [\n \"forget\",\n \"--keep-within\",\n \"5d\",\n \"--keep-daily\",\n \"14\",\n \"--keep-weekly\",\n \"5\",\n \"--keep-monthly\",\n \"12\",\n \"--keep-yearly\",\n \"2\",\n ],\n )", "def purge_history(sc, table, history_table, keep_latest_n):\n\n if sys.platform != \"darwin\":\n # remove the corresponding s3 location - safety check that the location is a run_id location in particular buckets.\n # wants to make sure we are deleting s3 path with expected pattern.\n # Expected S3 path is in the format of - {some s3 bucket}/{some folder}/{**optional subfolders**}/{job name folder}/{folder with run id}/*.{file extension}\n\n path_regex=re.compile('s3://MyCompany[.a-z_-]*/[.a-z_-]*(/[.a-z_-]*)?/[a-z-_]*/run_id=\\d{8}_\\d{4}')\n 
path_regex_group = re.compile(r'^s3://(?P<bucket>.*?)/(?P<key>.*)')\n\n client = boto3.client('s3')\n s3 = boto3.resource('s3')\n s3_rm_path = []\n keys_in_parent = []\n keys_to_purge = []\n\n if history_table is not None:\n partitions = sc.sql(\"show partitions {hist_table_name}\".format(hist_table_name=history_table)).collect()\n\n # modifying this code as higher version of hive has key as 'partition', instead of 'result' . Hive 2.3.3-amzn-2\n # partitions = [_i.result for _i in partitions]\n\n partitions = [_i.asDict().values()[0] for _i in partitions]\n partitions.sort(reverse=True)\n\n if len(partitions) > keep_latest_n:\n partitions_to_purge = partitions[keep_latest_n:]\n\n for _i in range(len(partitions_to_purge)):\n partition_val = partitions_to_purge[_i].split('=')[1]\n df = sc.sql(\"describe formatted {hist_table_name} partition (run_id='{partition_val}')\".format(hist_table_name=history_table, partition_val=partition_val))\n\n s3_rm_path.append(df.where(df.col_name.startswith('Location')).select('data_type').collect()[0]['data_type'])\n\n # drop this partition from the table\n sc.sql(\"alter table {hist_table_name} drop if exists partition (run_id='{partition_val}')\".format(hist_table_name=history_table, partition_val=partition_val))\n\n else:\n # delete old s3 run_ids which will be there in the parent folder\n df = sc.sql(\"describe formatted {table_name}\".format(table_name=table))\n location = df.where(df.col_name.startswith('Location')).select('data_type').collect()[0]['data_type']\n m = re.match(path_regex_group, location).groupdict()\n bucket_name = m['bucket']\n parent_key = m['key'].split(\"=\")[0].replace(\"run_id\", \"\")\n response = client.list_objects_v2(Bucket=bucket_name, Prefix=parent_key, Delimiter=\"/\")\n list_of_keys = [i['Prefix'] for i in response['CommonPrefixes']]\n\n for i in list_of_keys:\n keys_in_parent.append(i.split(\"run_id=\")[1].replace(\"/\", \"\"))\n\n keys_in_parent.sort(reverse=True)\n\n if len(keys_in_parent) > keep_latest_n:\n keys_to_purge = keys_in_parent[keep_latest_n:]\n\n for _i in keys_to_purge:\n s3_rm_path.append(os.path.join(\"s3://\", bucket_name, parent_key, \"run_id=\"+_i))\n\n # remove the paths from s3\n for _i in s3_rm_path:\n if re.match(path_regex, _i):\n m = re.match(path_regex_group, _i).groupdict()\n bucket = s3.Bucket(m['bucket'])\n for obj in bucket.objects.filter(Prefix=m['key']):\n s3.Object(bucket.name, obj.key).delete()", "def remove_launch(self, index):\n self.launches.pop(index)", "def remove_discarded(self):\n while self.shrink_target.has_discards:\n discarded = []\n\n for ex in self.shrink_target.examples:\n if ex.discarded and (not discarded or ex.start >= discarded[-1][-1]):\n discarded.append((ex.start, ex.end))\n\n assert discarded\n\n attempt = bytearray(self.shrink_target.buffer)\n for u, v in reversed(discarded):\n del attempt[u:v]\n\n if not self.incorporate_new_buffer(attempt):\n break", "def remove_entry(self, number: int) -> None:\n raise NotImplementedError", "def delete_build(self, package, version):\n with self._conn.begin():\n self._conn.execute(\n \"VALUES (delete_build(%s, %s))\", (package, version))", "def get_latest_release(account = None):\n names = get_db_name(account=account, db_type=\"compara\")\n compara = []\n for name in names:\n compara += [int(name.Release)]\n return str(max(compara))", "def remove_old_cart_items():\n print \"Removing old carts\"\n remove_before = datetime.now() + timedelta(days=-settings.SESSION_AGE_DAYS)\n cart_ids = []\n old_items = 
CartItem.objects.values('cart_id').annotate(last_change=Max('date_added')).filter(\n last_change__lt=remove_before).order_by()\n for item in old_items:\n cart_ids.append(item['cart_id'])\n to_remove = CartItem.objects.filter(cart_id__in=cart_ids)\n to_remove.delete()\n print str(len(cart_ids)) + \" carts were removed\"", "def clean_up_old_exports(self):\n threshold = datetime.datetime.utcnow() - datetime.timedelta(days=30)\n self.session.query(Export).filter(Export.started_at < threshold).delete()", "def prune(self, min_count):\n if not self.sorted:\n self.sort()\n for k, count in enumerate(self.Nx):\n if count < min_count:\n self.truncate(k)\n break", "def free(n):\n global _tracks\n _tracks[n] = False\n while not _tracks[-1]:\n _tracks.pop()\n if len(_tracks) == 0:\n break", "def unpin(ctx, releaser, release_group, nocheck):\n # Doing for loops separately so an exit doesn't happen midway\n for group in release_group:\n if not nocheck and not releaser.is_valid_release_group(group):\n click.echo(f\"Invalid release group: {group}\")\n exit(3)\n\n for group in release_group:\n # Empty commit ID unpins devices\n releaser.set_release(None, group)", "def snapshots_to_keep(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"snapshots_to_keep\")", "def snapshots_to_keep(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"snapshots_to_keep\")", "def snapshots_to_keep(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"snapshots_to_keep\")", "def snapshots_to_keep(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"snapshots_to_keep\")", "def remove_old_repos():\n with open(repolist_file, \"r+\") as repofile:\n repolist = repofile.readlines()\n rm_indices = []\n for idx in xrange(len(repolist)):\n l = repolist[idx].strip()\n if re.match('^[x]',l):\n repodir = clone_dir + \"/\" + os.path.basename(l)\n shutil.rmtree(repodir, ignore_errors=True)\n rm_indices.append(idx)\n for i in rm_indices:\n del repolist[i]\n repofile.seek(0)\n repofile.truncate(0)\n repofile.flush()\n repofile.writelines(repolist)\n pass", "def remove():", "def release(self, number: int) -> None:\n self.nums.add(number)", "def latest(data):\n result = dict()\n version = parse(\"0\")\n for release, info in data.items():\n python_version = Pypi._get_python_version(info)\n ver = parse(release)\n if not ver.is_prerelease:\n version = max(version, ver)\n python_version = python_version\n\n result = dict(version=str(version), python_version=python_version)\n\n return [result]", "def _remove_key(self):\n heaps = self.priorities\n keys = heaps.keys()\n keys = min(keys)\n heaps.pop(keys)", "def choose_version(self):\n if len(self.unused_versions) == 0:\n self.unused_versions = list(range(len(self.versions)))\n idx = np.random.choice(self.unused_versions)\n self.unused_versions.remove(idx)\n version = self.versions[idx]\n return version", "def remove_files(max_age_sec):\n with session_transaction() as session:\n nb_deleted = File.remove_old_files(max_age_sec, session)\n log.debug(\"Max_age_sec: %s Nb_deleted: %s\", max_age_sec, nb_deleted)\n return nb_deleted", "def deleteLastNeedle(self):\r\n # productive #onButton\r\n profprint(self.getName())\r\n widget = slicer.modules.NeedleFinderWidget\r\n if widget.deleteNeedleButton.isEnabled() and self.lastNeedleNames:\r\n name = self.lastNeedleNames.pop()\r\n print \"removing needle with name: \", name\r\n while slicer.util.getNodes(name + '*') != {}:\r\n nodes = slicer.util.getNodes(name + '*')\r\n for node in nodes.values():\r\n 
slicer.mrmlScene.RemoveNode(node)\r\n # rebuild report table\r\n ID = name.lstrip('auto-seg_').lstrip('manual-seg_').lstrip('obturator-seg_').lstrip('0123456789').lstrip('-ID-vtkMRMLModelNode').rstrip('manual-seg_').rstrip('0123456789')\r\n print \"needle ID: <%s>\" % ID\r\n self.deleteNeedleFromTable(int(ID))", "def delete_old_tasks(max_task_age=86400):\n from .models import Task\n Task.tasks.delete_old_tasks(max_task_age)", "def remove():\n run('pew rm {0}'.format(package_name()))", "def cleanup_old_backups(self):\n print(\"Cleaning Old Backups for media files\")\n\n file_list = utils.get_backup_file_list(\n self.get_databasename(),\n self.get_servername(),\n 'media.tar.gz',\n self.storage\n )\n\n for backup_date, filename in file_list[0:-dbbackup_settings.CLEANUP_KEEP_MEDIA]:\n if int(backup_date.strftime(\"%d\")) != 1:\n print(\" Deleting: %s\" % filename)\n self.storage.delete_file(filename)", "def Releases():\n return releases", "def purge_last(self, tf, n):\n if tf > 0:\n candles = self._candles.get(tf)\n if candles and len(candles) > n:\n self._candles[tf] = candles[-n:]\n elif self._ticks and len(self._ticks) > n:\n self._ticks = self._ticks[-n:]", "def delete_num(self, num):\r\n saved = task2.ListADT()\r\n saved.append(\"d\")\r\n if num == \"\":\r\n saved.append(0)\r\n for line_num in range(len(self.text_lines)):\r\n saved.append(self.text_lines[0])\r\n self.text_lines.delete(0)\r\n else:\r\n num = int(num)\r\n if num == 0:\r\n raise ValueError(\"Zero is not a valid line number\")\r\n elif num > 0:\r\n num -= 1\r\n saved.append(num)\r\n saved.append(self.text_lines[num])\r\n self.text_lines.delete(num)\r\n self.memory.push(saved)", "def purge(self):\n keys = [k for (k, v) in self.get_range()]\n\n [self.remove(k) for k in keys]", "def deleteLastNeedle(self):\n #productive #onButton\n profprint(self.getName())\n widget = slicer.modules.NeedleFinderWidget\n if widget.deleteNeedleButton.isEnabled() and self.lastNeedleNames:\n name=self.lastNeedleNames.pop()\n print \"removing needle with name: \",name\n while slicer.util.getNodes(name+'*') != {}:\n nodes = slicer.util.getNodes(name+'*')\n for node in nodes.values():\n slicer.mrmlScene.RemoveNode(node)\n # rebuild report table\n ID=name.lstrip('python-catch-round_').lstrip('manual-seg_').lstrip('obturator-seg_').lstrip('0123456789').lstrip('-ID-vtkMRMLModelNode')\n print \"needle ID: <%s>\"%ID\n self.deleteNeedleFromTable(int(ID))" ]
[ "0.64156675", "0.6380838", "0.63680077", "0.56757736", "0.544219", "0.5342159", "0.5324215", "0.52694285", "0.5213804", "0.51900595", "0.51862675", "0.5181022", "0.51753306", "0.51589394", "0.51021165", "0.50865173", "0.50815326", "0.5056275", "0.5055268", "0.505497", "0.5032865", "0.5020655", "0.50156844", "0.4998614", "0.49933705", "0.49908164", "0.49771422", "0.49425587", "0.49103996", "0.49086845", "0.4896209", "0.48938656", "0.4873395", "0.48607752", "0.48599324", "0.4854515", "0.48391497", "0.4834638", "0.48292273", "0.48258784", "0.48236623", "0.48223388", "0.48219296", "0.48171097", "0.48041332", "0.47963166", "0.47906497", "0.47648692", "0.4747524", "0.4747496", "0.47452348", "0.4745161", "0.47408938", "0.47375676", "0.4730055", "0.47229537", "0.47152418", "0.47137406", "0.4711096", "0.46992913", "0.46951208", "0.46764094", "0.46635997", "0.46613616", "0.4646572", "0.46457937", "0.4641453", "0.46368274", "0.4635892", "0.46259755", "0.4620738", "0.4616814", "0.46065062", "0.4603442", "0.46007967", "0.45943183", "0.45942912", "0.45842084", "0.45808962", "0.45791724", "0.4576781", "0.4576781", "0.4576781", "0.4576781", "0.4573777", "0.45710862", "0.45700285", "0.45681435", "0.45662472", "0.45618343", "0.45571437", "0.45543662", "0.4550664", "0.454732", "0.45438996", "0.45429823", "0.4541593", "0.45382926", "0.45160422", "0.4511665" ]
0.69273704
0
Class handles loading data for many separate nodes.
def __init__(self, dataset: str, train: bool, subset: bool): PERCENT = .3 if dataset == 'MNIST': data = torchvision.datasets.MNIST('./data', train=train, download=True, transform=torchvision.transforms.Compose([ torchvision.transforms.ToTensor(), torchvision.transforms.Normalize( (0.1307,), (0.3081,)) ])) else: raise ValueError data_size = len(data) self.data = data if subset: indx = torch.randperm(data_size)[:int(data_size * PERCENT)] self.samples = self.data.data[indx, :, :] self.labels = self.data.targets[indx] else: self.samples = self.data.data self.labels = self.data.targets self.random_seed = 42
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_data():\n\n server_node = load_nodes(SERVER_NODE_INFILE)\n road_node = load_nodes(ROAD_NODE_INFILE)\n road_segment_point = load_nodes(ROAD_SEGMENT_POINT_INFILE)\n\n return server_node, road_node, road_segment_point", "def load_data(self):", "def dispatch_load(self):\n\n for node in self.nodes:\n threading.Thread(target=self.__start_load, args=(node,)).start()", "def load_data(self) -> None:", "def load(self):\n #self.df = read_file(\"../data/yelp_academic_dataset_user.json\") #Full Data.\n self.df = read_file(\"../data/user300.json\") #For local machine.\n #self.get_friend_list()\n #self.save_friend_nodes()", "def __init_load(self, c_config):\n\n n_config = c_config['nodes']\n lst = []\n for n in n_config:\n node_num = n['count']\n # generate a number of nodes with the same config according to \"count\" property\n for i in range(node_num):\n node = Node({**c_config['metrics'], **n['additional_metrics']}, c_config['schema'],\n c_config['table'], n['id'])\n lst.append(node)\n self.nodes = lst", "def load_data(self):\n raise NotImplementedError()", "def load_data(root, num_seen, batch_size, num_workers):\n CIFAR10.init(root, num_seen)\n query_dataset = CIFAR10('query', transform=query_transform())\n seen_dataset = CIFAR10('seen', transform=train_transform())\n unseen_dataset = CIFAR10('unseen', transform=train_transform())\n retrieval_dataset = CIFAR10('retrieval', transform=train_transform())\n\n query_dataloader = DataLoader(\n query_dataset,\n batch_size=batch_size,\n pin_memory=True,\n num_workers=num_workers,\n )\n\n seen_dataloader = DataLoader(\n seen_dataset,\n shuffle=True,\n batch_size=batch_size,\n pin_memory=True,\n num_workers=num_workers,\n )\n\n unseen_dataloader = DataLoader(\n unseen_dataset,\n shuffle=True,\n batch_size=batch_size,\n pin_memory=True,\n num_workers=num_workers,\n )\n\n retrieval_dataloader = DataLoader(\n retrieval_dataset,\n shuffle=True,\n batch_size=batch_size,\n pin_memory=True,\n num_workers=num_workers,\n )\n\n return query_dataloader, seen_dataloader, unseen_dataloader, retrieval_dataloader", "def loadData(self):\n machineToNode = {}\n self.listOfMachines = []\n nextID = 0\n self.processingSteps = []\n with open(self.filename) as f:\n lines = f.read().splitlines()\n for line in lines:\n formatted = line.split(\"\\t\")\n order = int(formatted[0])\n machine = int(formatted[1])\n timestamp = float(formatted[2])\n if machine not in machineToNode: # normalizing machines according to the nodes (1,2,3... 
instead of 1,34,2...)\n machineToNode[machine] = nextID\n nextID +=1\n self.listOfMachines.append(machineToNode[machine]) # normalized list of all machines\n\n pstep = ProcessingStep(machineToNode[machine], timestamp, order)\n self.processingSteps.append(pstep)", "def _load_cluster(self):", "def __init__(self, datacfg_file=None, envcfg_file=None, resources_file=None):\n\n super(LoadHandler, self).__init__(datacfg_file, envcfg_file, resources_file)\n\n logkv(logger, {\"msg\": \"Starting load\",\n \"dataset\": self.get_config(\"dataset_name\")}, \"info\")\n\n # Initialize fields that will be filled in methods of this class.\n self.propfile = None\n self.newdirs = None\n self.locked = False\n self.load_type = None\n\n # Get primary HDFS namenode before proceeding with load\n namenodes = self.get_config(\"webhdfs_root\").split(\",\")\n try:\n self.primary_namenode = \\\n self.hdfs_mgr.get_primary_namenode(namenodes,\n self.get_config(\"hdfs_root\"),\n self.get_config(\"hdfs_user\"))\n except HdfsManagerException as ex:\n logkv(logger, {\"msg\": \"Failed to get primary namenode\"}, \"error\", ex)\n raise LoadHandlerException()\n\n # Instantiate Oozie manager\n self.oozie_mgr = OozieManager()\n\n # Get folder processing delay\n try:\n self.process_delay = float(self.get_config(\"folder_processing_delay\"))\n except ValueError as ex:\n logkv(logger,\n {\"msg\": \"Could not parse folder_processing_delay as a float\"},\n \"error\")\n raise LoadHandlerException()\n\n # # Instantiate NewRelic manager\n # try:\n # self.newrelic_mgr = NewRelicManager(self.get_config(\"newrelic_api_key\", configtype=\"env\"),\n # self.get_config(\"newrelic_dataset_name\"),\n # self.get_config(\"newrelic_url\"))\n #\n # except Exception as ex:\n # logkv(logger, {\"msg\": \"Failed to initialize NewRelic Manager\"}, \"error\")\n # raise LoadHandlerException()", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)", "def __load(self, node):\n\n self.tiles = node['data']\n self.name = node['name']\n self.opacity = node['opacity']\n self.visible = node['visible']", "def _load(self, data):\n raise NotImplementedError(\"Don't know how to load the task\")", "def _process_nodes(self):\n # Sort the nodes by metanode type, then by id\n self.node_df = self.node_df.sort_values(['label', 'id']).reset_index(drop=True)\n # Get all the ids\n self.nodes = self.node_df['id']\n # Get mapping from the index to the node ID (one to many so need different one for each node type)\n self.index_to_nid = dict()\n for group_name, group in self.node_df.groupby('label'):\n self.index_to_nid[group_name] = group['id'].reset_index(drop=True).to_dict()\n # Get the reverse mapping (many to one so don't need to separate based on type).\n self.nid_to_index = dict()\n for mapper in self.index_to_nid.values():\n for index, nid in mapper.items():\n self.nid_to_index[nid] = index\n # Finally, we need a mapper from id to node type\n self.id_to_metanode = self.node_df.set_index('id')['label'].to_dict()\n # And from node type to a list of ids\n self.metanode_to_ids = dict()\n for group_name, group in self.node_df.groupby('label'):\n self.metanode_to_ids[group_name] = group['id'].tolist()\n # One more mapper of id to name\n self.nid_to_name = self.node_df.set_index('id')['name'].to_dict()", "def __init__(self, data, node):\n self.data = data\n self.node = node # This is the data structure which holds the data for this node, e.g. 
lat, lon, etc.", "def load_data_file(self):\n with open(self.files['data'], 'r') as infile:\n data = json.load(infile)\n self.boundary_nodes = data['boundary_nodes']\n self.nodes = {int(k): v for k, v in data['nodes'].items()}\n self.levels = data['levels']\n infile.close()", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)\n \n self._results_ = None", "def load_mnist_data(nr_nodes, nr_classes, allocation, subset, batch_size):\n train_loader_list = []\n test_loader_list = []\n\n train = LoadData('MNIST', True, subset)\n test = LoadData('MNIST', False, False)\n\n train_data, train_targets = train.split(allocation, nr_nodes, class_per_node=nr_classes)\n for data, targets in zip(train_data, train_targets):\n train_dataset = CustomDataset(data, targets)\n train_loader_list.append(DataLoader(train_dataset, batch_size=batch_size, shuffle=True))\n\n test_data, test_targets = test.split('uniform', nr_nodes)\n for data, targets in zip(test_data, test_targets):\n test_dataset = CustomDataset(data, targets)\n test_loader_list.append(DataLoader(test_dataset, batch_size=batch_size, shuffle=False))\n\n return train_loader_list, test_loader_list", "def _custom_data_loader(self) -> DataLoader:\n dataloaders = DataLoader(self.dataset, batch_size=1)\n return dataloaders", "def load_all_data_from_file(self) -> None:\n self.load_gene_data_from_file()\n self.load_ontology_from_file(ontology_type=DataType.GO, ontology_url=self.go_ontology_url,\n ontology_cache_path=self.go_ontology_cache_path,\n config=self.config)\n self.load_associations_from_file(associations_type=DataType.GO, associations_url=self.go_associations_url,\n associations_cache_path=self.go_associations_cache_path, config=self.config)\n self.load_ontology_from_file(ontology_type=DataType.DO, ontology_url=self.do_ontology_url,\n ontology_cache_path=self.do_ontology_cache_path, config=self.config)\n self.load_associations_from_file(associations_type=DataType.DO, associations_url=self.do_associations_url,\n associations_cache_path=self.do_associations_cache_path,\n association_additional_cache_path=self.do_associations_new_cache_path,\n association_additional_url=self.do_associations_new_url, config=self.config)\n self.load_ontology_from_file(ontology_type=DataType.EXPR, ontology_url=self.expression_ontology_url,\n ontology_cache_path=self.expression_ontology_cache_path, config=self.config)\n self.load_associations_from_file(associations_type=DataType.EXPR,\n associations_url=self.expression_associations_url,\n associations_cache_path=self.expression_associations_cache_path,\n config=self.config)\n self.load_orthology_from_file()\n self.load_expression_cluster_data()\n self.load_protein_domain_information()", "def loadData(self):\n batch_size = 256\n \n #if self.conv_sg == True:\n # batch_size = 1 \n \n download = True\n root = self.root + self.dataset\n if self.dataset == \"MNIST\": \n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n trainset = torchvision.datasets.MNIST(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.MNIST(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR10\":\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465,), (0.2023, 0.1994, 0.2010,))])\n trainset = torchvision.datasets.CIFAR10(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR10(root, train=False, download=download, 
transform=transform)\n \n if self.dataset == \"CIFAR100\":\n transform = transforms.Compose([transforms.ToTensor()])\n trainset = torchvision.datasets.CIFAR100(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR100(root, train=False, download=download, transform=transform)\n \n \n trainloader = torch.utils.data.DataLoader(trainset, batch_size = batch_size,\n shuffle=False, num_workers=0, pin_memory = False)\n \n testloader = torch.utils.data.DataLoader(testset, batch_size= batch_size,\n shuffle=False, num_workers=2, pin_memory = False)\n \n return trainloader, testloader", "def _loadData(self, data):\n self._data = data\n self.id = utils.cast(int, data.attrib.get('id'))\n self.accountID = utils.cast(int, data.attrib.get('accountID'))\n self.serverId = utils.cast(int, data.attrib.get('serverId'))\n self.machineIdentifier = data.attrib.get('machineIdentifier')\n self.name = data.attrib.get('name')\n self.lastSeenAt = utils.toDatetime(data.attrib.get('lastSeenAt'))\n self.numLibraries = utils.cast(int, data.attrib.get('numLibraries'))\n self.allLibraries = utils.cast(bool, data.attrib.get('allLibraries'))\n self.owned = utils.cast(bool, data.attrib.get('owned'))\n self.pending = utils.cast(bool, data.attrib.get('pending'))", "def _loadData(self, data):\n self._data = data\n self.createdAt = utils.toDatetime(data.attrib.get('createdAt'))\n self.email = data.attrib.get('email')\n self.friend = utils.cast(bool, data.attrib.get('friend'))\n self.friendlyName = data.attrib.get('friendlyName')\n self.home = utils.cast(bool, data.attrib.get('home'))\n self.id = utils.cast(int, data.attrib.get('id'))\n self.server = utils.cast(bool, data.attrib.get('server'))\n self.servers = self.findItems(data, MyPlexServerShare)\n self.thumb = data.attrib.get('thumb')\n self.username = data.attrib.get('username', '')\n for server in self.servers:\n server.accountID = self.id", "def _load(self):\n\t\tpool = []\n\t\tview = []\n\t\tlibrary = []\n\n\t\tif is_file(\"~/comiccrawler/pool.json\"):\n\t\t\tpool = json.loads(content_read(\"~/comiccrawler/pool.json\"))\n\n\t\tif is_file(\"~/comiccrawler/view.json\"):\n\t\t\tview = json.loads(content_read(\"~/comiccrawler/view.json\"))\n\n\t\tif is_file(\"~/comiccrawler/library.json\"):\n\t\t\tlibrary = json.loads(content_read(\"~/comiccrawler/library.json\"))\n\n\t\tfor m_data in pool:\n\t\t\t# reset state\n\t\t\tif m_data[\"state\"] in (\"DOWNLOADING\", \"ANALYZING\"):\n\t\t\t\tm_data[\"state\"] = \"ERROR\"\n\t\t\t# build episodes\n\t\t\tepisodes = []\n\t\t\tfor ep_data in m_data[\"episodes\"]:\n\t\t\t\tepisodes.append(Episode(**ep_data))\n\t\t\tm_data[\"episodes\"] = episodes\n\t\t\tmission = Mission(**m_data)\n\t\t\tself._add(mission)\n\n\t\tfor url in view:\n\t\t\tself.view[url] = self.pool[url]\n\n\t\tfor url in library:\n\t\t\tself.library[url] = self.pool[url]\n\n\t\tself.bubble(\"MISSION_LIST_REARRANGED\", self.view)\n\t\tself.bubble(\"MISSION_LIST_REARRANGED\", self.library)", "def load_all(cls, data):\n return [cls.load(obj) for obj in data]", "def import_data(self, filename=None, rawdata=None, append=False):\n \n if filename:\n with open(filename,\"r\") as f:\n data = f.read()\n elif rawdata:\n data = rawdata\n else:\n raise Exception(\"No data given\")\n\n if not append:\n self.nodelist = []\n\n d = deserialize(data, self.consolidator)\n self.nodelist += list(d.nodes.values())\n if append:\n self.domain_obj = None #mark as outdated\n else:\n self.domain_obj = d", "def dataloader(self):\n\n # load / split data\n 
train_data = self.data.get_train_data()\n if self.args.use_dev:\n train_data, dev_data = self.data.split_data(train_data)\n test_data = self.data.get_test_data()\n\n #print(train_data[0])\n #print(dev_data[0])\n #print(test_data[0])\n\n # build dataset\n train_dataset = self.loader.build_dataset(\n train_data, \n self.args.train_max_seq_len)\n train_loader = self.loader.build_dataloader(\n train_dataset, 'train')\n\n test_dataset = self.loader.build_dataset(\n test_data,\n self.args.eval_max_seq_len)\n test_loader = self.loader.build_dataloader(\n test_dataset, 'test')\n\n if self.args.use_dev:\n dev_dataset = self.loader.build_dataset(\n dev_data,\n self.args.eval_max_seq_len)\n dev_loader = self.loader.build_dataloader(\n dev_dataset, 'dev')\n return train_loader, dev_loader, test_loader\n else:\n return train_loader, test_loader", "def __init__(self, data, node):\n self.data = data\n self.node = node", "def __init__(self):\n\n self.nodes = {}", "def load_data(self):\n\n self._load_train_data()\n self._load_test_data()", "def load_data(self):\n overlength_num = title_num = 0\n with open(self.path, 'r', encoding='utf-8') as r:\n for line in r:\n inst = json.loads(line)\n is_title = inst['sent_id'].endswith('-3') and inst['tokens'][-1] != '.'\n if self.ignore_title and is_title:\n title_num += 1\n continue\n\n # TODO: add back coarse type\n for event in inst['event_mentions']:\n event_type = event['event_type']\n if ':' in event_type:\n event['event_type'] = event_type.split(':')[1].upper()\n self.data.append(inst)\n\n if title_num:\n print('Discarded {} titles'.format(title_num))\n print('Loaded {} instances from {}'.format(len(self), self.path))", "def _loadData(self, data):\n Episode._loadData(self, data)\n PlexHistory._loadData(self, data)", "def load_random_data(self, parts, nodes, max_nodes):\n\n self.parts = parts\n self.nodes = nodes\n self.max_nodes = max_nodes\n\n if self.verbose:\n print 'Generating random data using nodes:' + str(nodes) + \\\n ' parts:' + str(parts) + ' max nodes:' + str(max_nodes)\n\n node_list = []\n node_list.extend(range(1, nodes))\n\n # for each part we want to add a random number of nodes from the node list\n for i in range(1, parts):\n self.data_dict[i] = random.sample(node_list, random.randint(2, max_nodes))", "def __init__(self, nodes=None):\r\n self.nodes = nodes", "def get_each_loader(data_path, batch_size, trn_negnum, shuffle=True, num_workers=0):\n \n dataset = ML_Dataset(data_path, trn_negnum)\n \n if data_path.endswith('trn') == True:\n collate = dataset.train_collate\n else:\n collate = test_collate\n\n data_loader = data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=collate)\n\n return data_loader", "def __init__(self, c_config, client):\n self.id = c_config['id']\n self.nodes = []\n self.client = client\n self.period = 20\n self.__init_load(c_config)", "def __init__(self, nodes):\n\n self._nodes = nodes", "def loadData(self, file):\n self.data = batchImport(file, self.ps)", "def _load(self, dataset):\n raise NotImplementedError('Loader {} does not support loading datasets.'.format(self.type()))", "def _loadData(self, data):\n Episode._loadData(self, data)\n PlexSession._loadData(self, data)", "def _fetch_data(self):\n pass", "def loadData(self):\n infile = open(PublicTransit.PICKLE_SAVE_FILE, 'rb')\n self.nodesDict = cPickle.load(infile)\n self.linksDict = cPickle.load(infile)\n self.stopsByRoute = cPickle.load(infile)\n self.stopsByNode = cPickle.load(infile)\n self.routeXref = 
cPickle.load(infile)\n self.transitRoutes = cPickle.load(infile)\n infile.close()\n self.spIndex = index.Index(PublicTransit.SPATIAL_INDEX_FILE)\n #last step is to reconcile all of the nodes into single objects\n #use routePattern dictionary as the master\n self.stopsDict = {}\n for routePattern in self.stopsByRoute:\n for stop in self.stopsByRoute[routePattern]:\n if stop.stopPointId in self.stopsDict:\n self.stopsDict[stop.stopPointId].append(stop)\n else:\n self.stopsDict[stop.stopPointId] = [stop]\n if stop.tanaNode in self.stopsByNode:\n for i in range(len(self.stopsByNode[stop.tanaNode])):\n nodeStop = self.stopsByNode[stop.tanaNode][i]\n if nodeStop.basicEqual(stop):\n self.stopsByNode[stop.tanaNode][i] = stop", "def loadObjectNodes(self):\n #print \"Trying to dynamically load objects from storage\"\n for name, module in self.object_modules.iteritems():\n #print \"Loading object names for object type: \" + name\n object_dir = os.path.join(self.save_location, name)\n #grab the object names from the filenames and use them to populate\n # the lists of objects\n if os.path.exists(object_dir) and os.listdir(object_dir) != []:\n self.objects[name] = [game_objects.ObjectUtilities.ObjectNode(self, partition(filename, '.')[0], module) for filename in os.listdir(object_dir)]\n self.objects[name].sort()\n\t else:\n\t\tself.objects[name] = []\n #print \"Object list:\"\n #for o in self.objects[name]:\n # print o\n #alert listeners to happy initialization\n self.sendODBEvent(ODBInitialize())", "def __init__(self, data):\n\n super(DataNode, self).__init__()\n\n # the current data\n self._data = data\n # the set of children\n self._children = set()", "def __init__(self):\n self.node = None\n self.data = None", "def __init__(self, node_map, edge_list):\n self.node_map = node_map\n self.edge_list = edge_list\n self.sif_list = []\n categories = Categories()\n abbrev_map = categories.abbrev_category_map\n\n self.data_list = []\n for node_id in node_map:\n node = node_map[node_id]\n node.sif_id = abbrev_map[node.category] + \"_\" + node.label\n dict = {}\n dict[\"id\"] = node.id\n dict[\"label\"] = node.sif_id\n dict[\"category\"] = node.category\n node_dict = {}\n node_dict[\"data\"] = dict\n self.data_list.append(node_dict)\n\n edge_id = 0\n for edge in edge_list:\n dict = {}\n dict[\"id\"] = \"e\" + str(edge_id)\n dict[\"source\"] = edge.source_id\n dict[\"target\"] = edge.target_id\n node_dict = {}\n node_dict[\"data\"] = dict\n self.data_list.append(node_dict)\n\n s_node = self.node_map[edge.source_id]\n t_node = self.node_map[edge.target_id]\n self.sif_list.append([s_node.sif_id, t_node.sif_id])\n edge_id += 1", "def fetch_data(self):", "def loadData(self,filepath):\r\n self.removeCheckbuttons()\r\n self.tree = ET.parse(filepath)# Parse xml Tree\r\n self.data = self.tree.getroot().find(\"data\")# Find Data\r\n self.sensors = [i.text for i in self.tree.getroot().find('columns')]# Get Sensor Names\r\n for s in self.sensors:# Add Each Sensor as Option\r\n self.addOption(s)", "def load_and_process(data_dir, train_node_num, eval_node_num, test_node_num):\n biases, feature, label = get_biases_features_labels(data_dir)\n # split training, validation and testing set\n nodes_num = label.shape[0]\n train_mask = get_mask(nodes_num, 0, train_node_num)\n eval_mask = get_mask(nodes_num, train_node_num, train_node_num + eval_node_num)\n test_mask = get_mask(nodes_num, nodes_num - test_node_num, nodes_num)\n\n y_train = np.zeros(label.shape)\n y_val = np.zeros(label.shape)\n y_test = np.zeros(label.shape)\n\n 
y_train[train_mask, :] = label[train_mask, :]\n y_val[eval_mask, :] = label[eval_mask, :]\n y_test[test_mask, :] = label[test_mask, :]\n\n y_train = y_train[np.newaxis]\n y_val = y_val[np.newaxis]\n y_test = y_test[np.newaxis]\n train_mask = train_mask[np.newaxis]\n eval_mask = eval_mask[np.newaxis]\n test_mask = test_mask[np.newaxis]\n\n return feature, biases, y_train, train_mask, y_val, eval_mask, y_test, test_mask", "def load_data(self):\n if not os.path.isfile(\"{}/OFF_data.json\".format(settings.DIR_PATH)):\n self.request_constructor(settings.R_COLLECTION['category'], 'NULL', 'tags')\n self.crawl_data('category')\n i = 0\n for item in self.categories:\n i += 1\n cat = item.get(\"name\")\n self.request_constructor(settings.R_COLLECTION['product'], cat, 'products')\n self.crawl_data('product')\n\n self.data = {\"categories\": self.categories, \"products\": self.products}\n self.save_data('OFF_data.json')\n else:\n with open(\"{}/OFF_data.json\".format(settings.DIR_PATH), 'r') as f:\n self.data = json.load(f)\n self.categories = self.data[\"categories\"]\n self.products = self.data[\"products\"]\n return self.categories, self.products", "def load(self) -> None:\n self._load_data()\n self._load_poses()\n self._load_timestamps()", "def data_loader(root, batch_size=64):\n input_transform = get_transform()\n dataset = CustomDataset(root, input_transform)\n return data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=False)", "def load_data(self,split='train'):\n raise NotImplementedError", "def load(self, theList: DoubleLinkList):\n nextId = self.loadHeadId()\n while nextId:\n rec = self.db.selectById(self.tableName, nextId)\n theList.addNode(appendIt=True, nodeId=rec['nodeId'], childId=rec['childId'],\n label=rec['label'])\n nextId = rec['nextId']", "def _load_data(self):\n self.mapper = Mapper()\n self.mapper.generate_vocabulary(self.review_summary_file)\n self.X_fwd, self.X_bwd, self.Y = self.mapper.get_tensor(reverseflag=True)\n # Store all the mapper values in a dict for later recovery\n self.mapper_dict = dict()\n self.mapper_dict['seq_length'] = self.mapper.get_seq_length()\n self.mapper_dict['vocab_size'] = self.mapper.get_vocabulary_size()\n self.mapper_dict['rev_map'] = self.mapper.get_reverse_map()\n # Split into test and train data\n self._split_train_tst()", "def _do_load_page(self, **kwargs):\n _tree = kwargs['tree']\n _row = kwargs['row']\n _error_code = 0\n _user_msg = \"\"\n _debug_msg = \"\"\n\n _data = []\n _model = self.treeview.get_model()\n\n _node = _tree.nodes[SortedDict(_tree.nodes).keys()[0]]\n _entity = _node.data\n try:\n if _entity.is_mission:\n _icon = gtk.gdk.pixbuf_new_from_file_at_size(\n self._dic_icons['mission'], 22, 22)\n _data = [\n _icon, _entity.mission_id, _entity.description, '',\n _entity.time_units, 0.0, _entity.mission_time, 0.0, 0.0,\n _node.identifier, 0, 'mission'\n ]\n _new_row = None\n\n elif _entity.is_phase:\n _icon = gtk.gdk.pixbuf_new_from_file_at_size(\n self._dic_icons['phase'], 22, 22)\n _data = [\n _icon, _entity.phase_id, _entity.name, _entity.description,\n '', _entity.phase_start, _entity.phase_end, 0.0, 0.0,\n _node.identifier, 0, 'phase'\n ]\n\n elif _entity.is_env:\n _icon = gtk.gdk.pixbuf_new_from_file_at_size(\n self._dic_icons['environment'], 22, 22)\n _data = [\n _icon, _entity.environment_id, _entity.name, '',\n _entity.units, _entity.minimum, _entity.maximum,\n _entity.mean, _entity.variance, _node.identifier, 1,\n 'environment'\n ]\n\n try:\n _new_row = _model.append(_row, _data)\n except TypeError:\n 
_error_code = 1\n _user_msg = _(u\"One or more Usage Profile line items had the \"\n u\"wrong data type in it's data package and is \"\n u\"not displayed in the Usage Profile.\")\n _debug_msg = (\n \"RAMSTK ERROR: Data for Usage Profile ID {0:s} for \"\n \"Revision ID {1:s} is the wrong type for one or \"\n \"more columns.\".format(\n str(_node.identifier), str(self._revision_id)))\n _new_row = None\n except ValueError:\n _error_code = 1\n _user_msg = _(u\"One or more Usage Profile line items was \"\n u\"missing some of it's data and is not \"\n u\"displayed in the Usage Profile.\")\n _debug_msg = (\n \"RAMSTK ERROR: Too few fields for Usage Profile ID \"\n \"{0:s} for Revision ID {1:s}.\".format(\n str(_node.identifier), str(self._revision_id)))\n _new_row = None\n except AttributeError:\n if _node.identifier != 0:\n _error_code = 1\n _user_msg = _(u\"One or more Usage Profile line items was \"\n u\"missing it's data package and is not \"\n u\"displayed in the Usage Profile.\")\n _debug_msg = (\n \"RAMSTK ERROR: There is no data package for Usage \"\n \"Profile ID {0:s} for Revision ID {1:s}.\".format(\n str(_node.identifier), str(self._revision_id)))\n _new_row = None\n\n for _n in _tree.children(_node.identifier):\n _child_tree = _tree.subtree(_n.identifier)\n self._do_load_page(tree=_child_tree, row=_new_row)\n\n _row = _model.get_iter_root()\n self.treeview.expand_all()\n if _row is not None:\n _path = _model.get_path(_row)\n _column = self.treeview.get_column(0)\n self.treeview.set_cursor(_path, None, False)\n self.treeview.row_activated(_path, _column)\n\n return (_error_code, _user_msg, _debug_msg)", "def _init_node_parm(self, key):\n wf_net_conf = WorkFlowNetConfML(key)\n self.model_path = wf_net_conf.model_path\n self.ml_class = wf_net_conf.ml_class\n self.config = wf_net_conf.config\n self.batch_size = 10000\n self.model_type = wf_net_conf.model_type\n\n #Todo 어떻게 꺼내는지 승우씨한테 물어볼것\n _wf_data_conf = wf_data_conf(key.split('_')[0]+'_'+key.split('_')[1]+'_'+'dataconf_node')\n self.data_conf = _wf_data_conf.conf\n self.label = _wf_data_conf.label\n self.cell_feature = _wf_data_conf.cell_feature\n self.cross_cell = _wf_data_conf.cross_cell\n self.extend_cell_feature = _wf_data_conf.extend_cell_feature\n self.label_values = _wf_data_conf.label_values\n\n _wf_data_node = wf_data_node(key.split('_')[0] + '_' + key.split('_')[1] + '_' + 'data_node')\n self.multi_read_flag = _wf_data_node.multi_node_flag\n self.predict_path = _wf_data_node.predict_path", "def _load_neighbors_from_external_source(self) -> None:\r\n # The default implementation is empty, Node relies on a database filled with data.\r\n # Override this method in child classes to let the node load its neighbors from\r\n # an external data source.\r\n pass", "def _loadData(self, data):\n Movie._loadData(self, data)\n PlexHistory._loadData(self, data)", "def ndata(self):\n raise Exception(\"Graph store doesn't support access data of all nodes.\")", "def _load_data(self):\n if self._name in BALANCE_DATASET:\n _loader = dataset_loaders[self._name]\n xnp, y = _loader()\n\n # Train - Test split\n gen = ShuffleSplit(n_splits=1, random_state=42, test_size=self._test_size).split(xnp)\n train_idx, test_idx = next(gen)\n\n # Train data.\n self.xnp_tr = xnp[train_idx]\n self.y_tr = y[train_idx]\n # Test data.\n self.xnp_ts = xnp[test_idx]\n self.y_ts = y[test_idx]\n\n else:\n _loader = dataset_loaders[self._name]\n xnp, xp, y = _loader()\n # self.xnp, self.xp, self.y = _loader()\n\n # Train - Test split\n gen = ShuffleSplit(n_splits=1, 
random_state=42, test_size=self._test_size).split(xnp)\n train_idx, test_idx = next(gen)\n\n # Train data.\n self.xnp_tr = xnp[train_idx]\n self.xp_tr = xp[train_idx]\n self.y_tr = y[train_idx]\n # Test data.\n self.xnp_ts = xnp[test_idx]\n self.xp_ts = xp[test_idx]\n self.y_ts = y[test_idx]", "def loadData(catalog):\n loadVideos(catalog)\n loadCategories(catalog)", "def load_torch_data(load_data_func):\n\n def torch_loader(dataset, data_path, batch_size, shuffle=True, cuda_device=None, num_workers=1):\n (train_data, val_data), (train_labels, val_labels), label_names = load_data_func(dataset, data_path)\n\n kwargs = {'num_workers': num_workers, 'pin_memory': True} if cuda_device is not None else {}\n kwargs['drop_last'] = True\n\n if type(train_data) == numpy.ndarray:\n train_dataset = TensorDataset(torch.from_numpy(train_data), torch.from_numpy(train_labels))\n val_dataset = TensorDataset(torch.from_numpy(val_data), torch.from_numpy(val_labels))\n elif type(train_data) == scipy.sparse.csr.csr_matrix:\n from sklearn.feature_extraction.text import TfidfTransformer\n tfidf_trans = TfidfTransformer(norm=None)\n tfidf_trans.fit(train_data)\n train_dataset = SparseDataset(train_data, tfidf_trans.idf_)\n val_dataset = SparseDataset(val_data, tfidf_trans.idf_)\n else:\n train_dataset = torchvision.datasets.ImageFolder(train_data)\n val_dataset = torchvision.datasets.ImageFolder(val_data)\n\n train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=shuffle, **kwargs)\n val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, **kwargs)\n\n return train_loader, val_loader, label_names\n\n return torch_loader", "def _load_train_data(self):\n\n self.train_loader = data.Train_loader(self.N_max, self.n_per_conn,\n self.data_path, self.device)\n self.train_loader.load_data()\n\n # load mean and std\n scc_mean_std = np.loadtxt(\n os.path.join(self.data_path, 'scc_mean_std.csv'), delimiter=',')\n self.mean = torch.Tensor(scc_mean_std[0])\n self.std = torch.Tensor(scc_mean_std[1])", "def load_data(self) -> tuple:\n label_num = {}\n data_set = pathlib.Path(self.path)\n data = []\n\n # create the label lookup dict for verifcation later\n for i, v in enumerate(data_set.iterdir()):\n label_num[v.name] = i\n self.labels[i] = v.name\n # end\n\n # read images\n for img_path in data_set.rglob(\"*.jpg\"):\n lbl = label_num[str(img_path.parent.stem)]\n img = cv2.imread(str(img_path))\n img = cv2.resize(img, self.dims, interpolation=cv2.INTER_AREA)\n\n # flatten RGB data into a vector\n # NOTE: NOT ACTUALLY NECESSARY! 
\n img.flatten()\n\n # label the sample and append to temp data list\n sample = np.append(lbl, img)\n data.append(sample)\n # end\n\n # partition and package the data (*_ ensures safe unpacking)\n train, test, validate, *_ = Data.partition(data, self.parts, 0.7, 0.2)\n self.train = Data(train)\n self.test = Data(test)\n self.validate = Data(validate)", "def load(self):", "def load_data(self,split='train'):\n return load_arrow_data(self.config,split)", "def load_values(self):\n # TODO: Add self.prefix and extension\n NetworkTables.loadEntries(self.file.get_filename(), prefix='/vision/' + self.name + '_')", "def load_all(): \n training_data = dict() \n for i in range(7):\n training_data[i+1] = load_data(i+1) \n\n return training_data", "def _init_loaders(self):\n @self.loaders_wrapper(\"nx2nx\")\n def get_nx2nx_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.nx2nx_loader(extractor, stream, transformers,\n self.loader_json[self.loader_name],\n graph)\n\n @self.loaders_wrapper(\"neo4j2nx\")\n def get_neo4j2nx_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.neo4j2nx_loader(extractor, stream, transformers,\n self.loader_json[self.loader_name],\n graph)\n\n\n @self.loaders_wrapper(\"neo4j2edgelist\")\n def get_neo4j2edgelist_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.neo4j2edgelist_loader(\n extractor,\n stream,\n transformers,\n self.loader_json[self.loader_name],\n graph\n )\n\n\n @self.loaders_wrapper(\"edgelist2neo4j\")\n def get_edgelist2neo4j_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.edgelist2neo4j_loader(\n extractor,\n stream,\n transformers,\n self.loader_json[self.loader_name],\n graph\n )", "def _loadchildren():\n return [\n [PingAPI, {}, None],\n ]", "def get_data(self, **kwargs):\n\n self.data = {}\n #node_data = ''\n #link_data = ''\n templates_data = self.request_from_server('templates')\n self.templates = templates_data\n project_data = self.request_from_server('projects')\n for project in project_data:\n project_name = project['name']\n if 'project_name' in kwargs:\n if project_name != kwargs['project_name']:\n continue\n\n self.data[project_name] = {}\n self.data[project_name]['project_id'] = project['project_id']\n self.data[project_name]['nodes'] = {}\n node_data = self.request_from_server('projects/{}/nodes'.format(project['project_id']))\n link_data = self.request_from_server('projects/{}/links'.format(project['project_id']))\n for node in node_data:\n node_name = node['name']\n self.data[project_name]['nodes'][node_name] = {}\n self.data[project_name]['nodes'][node_name]['node_id'] = node['node_id']\n self.data[project_name]['nodes'][node_name]['template_id'] = node['template_id']\n self.data[project_name]['nodes'][node_name]['node_type'] = node['node_type']\n self.data[project_name]['nodes'][node_name]['console_port'] = node['console']\n self.data[project_name]['nodes'][node_name]['console_session'] = None\n 
self.data[project_name]['nodes'][node_name]['x'] = node['x']\n self.data[project_name]['nodes'][node_name]['y'] = node['y']\n self.data[project_name]['nodes'][node_name]['ports'] = {}\n if project['status'] != 'closed':\n self.data[project_name]['nodes'][node_name]['status'] = node['status']\n for port in node['ports']:\n port_name = port['short_name']\n self.data[project_name]['nodes'][node_name]['ports'][port_name] = {}\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['adapter_number'] = port['adapter_number']\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['port_number'] = port['port_number']\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['link_type'] = port['link_type']\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['link_id'] = None\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['in_use'] = False\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['connected_to'] = None\n for link in link_data:\n for link_node in link['nodes']:\n if node['node_id'] == link_node['node_id']:\n if link_node['label']['text'] == port_name:\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['link_id'] = link['link_id']\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['in_use'] = True\n if link['nodes'].index(link_node) == 0:\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['connected_to_id'] = link['nodes'][1]['node_id']\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['connected_to'] = self.get_node_name_from_id(project_name,link['nodes'][1]['node_id'])\n else:\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['connected_to_id'] = link['nodes'][0]['node_id']\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['connected_to'] = self.get_node_name_from_id(project_name,link['nodes'][0]['node_id'])", "def _load_data(self):\n data_x, data_y = make_classification(n_samples=5000, n_features=20,\n n_informative=10,\n n_redundant=0, n_repeated=0,\n n_classes=2,\n n_clusters_per_class=4,\n weights=None, flip_y=0.01,\n class_sep=1.0, hypercube=True,\n shift=0.0, scale=1.0,\n shuffle=True,\n random_state=self.args.rand_seed)\n\n self.orig_column_names = np.arange(data_x.shape[-1])\n self.data_x = data_x\n self.data_y = self.to_one_hot_encoding(data_y)\n self.numerical_idx = np.arange(data_x.shape[-1])\n self.non_num_idx = None\n self.all_non_numerical_idx = None\n\n # Calculate adjacency matrix\n self.meta_inf = data_x[:, :1].astype('float32')\n if self.args.graph_type:\n self.adj = self.get_adjacency()", "def load_data():\n if _LOCATIONS_BY_ID:\n return _LOCATIONS_BY_NAME, _LOCATIONS_BY_ID\n\n # We need to read the locations in order of country -> admin level 1 -> admin level 2 -> city.\n # This is so that the higher resolution locations can look up the lower resolution locations\n # that they belong to, and compute the necessary fields.\n countries_by_code = _load_country_data(_DATA_FILES['country'])\n admin1_by_code = _load_admin1_data(_DATA_FILES['admin_1'], countries_by_code)\n admin2_by_code = _load_admin2_data(_DATA_FILES['admin_2'], countries_by_code, admin1_by_code)\n _load_city_data(_DATA_FILES['city'], countries_by_code, admin1_by_code, admin2_by_code)\n _add_alternate_names(_DATA_FILES['alt_wiki_names'])\n _add_estimated_importances(_DATA_FILES['estimated_importance'])\n\n return _LOCATIONS_BY_NAME, _LOCATIONS_BY_ID", "def load_datapair(self, ds):\n raise NotImplementedError(\"Define this in your derived 
checker class\")", "def loadData(catalog):\n\n loadArtwork(catalog)\n loadArtists(catalog)", "def data_loader(edges,features,y):\n\n\n edge_index = torch.tensor(edges, dtype=torch.long)\n edge_index = edge_index.t().contiguous()\n x = torch.tensor(features.todense(), dtype=torch.float)\n\n y = torch.tensor(y)\n\n data = Data(x=x, edge_index=edge_index, y = y)\n\n return data", "def load_data(self, X, loss):\n\n self.X = X\n self.tags = pd.DataFrame(loss)\n\n self.index = [_ALL]\n\n self.X_all = pd.concat([self.X_all , self.X], axis = 0, ignore_index=True)\n self.tags_all = pd.concat([self.tags_all, self.tags], axis = 0, ignore_index=True)", "def file_loader(self):\n\n for folder in self.config[\"data_folders\"]:\n f = os.path.join(folder, self.data_file)\n yield jsonlist.load_file(f)", "def _loadData(self, data):\n Movie._loadData(self, data)\n PlexSession._loadData(self, data)", "def load_data(dataset, root, batch_size, workers):\n # Data transform\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n train_transform = transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ])\n query_transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize,\n ])\n\n # Construct data loader\n index = dataset.index(\"IF\")\n sub = dataset[index:]\n if sub == 'IF100':\n train_dir = os.path.join(root, 'train-IF100')\n elif sub == 'IF50':\n train_dir = os.path.join(root, 'train-IF50')\n elif sub == 'IF20':\n train_dir = os.path.join(root, 'train-IF20')\n elif sub == 'IF10':\n train_dir = os.path.join(root, 'train-IF10')\n elif sub == 'IF1':\n train_dir = os.path.join(root, 'train-IF1')\n else:\n print('train path error')\n return\n # train_dir = os.path.join(root, 'train')\n query_dir = os.path.join(root, 'query')\n database_dir = os.path.join(root, 'database')\n\n train_dataset = ImagenetDataset(\n train_dir,\n transform=train_transform,\n targets_transform=Onehot(100),\n )\n\n train_dataloader = DataLoader(\n train_dataset,\n batch_size=batch_size,\n shuffle=True,\n num_workers=workers,\n pin_memory=True,\n )\n\n query_dataset = ImagenetDataset(\n query_dir,\n transform=query_transform,\n targets_transform=Onehot(100),\n )\n\n query_dataloader = DataLoader(\n query_dataset,\n batch_size=batch_size,\n num_workers=workers,\n pin_memory=True,\n )\n\n database_dataset = ImagenetDataset(\n database_dir,\n transform=query_transform,\n targets_transform=Onehot(100),\n )\n\n database_dataloader = DataLoader(\n database_dataset,\n batch_size=batch_size,\n num_workers=workers,\n pin_memory=True,\n )\n\n return train_dataloader, query_dataloader, database_dataloader", "def __init__(self, ops: Callable, batch_size: int = 4,\n num_workers: int = 8, path_to_data: str = './project/dataset/few_shot/'):\n super(FewShotDataModule, self).__init__()\n\n self.ops = ops\n self.path_to_data = path_to_data\n self.batch_size = batch_size\n self.num_workers = num_workers\n\n self.splits = {} # Contains train and valid splits.\n self.datasets = {} # Contains instances of the Dataset class. 
One per data spit.\n self.class_map = dict(zip(CLASS_NAMES, range(len(CLASS_NAMES))))\n self.weights = [0] * len(CLASS_NAMES)", "def _read_data(self):", "def __init__(self, data_root, data_transform=None, pos_neighbor=False, neg_neighbor=False, eval_mode=False, eval_num_retrieval=10):\n assert type(eval_num_retrieval)==int, 'eval_num_retrieval: expected int but get {}'.format(type(eval_num_retrieval))\n self.data_root = data_root\n self.images_path = '{}/images'.format(self.data_root)\n self.data_transform = data_transform\n self.pos_neighbor = pos_neighbor\n self.neg_neighbor = neg_neighbor\n self.eval_mode = eval_mode\n self.eval_num_retrieval = eval_num_retrieval\n \n self.images_info = [ line.split() for line in open('{}/images_info.txt'.format(self.data_root)) ]\n self.super_class_ids = [ int(line.split()[0]) for line in open('{}/super_class_ids.txt'.format(self.data_root)) ]\n \n self.label_to_class = dict()\n self.class_to_image_id = dict()\n self.id_to_index = dict()\n for ix, (image_info) in enumerate(self.images_info):\n super_class_id, class_id, image_id, image_name = image_info\n super_class_id, class_id, image_id = int(super_class_id), int(class_id), int(image_id)\n\n if super_class_id not in self.label_to_class.keys():\n self.label_to_class[super_class_id] = list()\n self.label_to_class[super_class_id].append(class_id)\n\n if class_id not in self.class_to_image_id.keys():\n self.class_to_image_id[class_id] = list()\n self.class_to_image_id[class_id].append(image_id)\n self.id_to_index[image_id] = ix", "def load(self, *args, **kwargs):\n pass", "def __init__(self, data):\n self.bees = [Bee(b) for b in data[\"bees\"]]\n self.flowers = [Flower(f) for f in data[\"flowers\"]]\n self.hives = [Hive(h) for h in data[\"hives\"]]", "def _load_data(self):\n if self._api_response.status_code == 200:\n self._dataset = self._api_response.json()\n self._fill_day_dicts()", "def _load_data(self, cfg):\r\n\r\n if self._split == \"train\":\r\n self._annotations = self._load_lists(cfg.EGO4D_STA.TRAIN_LISTS)\r\n elif self._split == \"val\":\r\n self._annotations = self._load_lists(cfg.EGO4D_STA.VAL_LISTS)\r\n else:\r\n self._annotations = self._load_lists(cfg.EGO4D_STA.TEST_LISTS)", "def load(cls, *args, **kwargs):\n raise NotImplementedError('missing data mixin')", "def get_data(n = None):\n files = download_data()\n for path in files.values():\n extract_data(path)\n class_fnames, labelmap = explore_data()\n X, y = load_data(class_fnames)\n return prepare_data(X, y, n = n)", "def _read_data(self) -> MMD:\n\t\tif self.config.source_type == SourceType.LOCAL_FILE:\n\t\t\treturn self._read_files()\n\t\telif self.config.source_type == SourceType.HDFS:\n\t\t\treturn self._read_hdfs()\n\t\telif self.config.source_type == SourceType.NEO4J:\n\t\t\treturn self._read_neo4j(self.config.graph_db)\n\n\t\telse:\n\t\t\traise NotImplementedError(\"The source type {} has not been implemented yet.\".format(loader_config.source_type))", "def load_data(self):\n if self.debug:\n print(\"Loading data\")", "def _load_dataset(self, split, align, partition):\n\n if partition == 'all':\n self._image_list = self._face.image_list + self._clothes.image_list\n celeba_num = self._face.num_images\n deepfashion_num = self._clothes.num_images\n elif partition == 'face':\n self._image_list = self._face.image_list\n celeba_num = self._face.num_images\n deepfashion_num = 0\n elif partition == 'clothes':\n self._image_list = self._clothes.image_list\n celeba_num = 0\n deepfashion_num = self._clothes.num_images\n\n self._gtdb = 
{'attr': -1.*np.ones((self.num_images, self.num_classes), dtype=np.float64)}\n\n # load labels for celeba images if they are included. \n if celeba_num > 0:\n self._gtdb['attr'][:celeba_num, self._face_class_idx] = self._face.gtdb['attr']\n # load soft labels for clothes attributes on celeba\n if align:\n fn = osp.join(self.data_path, 'person_'+'face'+'_'+split+'_align.pkl')\n else:\n fn = osp.join(self.data_path, 'person_'+'face'+'_'+split+'.pkl') \n if osp.exists(fn):\n if partition == 'all':\n with open(fn, 'rb') as fid:\n labels = cPickle.load(fid)\n self._gtdb['attr'][:celeba_num, self._clothes_class_idx] = labels\n else:\n 'Dataset {}: Labels for clothes attributes on CelebA are not loaded, the partition is not \"all\"'.format(self.name)\n else:\n print 'Dataset {}: Labels for clothes attributes on CelebA are not available! Missing filename: {}. Did you forget to run load_person.py first?'.\\\n format(self.name, fn)\n\n # load labels for deepfashion images if they are included.\n if deepfashion_num > 0:\n self._gtdb['attr'][celeba_num:, self._clothes_class_idx] = self._clothes.gtdb['attr']\n # load soft labels for face attributes on deepfashion\n fn = osp.join(self.data_path, 'person_'+'clothes'+'_'+split+'.pkl')\n if osp.exists(fn):\n if partition == 'all':\n with open(fn, 'rb') as fid:\n labels = cPickle.load(fid)\n self._gtdb['attr'][celeba_num:, self._face_class_idx] = labels\n else:\n 'Dataset {}: Labels for face attributes on Deepfashion are not loaded, the partition is not \"all\"'.format(self.name)\n else:\n print 'Dataset {}: Labels for face attributes on Deepfashion are not available! Missing filename: {}. Did you forget to run load_person.py first?'.\\\n format(self.name, fn)", "def _load_data(self):\n\n from sklearn.datasets import fetch_openml\n mnist = fetch_openml('mnist_784', cache=True)\n # data_x = np.array(final_data_df)\n feat_data = np.array(mnist.data).astype('float32')\n target_data = mnist.target.astype('int64')\n shuffling_index = np.arange(feat_data.shape[0])\n np.random.shuffle(shuffling_index)\n feat_data = feat_data[shuffling_index]\n target_data = target_data[shuffling_index]\n\n cur_data_list = []\n cur_target_list = []\n for i in range(10):\n cur_mask = target_data == i\n cur_data_list.append(feat_data[cur_mask][:500])\n cur_target_list.append(target_data[cur_mask][:500])\n feat_data = np.concatenate(cur_data_list)\n target_data = np.concatenate(cur_target_list)\n\n self.data_x = feat_data\n self.data_y = self.to_one_hot_encoding(target_data)\n self.numerical_idx = np.arange(784)\n self.non_num_idx = None\n\n # Calculate adjacency matrix\n self.meta_inf = self.data_x.astype('float32')\n\n if self.args.graph_type:\n self.adj = self.get_adjacency()", "def _load(self):\n raise NotImplementedError()", "def load_models(self):\n logger.info('Loading {name} data'.format(name=self.__class__.__name__))\n for type_name, type_ in self.data_dict.iteritems():\n # An exclude for correlations. 
Isn't created nor has an ID.\n if type_name == \"correlations_main\":\n continue\n task_response = self.do_task(\n self.types[type_name],\n type_['taskId']\n )\n self.data_dict[type_name]['job_id'] = json.loads(\n task_response.content\n )['JobId']\n logger.info(\n 'Load {name} response: '.format(name=type_name) +\n task_response.content\n )\n\n print(\"Loaded model\")", "def loadData(catalog):\n loadArtworks(catalog)\n loadArtists(catalog)", "def load(dataset_name='processed_500maxnode'):\n # data_dir = os.path.join('../data/modelnet40_ply_hdf5_2048')\n # assert os.path.exists(data_dir)\n\n processed_data_dir = os.path.join(DATA_DIR, dataset_name)\n if not os.path.exists(processed_data_dir):\n os.makedirs(processed_data_dir)\n\n if len(os.listdir(processed_data_dir)) != 0:\n\n # print(\"Loading Saved Data from Disk.......\")\n\n \"\"\" pre-defined location for saving the train and test data\"\"\"\n train_dir = os.path.join(processed_data_dir, 'train')\n test_dir = os.path.join(processed_data_dir, 'test')\n\n train, max_node = load_back_from_disk(data_dir=train_dir, istrain=True)\n test, max_node_test = load_back_from_disk(data_dir=test_dir, istrain=False)\n max_node = max(max_node, max_node_test)\n\n else:\n train, test, max_node = featurize(\n processed_data_dir,\n shard_size=16)\n\n return train, test, max_node", "def __init__(self):\n self._data=[]", "def __init__(self, data_config):\n self._brands = self._load_from_directory(data_config['targeted_brands_dir'])\n self._keywords = self._load_from_directory(data_config['keywords_dir'])\n self._fqdn_keywords = self._load_from_directory(data_config['fqdn_keywords_dir'])\n self._similarity_words = self._load_from_directory(data_config['similarity_words_dir'])\n self._tlds = self._load_from_directory(data_config['tld_dir'])" ]
[ "0.7005817", "0.6853901", "0.68161523", "0.67189497", "0.65886134", "0.6483465", "0.64096814", "0.6365568", "0.63560086", "0.62556756", "0.62167966", "0.6192285", "0.618386", "0.6101325", "0.6019031", "0.5991078", "0.59765136", "0.59683806", "0.5959553", "0.59291226", "0.5928217", "0.59135383", "0.5895583", "0.58914244", "0.5889283", "0.5875959", "0.587575", "0.58733785", "0.5872307", "0.5867697", "0.58609784", "0.585101", "0.5831282", "0.5830698", "0.5815881", "0.5809611", "0.57950985", "0.5785362", "0.57824403", "0.578208", "0.5778016", "0.57742006", "0.57726336", "0.57600325", "0.5759646", "0.574287", "0.5742074", "0.5741495", "0.5740415", "0.57120377", "0.57118714", "0.57084125", "0.569452", "0.568704", "0.5685346", "0.5677342", "0.56737536", "0.56694144", "0.56683946", "0.56669205", "0.56516117", "0.5649353", "0.5641757", "0.5630722", "0.56306434", "0.5626169", "0.5617531", "0.56141794", "0.56139904", "0.5604401", "0.5599164", "0.55944556", "0.55932474", "0.5591869", "0.55900115", "0.55898094", "0.5582631", "0.55765283", "0.5575889", "0.55738574", "0.557117", "0.5569049", "0.55680007", "0.55616087", "0.5558998", "0.5558516", "0.55581576", "0.55545104", "0.55494505", "0.5540667", "0.5540055", "0.55324477", "0.5532431", "0.55313563", "0.5531041", "0.55280477", "0.5525894", "0.55225396", "0.551985", "0.5517044", "0.5513114" ]
0.0
-1
Separate data into number of agents
def partition(self, to_partition, indices, nr_agents): return [to_partition[indices[i]:indices[i + 1]] for i in range(nr_agents)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_number_of_agents(model):\n\n n_agents = len(model.schedule.agents_by_type['Customer'])\n return n_agents", "def get_agent_number_of_players(players):\n return sum([count_players(player) for player in players\n if player.startswith('agent')])", "def parse_data(self, data):\n pids = set()\n cams = set()\n \n for info in data:\n pids.add(info[1])\n cams.add(info[2])\n return len(pids), len(cams)", "def agentCounter(gameState, index, depth):\n if index == gameState.getNumAgents():\n return [depth-1, 0]\n else:\n return [depth, index]", "def _count_parties(data_set): #DEMOCRATS, THEN REPUBLICANS\r\n reps = 0\r\n dems = 0\r\n for data_point in data_set:\r\n if data_point.dat_party == \"R\": reps+=1\r\n if data_point.dat_party == \"D\": dems+=1\r\n\r\n return (dems, reps)", "def how_many(self):\n\tprint \"We have {0} robots !!!\".format(self.population)", "def test_counter_agent_history(self):\n config = {\n 'name': 'CounterAgent',\n 'network_params': {\n 'path': join(ROOT, 'test.gexf')\n },\n 'network_agents': [{\n 'agent_type': 'AggregatedCounter',\n 'weight': 1,\n 'state': {'id': 0}\n\n }],\n 'max_time': 10,\n 'environment_params': {\n }\n }\n s = simulation.from_config(config)\n env = s.run_simulation(dry_run=True)[0]\n for agent in env.network_agents:\n last = 0\n assert len(agent[None, None]) == 10\n for step, total in sorted(agent['total', None]):\n assert total == last + 2\n last = total", "def get_agent(drs):\n agents = []\n for line in drs:\n if line.strip().startswith('sem'):\n datalist = line.split(':')\n for word in datalist:\n if word.count('agent') > 0:\n variable = word[6:7]\n for word in datalist:\n if word.startswith('pred({0}'.format(variable)):\n agents.append(word.split(',')[1])\n return agents", "def generate_statistics(self, timestep):\n total_friends = 0\n total_enemies = 0\n affinity_entries = 0\n num_online = len(self.online_agents)\n for agent in self.online_agents:\n total_friends += len(agent.friends)\n total_enemies += len(agent.enemies)\n affinity_entries += len(agent.affinity_map)\n self.logger.log(3, \"round %d: %d agents, each average of %d friend(s), %d unfriend(s), %d people known\" %\n (timestep, num_online, total_friends / num_online, total_enemies / num_online,\n affinity_entries / num_online))\n self.logger.log(3, \"Relationship between online agents 0 and 1 (degrees of separation): %r\" %\n (find_degrees_of_separation(self.online_agents[0], self.online_agents[1])))\n\n # Randomly pick a couple pairs of agents and check to see how many degrees of separation there are between\n # those two agents.\n num_users_to_average_separation = int(len(self.online_agents) / 200)\n deg_sep = 0\n unknowns = 0\n for x in range(num_users_to_average_separation):\n a1 = random.randint(0, len(self.online_agents)-1)\n a2 = a1\n while a2 == a1:\n a2 = random.randint(0, len(self.online_agents)-1)\n sep = find_degrees_of_separation(self.online_agents[a1], self.online_agents[a2])\n if sep is not None:\n deg_sep += sep\n else:\n unknowns += 1\n\n if num_users_to_average_separation != unknowns:\n deg_sep = int(deg_sep / (num_users_to_average_separation - unknowns))\n\n self.logger.log(3, \"%d random user pairs whom have a chain of connection, the average length of\"\n \" that chain is %d. 
%d had no path to other agent.\" %\n (num_users_to_average_separation, deg_sep, unknowns))\n\n self.logger.log(3, \"There were %d messages sent and %d messages received this round.\" %\n (self.messages_sent, self.messages_received))\n\n self.logger.log(3, \"------------\")\n self.total_messages_received += self.messages_received\n self.total_messages_sent += self.messages_sent\n self.messages_sent = 0\n self.messages_received = 0", "def make_individual_agents_2016(self):\r\n for hh_row in agents: # agents is a list of ints 1-94 from excel_import\r\n individual_id_list = return_values(hh_row, 'name')\r\n hh_id = return_values(hh_row, 'hh_id')\r\n self.hh_id = hh_id\r\n agelist = return_values(hh_row, 'age') # find the ages of people in hh\r\n genderlist = return_values(hh_row, 'gender')\r\n marriagelist = return_values(hh_row, 'marriage')\r\n educationlist = return_values(hh_row, 'education')\r\n income_local_off_farm = float(return_values(hh_row, 'income_local_off_farm'))\r\n income_local_off_farm_list[hh_row - 1] = income_local_off_farm\r\n household_income_list[hh_row - 1] = household_income_list[hh_row - 1] + income_local_off_farm\r\n if individual_id_list is not None and individual_id_list is not []:\r\n for i in range(len(individual_id_list)):\r\n self.individual_id = str(self.hh_id) + str(individual_id_list[i]) # example: 2c\r\n self.age = int(agelist[i])\r\n # if genderlist is not None and genderlist is not []:\r\n self.gender = int(genderlist[i])\r\n try:\r\n self.education = educationlist[i]\r\n except:\r\n self.education = 0\r\n self.marriage = marriagelist[i]\r\n IndividualAgent.create_initial_migrant_list(self, hh_row)\r\n self.age_at_step_0 = self.age\r\n self.income_local_off_farm = return_values(self.hh_row, 'income_local_off_farm')\r\n ind = IndividualAgent(hh_row, self, self.hh_id, self.individual_id, self.age, self.gender,\r\n self.education, self.marriage, self.past_hh_id, self.non_gtgp_area,\r\n self.step_counter, self.age_at_step_0, self.income_local_off_farm)\r\n self.schedule.add(ind)", "def count_houses_delivered_with_robot(s):\n s_santa, s_robot = s[::2], s[1::2]\n deliveries_santa = make_deliveries(s_santa)\n deliveries_robot = make_deliveries(s_robot)\n all_deliveries = combine_dicts(deliveries_santa, deliveries_robot, lambda x,y: x+y, 0)\n return len(all_deliveries)", "def how_many(cls):\n print(\"We have {:d} robots.\".format(cls.population))", "def how_many(cls):\n print(\"We have {:d} robots.\".format(cls.population))", "def get_number_of_ver_sponsors(model):\n n_agents = len([k for k, v in model.schedule.agents_by_type['Customer'].items() if v.__class__.__name__ == 'VerificationSponsor'])\n return n_agents", "def get_num_cams(self, data):\n cams = set()\n for items in data:\n camid = items[2]\n cams.add(camid)\n return len(cams)", "def get_number_of_char_sponsors(model):\n n_agents = len([k for k, v in model.schedule.agents_by_type['Customer'].items() if v.__class__.__name__ == 'CharitableSponsor'])\n return n_agents", "def analyze_agency(agency_list, keys):\r\n output = \"\"\r\n key_count = []\r\n agents = nltk.word_tokenize(keys)\r\n for x in range(len(agents)):\r\n key_count.append([0, 0, 0])\r\n for entry in agency_list:\r\n for x in range(len(agents)):\r\n for word in entry[\"sent\"]: \r\n if word.lower() == agents[x]:\r\n key_count[x][0] += 1\r\n for word in entry[\"subj\"]:\r\n if word.lower() == agents[x]:\r\n key_count[x][1] += 1\r\n for word in entry[\"dobj\"]:\r\n if word.lower() == agents[x]:\r\n key_count[x][2] += 1\r\n for word in 
entry[\"propj\"]:\r\n if word.lower() == agents[x]:\r\n key_count[x][2] += 1\r\n \r\n for x in range(len(agents)):\r\n output += (agents[x] + \" occured \" + str(key_count[x][0]) + \" times, was the subject \" + str(key_count[x][1]) +\r\n \" times, and the object \" + str(key_count[x][2]) + \" times.\" + \"\\n\")\r\n \r\n return output", "def get_number_of_investors(model):\n n_agents = len([k for k, v in model.schedule.agents_by_type['Customer'].items() if v.__class__.__name__ == 'Investor'])\n return n_agents", "def get_agent_count(self, i: int, j: int, dist: str = 'current') -> int:\n return int(self._dist[dist][i, j] / self._param['size_fraction'])", "def choose_target(self, agents):\n\n number_of_suspects = [0]*(len(agents))\n number_of_suspects_per_agent = []\n\n index = 0\n for a1 in agents:\n if not a1.is_impostor():\n for a2 in agents:\n if self.km.suspects(a1.agent_id, a2.agent_id):\n number_of_suspects[index] = number_of_suspects[index] + 1\n else:\n number_of_suspects[index] = 999999\n number_of_suspects_per_agent.append((a1.agent_id,number_of_suspects[index]))\n index = index + 1\n\n self.target = min(number_of_suspects_per_agent, key = lambda t: t[1])[0]", "def setupDistribution(tournamentsWon1):\n timesWon = np.sort(np.unique(tournamentsWon1))\n numberTimesWon = np.zeros_like(timesWon)\n for i in range (len(timesWon)):\n numberTimesWon[i] = count(tournamentsWon1, timesWon[i])\n return timesWon, numberTimesWon", "def slot_numbers_for_driver_of_age(data):\n age = int(data['driver_age'])\n slot_numbers = []\n for i in range(1, PARKING_LOT[0] + 1):\n if PARKING_LOT[i].vehicle_driver_age == age:\n slot_numbers.append(i)\n return str(slot_numbers).strip('[]').replace(' ', '') if slot_numbers else 'null'", "def divide_annotations(annotation_lines):\n\n annotation_lists = []\n current_agent_num = 0\n\n annotation_lists.append([])\n for line in annotation_lines:\n line = line.split()\n read_agent_num = int(line[0])\n if read_agent_num == current_agent_num:\n annotation_lists[current_agent_num].append(line)\n else:\n annotation_lists.append([])\n current_agent_num += 1\n annotation_lists[current_agent_num].append(line)\n return annotation_lists", "def per_cell_animal_count(self):\n print self.island.individuals()", "def task3a(self):\n browser_count = {}\n for entry in self.records:\n if((entry['visitor_device'] == 'browser') and (entry['event_type'] == 'read')):\n browser = entry['visitor_useragent']\n if (browser in browser_count):\n browser_count[entry['visitor_useragent']] += 1\n else:\n browser_count[entry['visitor_useragent']] = 1\n GUI.show_histo(browser_count, \"vert\", \"Number of Accesses using Browser\", \"Browser Distribution\")", "def _init_agents(self):\n self.agents = [Agent(e=0.1, a=0.1, row=self.row, col=self.col) for i in range(2)]", "def uniform_split(self, nr_agents):\n indices = np.linspace(start=0, stop=self.samples.shape[0], num=nr_agents + 1, dtype=int).tolist()\n\n self.samples = self.partition(self.samples, indices, nr_agents)\n self.labels = self.partition(self.labels, indices, nr_agents)", "def test_10(self):\n for _ in range(2):\n params_spec, options_spec = generate_random_model()\n respy_obj = RespyCls(params_spec, options_spec)\n respy_obj = simulate_observed(respy_obj)\n\n num_agents_est = respy_obj.get_attr(\"num_agents_est\")\n\n data_array = process_dataset(respy_obj).to_numpy()\n\n py = np.bincount(data_array[:, 0].astype(int))\n f90 = fort_debug.wrapper_get_num_obs_agent(data_array, num_agents_est)\n\n assert_almost_equal(py, f90)", "def 
get_number_of_contributors(model):\n n_agents = len([k for k, v in model.schedule.agents_by_type['Customer'].items() if v.__class__.__name__ == 'Contributor'])\n return n_agents", "def occupation_distribution(data):", "def how_many(cls):\n #cls.population equivalent to Robot.population\n print(\"We have {:d} robots.\".format(cls.population))", "def test_counter_agent(self):\n config = {\n 'name': 'CounterAgent',\n 'network_params': {\n 'path': join(ROOT, 'test.gexf')\n },\n 'agent_type': 'CounterModel',\n 'states': [{'times': 10}, {'times': 20}],\n 'max_time': 2,\n 'num_trials': 1,\n 'environment_params': {\n }\n }\n s = simulation.from_config(config)\n env = s.run_simulation(dry_run=True)[0]\n assert env.get_agent(0)['times', 0] == 11\n assert env.get_agent(0)['times', 1] == 12\n assert env.get_agent(1)['times', 0] == 21\n assert env.get_agent(1)['times', 1] == 22", "def split_agents(agents, prox_size):\n n = len(agents)\n num_proxers = int(math.ceil(float(n) / prox_size))\n\n sub_agents = [set() for _ in xrange(num_proxers)]\n for i, agent in enumerate(agents):\n sub_agents[i % num_proxers].add(agent)\n\n return Accountant([Proxer(lst) for lst in sub_agents])", "def __init__(self, agents: List[Agent], w: int, h: int, active=True):\n self.active = active\n self.agents = agents\n self.cat = [[list() for _ in range(w)] for _ in range(h)]\n self.length = dict()", "def make_land_agents_2016(self):\r\n # add non-gtgp\r\n for hh_row in agents: # from excel_import\r\n hh_id = return_values(hh_row, 'hh_id')\r\n self.total_rice = return_values(hh_row, 'non_gtgp_rice_mu')\r\n if self.total_rice in ['-3', '-4', -3, None]:\r\n self.total_rice = 0\r\n self.total_dry = return_values(hh_row, 'non_gtgp_dry_mu')\r\n if self.total_dry in ['-3', '-4', -3, None]:\r\n self.total_dry = 0\r\n self.gtgp_rice = return_values(hh_row, 'gtgp_rice_mu')\r\n if self.gtgp_rice in ['-3', '-4', -3, None]:\r\n self.total_rice = 0\r\n self.gtgp_dry = return_values(hh_row, 'gtgp_dry_mu')\r\n if self.gtgp_dry in ['-3', '-4', -3, None]:\r\n self.gtgp_dry = 0\r\n\r\n landposlist = self.determine_landpos(hh_row, 'non_gtgp_latitude', 'non_gtgp_longitude')\r\n self.age_1 = return_values(hh_row, 'age')[0]\r\n self.gender_1 = return_values(hh_row, 'gender')[0]\r\n self.education_1 = return_values(hh_row, 'education')[0]\r\n\r\n for landpos in landposlist:\r\n try:\r\n self.pre_gtgp_output = return_values(hh_row, 'pre_gtgp_output')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n\r\n try:\r\n self.non_gtgp_output = return_values(hh_row, 'pre_gtgp_output')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n self.land_time = return_values(hh_row, 'non_gtgp_travel_time')[landposlist.index(landpos)]\r\n try:\r\n self.plant_type = return_values(hh_row, 'non_gtgp_plant_type')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.land_type = return_values(hh_row, 'non_gtgp_land_type')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n self.hh_size = len(return_values(hh_row, 'age'))\r\n self.gtgp_enrolled = 0\r\n lp = LandParcelAgent(hh_row, self, hh_id, hh_row, landpos, self.gtgp_enrolled,\r\n self.age_1, self.gender_1, self.education_1,\r\n self.gtgp_dry, self.gtgp_rice, self.total_dry, self.total_rice,\r\n self.land_type, self.land_time, self.plant_type, self.non_gtgp_output,\r\n self.pre_gtgp_output)\r\n self.space.place_agent(lp, landpos)\r\n self.schedule.add(lp)\r\n if self.gtgp_enrolled == 0 and landpos not in nongtgplist and landpos not in gtgplist:\r\n nongtgplist.append(landpos)\r\n # except:\r\n # 
pass\r\n\r\n # add gtgp\r\n for hh_row in agents: # from excel_import\r\n hh_id = return_values(hh_row, 'hh_id')\r\n self.total_rice = return_values(hh_row, 'non_gtgp_rice_mu')\r\n if self.total_rice in ['-3', '-4', -3, None]:\r\n self.total_rice = 0\r\n self.total_dry = return_values(hh_row, 'non_gtgp_dry_mu')\r\n if self.total_dry in ['-3', '-4', -3, None]:\r\n self.total_dry = 0\r\n self.gtgp_rice = return_values(hh_row, 'gtgp_rice_mu')\r\n if self.gtgp_rice in ['-3', '-4', -3, None]:\r\n self.total_rice = 0\r\n self.gtgp_dry = return_values(hh_row, 'gtgp_dry_mu')\r\n if self.gtgp_dry in ['-3', '-4', -3, None]:\r\n self.gtgp_dry = 0\r\n landposlist = self.determine_landpos(hh_row, 'gtgp_latitude', 'gtgp_longitude')\r\n self.age_1 = return_values(hh_row, 'age')[0]\r\n self.gender_1 = return_values(hh_row, 'gender')[0]\r\n self.education_1 = return_values(hh_row, 'education')[0]\r\n for landpos in landposlist:\r\n try:\r\n self.pre_gtgp_output = return_values(hh_row, 'pre_gtgp_output')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.non_gtgp_output = return_values(hh_row, 'pre_gtgp_output')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.land_time = return_values(hh_row, 'gtgp_travel_time')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.plant_type = return_values(hh_row, 'pre_gtgp_plant_type')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.land_type = return_values(hh_row, 'pre_gtgp_land_type')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n self.hh_size = len(return_values(hh_row, 'age'))\r\n self.gtgp_enrolled = 1\r\n\r\n lp_gtgp = LandParcelAgent(hh_id, self, hh_id, hh_row, landpos, self.gtgp_enrolled,\r\n self.age_1, self.gender_1, self.education_1,\r\n self.gtgp_dry, self.gtgp_rice, self.total_dry, self.total_rice,\r\n self.land_type, self.land_time, self.plant_type, self.non_gtgp_output,\r\n self.pre_gtgp_output)\r\n self.space.place_agent(lp_gtgp, landpos)\r\n self.schedule.add(lp_gtgp)\r\n if self.gtgp_enrolled == 1 and landpos not in gtgplist and landpos in nongtgplist:\r\n gtgplist.append(landpos)", "def numberize(self, vocabs):\n event_type_stoi = vocabs['event_type']\n role_type_stoi = vocabs['role_type']\n trigger_label_stoi = vocabs['trigger_label']\n\n data = []\n for inst in self.data:\n tokens = inst['tokens']\n sent_id = inst['sent_id']\n events = inst['event_mentions']\n sent = inst['sentence']\n doc_id = inst['doc_id']\n events.sort(key=lambda x: x['trigger']['start'])\n token_num = len(tokens)\n\n trigger_list = [(e['trigger']['start'], e['trigger']['end'],\n event_type_stoi[e['event_type']])\n for e in events]\n\n role_list = get_role_list(events, role_type_stoi)\n\n # Graph\n graph = Graph(\n triggers=trigger_list,\n roles=role_list,\n vocabs=vocabs,\n )\n\n instance = Instance(\n doc_id = doc_id,\n sent_id=sent_id,\n sentence = sent,\n tokens=tokens,\n graph=graph,\n events = events,\n trigger_num=len(events),\n )\n data.append(instance)\n self.data = data", "def make_hh_agents_2016(self):\r\n for hh_row in agents: # agents is a list of ints 1-97 from excel_import\r\n self.hhpos = self.determine_hhpos(hh_row, 'house_latitude', 'house_longitude')\r\n self.hh_id = return_values(hh_row, 'hh_id')\r\n self.admin_village = 1\r\n\r\n # 2016\r\n mig_remittances = return_values(hh_row, 'mig_remittances') # remittances of initial migrant\r\n if mig_remittances is None:\r\n mig_remittances = 0\r\n household_income_list[hh_row - 1] = int(mig_remittances)\r\n household_remittances_list[hh_row - 
1] = int(mig_remittances)\r\n\r\n if return_values(hh_row, 'initial_migrants') is not None:\r\n out_mig_list[hh_row - 1] = 1\r\n household_migrants_list.append(self.hh_id)\r\n cumulative_mig_list[hh_row - 1] = 1\r\n\r\n num_labor_list[hh_row - 1] = initialize_labor(hh_row)\r\n hh_size_list[hh_row - 1] = len(return_values(hh_row, 'age'))\r\n\r\n a = HouseholdAgent(hh_row, self, self.hh_id, self.admin_village)\r\n self.space.place_agent(a, self.hhpos) # admin_village placeholder\r\n self.schedule.add(a)", "def non_iid_split(self, nr_agents, class_per_node, random):\n unique = list(set(self.labels.tolist()))\n len_unique = len(unique)\n\n # Create array that assigns a class to specific nodes\n # Use 'np.arange' to ensure every class is represented before repeating\n # A row represents nr_agents, a column represents classes per node\n agent_class_master = np.arange(start=0, stop=nr_agents * class_per_node) % len_unique\n np.random.shuffle(agent_class_master)\n agent_class_master = agent_class_master.reshape(nr_agents, class_per_node)\n\n # Split data by labels\n sample_list = [[] for _ in range(len_unique)]\n for i in range(len(self.labels)):\n sample_list[self.labels[i]].append(self.samples[i])\n\n # By class creates uniform or random indices splits to partition data to agents evenly\n class_count = np.bincount(agent_class_master.ravel())\n class_indices = {}\n for i in range(len(class_count)):\n if random:\n indices = sorted(np.random.randint(0, high=len(sample_list[i]), size=class_count[i] - 1).tolist())\n indices = [0] + indices\n indices += [len(sample_list[i])]\n class_indices[i] = indices\n else:\n class_indices[i] = np.linspace(start=0, stop=len(sample_list[i]), num=class_count[i] + 1,\n dtype=int).tolist()\n\n # Main loop that partitions data by the assigned class and proper amount\n all_agents = []\n all_class = []\n for agent in agent_class_master:\n agent_data = []\n agent_class = []\n for cls in agent:\n # Proportioned indices for data and grab correctly indexed data\n temp_indices = class_indices[cls]\n data_for_agent = sample_list[cls][temp_indices[0]:temp_indices[1] - 1]\n\n # Add data and class to this agents list\n agent_data = agent_data + data_for_agent\n agent_class = agent_class + [cls for _ in range(len(data_for_agent))]\n\n # Drop first index since we used that data, forces next person to use next index\n class_indices[cls] = temp_indices[1:]\n\n # Append agents data and class labels in order\n all_agents.append(torch.stack(agent_data))\n all_class.append(torch.tensor(agent_class))\n\n self.samples = all_agents\n self.labels = all_class", "def agent_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"agent_count\")", "def count(lg):\n wikipron = load_wikipron(lg)\n preprocessor = Preprocessor(lg)\n inventory = load_inventory(lg)\n i = set(inventory[\"Phoneme\"])\n result = []\n for form in wikipron[\"phonemes\"]:\n for ch in form.split(\" \"):\n if ch not in i:\n result.append({\"form\": form, \"ch\": ch})\n print(f\"{lg} | {form} | {ch}\")\n result = pd.DataFrame(result)", "def count():", "def amine(listAmine, count):\n \n for type in listAmine.keys():\n for nitrogen in listAmine[type]:\n nbNeighbor = numberNeigthbor(nitrogen[\"neighbors\"])\n for neighbor in nitrogen[\"neighbors\"]:\n if not nbNeighbor in count[type].keys():\n count[type][nbNeighbor] = structure.countElements()\n if not nbNeighbor in count[\"GlobalAmine\"].keys():\n count[\"GlobalAmine\"][nbNeighbor] = structure.countElements()\n\n\n if neighbor[\"element\"] in 
count[type][nbNeighbor].keys():\n count[type][nbNeighbor][neighbor[\"element\"]] = count[type][nbNeighbor][neighbor[\"element\"]] + 1\n count[\"GlobalAmine\"][nbNeighbor][neighbor[\"element\"]] = count[\"GlobalAmine\"][nbNeighbor][neighbor[\"element\"]] + 1\n\n else:\n count[type][nbNeighbor][\"others\"] = count[type][nbNeighbor][\"others\"] + 1\n count[\"GlobalAmine\"][nbNeighbor][\"others\"] = count[\"GlobalAmine\"][nbNeighbor][\"others\"] + 1", "def splitCount(self):\n return 0", "def testMultiagent(self):\n \n p = Parser(dora_domain.split(\"\\n\"))\n dom = domain.Domain.parse(p.root)\n p = Parser(dora_multiagent.split(\"\\n\"))\n scen = scenario.MapsimScenario.parse(p.root, dom)\n\n self.assert_(\"r2d2\" in scen.agents)\n self.assert_(\"c3po\" in scen.agents)\n self.assert_(\"michael\" in scen.agents)\n worldprob = scen.world\n r2prob = scen.agents[\"r2d2\"]\n c3prob = scen.agents[\"c3po\"]\n mprob = scen.agents[\"michael\"]\n\n self.assertEqual(len(worldprob.init), 5)\n self.assertEqual(len(r2prob.init), 5)\n self.assertEqual(len(c3prob.init), 5)\n self.assertEqual(len(mprob.init), 5)\n\n self.assertEqual(len(worldprob.objects), 7)\n self.assertEqual(len(r2prob.objects), 7)\n self.assertEqual(len(c3prob.objects), 7)\n self.assertEqual(len(mprob.objects), 7)", "def get_tr_list(slot, br_data_df, agent_full_name, agent_directory):\r\n agent_df = pd.read_csv(f'{agent_full_name}.csv', header=0, delimiter=\",\", engine='python')\r\n agents_df = agents_data()\r\n br_data_df['new_col'] = br_data_df['agent_type'].astype(str) ### esto no sé si debería cambiarlo\r\n br_data_df = br_data_df.loc[br_data_df['new_col'] == \"tc\"]\r\n br_data_df = br_data_df.reset_index(drop=True)\r\n to = str()\r\n if slot == 1:\r\n ca_location_1 = agent_df.loc[0, 'location_1']\r\n br_data_df['location_ca'] = str(ca_location_1) ### location 1!!!!\r\n br_data_df['dash'] = \"-\"\r\n br_data_df[\"from_to\"] = br_data_df[\"location_ca\"] + br_data_df[\"dash\"] + br_data_df[\"location\"]\r\n to = \"location_\" + ca_location_1 # location 1!!!!!\r\n elif slot == 2:\r\n ca_location_2 = agent_df.loc[0, 'location_2']\r\n br_data_df['location_ca'] = str(ca_location_2) ### location 2!!!!\r\n br_data_df['dash'] = \"-\"\r\n br_data_df[\"from_to\"] = br_data_df[\"location_ca\"] + br_data_df[\"dash\"] + br_data_df[\"location\"]\r\n to = \"location_\" + ca_location_2 # location 2!!!!!\r\n active_users_location_df = br_data_df\r\n ca_locations_dist_df = locations_min_distances()\r\n ca_locations_dist_df = ca_locations_dist_df[['id_min', to]]\r\n tr_list = br_data_df['from_to'].tolist()\r\n values = []\r\n keys = []\r\n for i in tr_list:\r\n a = ca_locations_dist_df.loc[ca_locations_dist_df[to] == i]\r\n id_loop = a.loc[a.index[-1], 'id_min']\r\n tr_to_loop = a.loc[a.index[-1], to]\r\n keys.append(id_loop)\r\n values.append(tr_to_loop)\r\n segment = dict(zip(keys, values))\r\n segment_df = pd.DataFrame([segment])\r\n segment_df = segment_df.T\r\n indexes = segment_df.index.values.tolist()\r\n segment_df = segment_df.rename(columns={0: \"segment\"})\r\n segment_df.insert(loc=0, column='id_min', value=indexes)\r\n segment_df = segment_df.sort_values(by=['id_min'])\r\n segment_df = segment_df.reset_index(drop=True) # segment_df contains the location of active tr and id_name sorted by shortest distance to them\r\n tr_list = active_users_location_df['agent'].tolist()\r\n jid_names = pd.DataFrame()\r\n for i in tr_list:\r\n a = agents_df.loc[agents_df['Name'] == i]\r\n jid_names = jid_names.append(a)\r\n active_users_location_df = 
active_users_location_df.rename(columns={'from_to': 'segment'})\r\n #print(f'active_users_location_df: {active_users_location_df}')\r\n #print(f'segment_df: {segment_df}')\r\n results = active_users_location_df.merge(segment_df, on='segment')\r\n\r\n results = results.rename(columns={'agent': 'Name'})\r\n results = results.merge(jid_names, on='Name')\r\n results = results.sort_values(by=['id_min'])\r\n results = results[['Name', 'location', 'segment', 'id_min', 'User name']]\r\n return results", "def create_agents() -> List[InsuranceAgent]:\n agents = []\n for consumer in range(AGENTS_COUNT):\n insurance_agent = InsuranceAgent(\n personal_info={\n AGE: FAKE.random_int(min=0, max=120),\n STATE: FAKE.state(),\n KIDS_COUNT: FAKE.random_int(min=0, max=12),\n CARS_COUNT: FAKE.random_int(min=0, max=10),\n INSURANCE_OPERATION: random.choice((RENT, BUY)),\n INCOME: FAKE.random_int(min=0, max=1000000),\n PHONE_NUMBER: FAKE.phone_number(),\n AVAILABLE: True,\n },\n call_acceptance_criteria=[\n {\n \"person_attribute\": AGE,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=120,\n ),\n },\n {\n \"person_attribute\": INCOME,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=1000000,\n ),\n },\n {\n \"person_attribute\": KIDS_COUNT,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=12,\n ),\n },\n {\n \"person_attribute\": CARS_COUNT,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=12,\n ),\n },\n {\n \"person_attribute\": INSURANCE_OPERATION,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": random.choice((RENT, BUY)),\n },\n ],\n )\n agents.append(insurance_agent)\n return agents", "def suicide_query(game_mode=0, observability=-1, game_seed=-1, agent=-1):\n\n event_id = \"death\"\n\n # Keep only those games within given configuration\n if game_seed != -1:\n selection = data.loc[(data['game_mode'] == game_mode) & (data['observability'] == observability) &\n (data['game_seed'] == game_seed)]\n else:\n selection = data.loc[(data['game_mode'] == game_mode) & (data['observability'] == observability)]\n if agent != -1:\n for index, row in selection.iterrows():\n if agent not in row[\"agents\"]:\n selection.drop(index, inplace=True)\n\n # print(selection.size)\n\n team_kill_count = []\n ngames = 0 # Number of games in which this agent dies\n suicides = 0 # Number of games in which this agent commits suicide\n events_per_sample = []\n team_kills = 0\n\n # Iterate through selected game data\n for index, row in selection.iterrows():\n if agent in row[\"agents\"] and row['event_id'] == event_id: # This agent played in the game\n\n # Find its agent ID depending on its position in the agent list. 
There may be more than 1 agent of this\n # type in the game, so iterate over all and check individually.\n ll = row[\"agents\"]\n indices = [i for i, el in enumerate(ll) if el == agent]\n\n for agent_id in indices:\n # teammate = (agent_id + 2) % 4\n sample_event_counter = 0\n for event in row[\"event_data\"]:\n if event[\"agent_id\"] == agent_id: # This agent dies\n if event[\"killer\"] == agent_id: # Suicide\n sample_event_counter += 1\n # if event[\"killer\"] == teammate: # Killed by teammate\n # team_kills += 1\n # if event[\"agent_id\"] == teammate: # Teammate dies\n # if event[\"killer\"] == agent_id: # Killed by this agent\n # team_kill_count += 1\n ngames += 1\n events_per_sample.append(sample_event_counter)\n suicides += sample_event_counter\n\n # suicide_count.append(100*suicides/ngames) # Showing percentage of game suicides\n # team_kill_count.append(100*team_kills/games)\n\n # percentage = 100 * suicides / ngames\n # mean = ngames * (percentage / 100)\n # variance = mean * (1 - (percentage / 100))\n # std_dev = math.sqrt(variance)\n # std_err = std_dev / math.sqrt(ngames)\n # h = std_err * scipy.stats.t.ppf(1.95 / 2., ngames - 1) # 95 confidence interval\n # return percentage, h\n\n # print(events_per_sample)\n mean = suicides/ngames\n variance = sum([pow(x - mean, 2) for x in events_per_sample])/len(events_per_sample)\n std_dev = math.sqrt(variance)\n std_err = std_dev/math.sqrt(len(events_per_sample))\n h = std_err * scipy.stats.t.ppf(1.95 / 2., ngames - 1) # 95% confidence interval\n return mean * 100, h * 100 # , team_kill_count", "def intents_clustering(self):\n self.phrs2intents = {}\n number_of_other = 10000;\n for i in range(len(self.data)):\n for ut in self.data[i]['utterances']:\n if ut['speaker'] == 'USER':\n if 'segments' in ut.keys():\n for seg in ut['segments']:\n if 'annotations' in seg.keys():\n for anno in seg['annotations']:\n name = anno['name']\n if ut['text'] not in self.phrs2intents.keys():\n self.phrs2intents[ ut['text'] ] = [name]\n elif name not in self.phrs2intents[ ut['text'] ]:\n self.phrs2intents[ ut['text'] ].append(name)\n else:\n if number_of_other > 0:\n self.phrs2intents[ ut['text'] ] = ['other']\n number_of_other -= 1\n self.X = np.array(list(self.phrs2intents.keys()))", "def get_num_cams(self, data):\n return self.parse_data(data)[1]", "def vehicle_registration_number_for_driver_of_age(data):\n age = int(data['driver_age'])\n vehicle_numbers = []\n for i in range(1, PARKING_LOT[0] + 1):\n if PARKING_LOT[i].vehicle_driver_age == age:\n vehicle_numbers.append(PARKING_LOT[i].vehicle_registration_number)\n return str(vehicle_numbers).strip('[]').replace(' ', '') if vehicle_numbers else 'null'", "def server_agent_statistics(ctx):\n data = ctx.obj.get_agent_statistics()\n output_json_data(data)", "def extract_number_target_genes(d, g, miranda_fnm):\n\toutput_nm = \"%s_counts.txt\"%(miranda_fnm[:-4])\n\toutput = open(output_nm, 'w')\n\toutput.write(\"miRNA\\ttotal_target_genes\\ttarget_genes_down\\ttarget_genes\\n\")\n\tfor key in d:\n\t\tif len(d[key]) > 0:\n\t\t\t#print key, len(d[key])\n\t\t\toutput.write(\"%s\\t%s\\t%s\\t%s\\n\"%(key, len(d[key]), \n\t\t\t\tlen(g[key]), str(g[key]).strip('[]')))\n\n\toutput.close()", "def _num_nodes(self):\n return len(self._nid2partid)", "def leadersmorts_dpt(source):\n\n with open(source,encoding=\"utf-8\",mode=\"r\") as file:\n nbmpd= {}\n line = file.readline() # Lit la ligne d'entete\n for line in file:\n line = line.strip()\n if len(line) > 0:\n l = line.split(\";\")\n leader = netoyer_donnees(l)\n\n # tri 
departements\n if leader[\"Departamento\"] in nbmpd:\n nbmpd[leader[\"Departamento\"]] = nbmpd[leader[\"Departamento\"]]+1\n else:\n nbmpd[leader[\"Departamento\"]] = 1\n\n return nbmpd", "def split_heroes(self, data):\n heroes = []\n try:\n for chunk in data.split(','):\n # HeroID, 10/J/Q/K/A, face of card\n gid, rank, face = chunk.split('|')\n heroes.append({'gid':gid, 'rank':rank, 'face':face})\n except ValueError:\n pass\n return heroes", "def num_links(self):\n count=0.0\n for cluster in self.clusters:\n if self.clusters[cluster] == self.clusters[cluster].antecessor:\n numberofmembers=self.clusters[cluster].number_of_members\n count+=numberofmembers\n return count", "def get_n_splits(self):\n pass", "def count_extracted(j_data):\n count = 0\n for record in j_data:\n tmp = {}\n desc = record['lcr_desc'].lower().split('/')\n title = desc[0]\n cat = category(title)\n if cat and 'location' in record:\n count += 1\n return count", "def get_wh_list(br_data_df, agent_full_name, agent_directory):\r\n agent_df = pd.read_csv(f'{agent_full_name}.csv', header=0, delimiter=\",\", engine='python')\r\n agents_df = agents_data()\r\n br_data_df['new_col'] = br_data_df['agent_type'].astype(str) ### esto no sé si debería cambiarlo\r\n br_data_df = br_data_df.loc[br_data_df['new_col'] == \"wh\"]\r\n br_data_df = br_data_df.reset_index(drop=True)\r\n to = str()\r\n ca_location_2 = agent_df.loc[0, 'location_2']\r\n br_data_df['location_ca'] = str(ca_location_2) ### location 2!!!!\r\n br_data_df['dash'] = \"-\"\r\n br_data_df[\"from_to\"] = br_data_df[\"location_ca\"] + br_data_df[\"dash\"] + br_data_df[\"location\"]\r\n to = \"location_\" + ca_location_2 # location 2!!!!!\r\n active_users_location_df = br_data_df\r\n ca_locations_dist_df = locations_min_distances()\r\n ca_locations_dist_df = ca_locations_dist_df[['id_min', to]]\r\n wh_list = br_data_df['from_to'].tolist()\r\n values = []\r\n keys = []\r\n for i in wh_list:\r\n a = ca_locations_dist_df.loc[ca_locations_dist_df[to] == i]\r\n id_loop = a.loc[a.index[-1], 'id_min']\r\n tr_to_loop = a.loc[a.index[-1], to]\r\n keys.append(id_loop)\r\n values.append(tr_to_loop)\r\n segment = dict(zip(keys, values))\r\n segment_df = pd.DataFrame([segment])\r\n segment_df = segment_df.T\r\n indexes = segment_df.index.values.tolist()\r\n segment_df = segment_df.rename(columns={0: \"segment\"})\r\n segment_df.insert(loc=0, column='id_min', value=indexes)\r\n segment_df = segment_df.sort_values(by=['id_min'])\r\n segment_df = segment_df.reset_index(drop=True) # segment_df contains the location of active tr and id_name sorted by shortest distance to them\r\n tr_list = active_users_location_df['agent'].tolist()\r\n jid_names = pd.DataFrame()\r\n for i in tr_list:\r\n a = agents_df.loc[agents_df['Name'] == i]\r\n jid_names = jid_names.append(a)\r\n active_users_location_df = active_users_location_df.rename(columns={'from_to': 'segment'})\r\n print(f'active_users_location_df: {active_users_location_df}')\r\n print(f'segment_df: {segment_df}')\r\n results = active_users_location_df.merge(segment_df, on='segment')\r\n results = results.rename(columns={'agent': 'Name'})\r\n results = results.merge(jid_names, on='Name')\r\n results = results.sort_values(by=['id_min'])\r\n results = results[['Name', 'location', 'segment', 'id_min', 'User name']]\r\n return results", "def prepare_count_incidents(self, object):\n roles = object.actorrole_set.all()\n return Incident.objects.filter(actors_role__in=roles).count()", "def count_passages(self, step, count):\r\n count = int(count)\r\n 
assert_equals(len(world.css_find('.annotatable-span')), count)\r\n assert_equals(len(world.css_find('.annotatable-span.highlight')), count)\r\n assert_equals(len(world.css_find('.annotatable-span.highlight-yellow')), count)", "def create_data_model(count):\n # matrix_data = pd.read_csv(''.join([filepath,'site_latlong.csv']), index_col=\"SAP_ID\")\n matrix_data = pd.read_csv('newdata.csv')\n matrix = matrix_data.values.tolist()\n data = {}\n data['distance_matrix'] = []\n for i in matrix: \n data['distance_matrix'].append(i)\n #data['vehicle_capacities'] = [100, 100, 100, 100,100]\n data['num_vehicles'] = 10\n data['depot'] = 0\n data['demands'] = []\n for i in range (count):\n data['demands'].append(1)\n \n num = math.ceil(count/data['num_vehicles'] + 2)\n print(num)\n data['vehicle_capacities'] = []\n for i in range (data['num_vehicles']):\n data['vehicle_capacities'].append(num)\n \n #data['vehicle_capacities'] = [20,20, 20, 20,20,20,20,20,20,20]\n return data", "def get_num_pids(self, data):\n pids = set()\n for items in data:\n pid = items[1]\n pids.add(pid)\n return len(pids)", "def _prepare_agents_vals_partner(self, partner):\n return [(0, 0, self._prepare_agent_vals(purchase_agent)) for purchase_agent in partner.purchase_agent_ids]", "async def test_nr_of_metrics(self):\n response = await self.collect(get_request_json_side_effect=[self.data_model, self.reports])\n self.assert_measurement(\n response,\n value=str(len(self.entities)),\n total=self.expected_software_metrics,\n entities=self.entities,\n )", "def NAcc(row):\n m = Chem.MolFromSmiles(row.SMILES)\n acceptors = Descriptors.NumHAcceptors(m)\n return acceptors", "def Test_NumSegments(Daten):\n N_Leitungen = len(Daten.PipeSegments)\n\n return N_Leitungen", "def display_agents(self):\n for agent in self.scheduler.agents:\n id_ = agent.id_\n p = agent.mobility.current\n x, y = to_geometry(p[0]), to_geometry(p[1])\n r = to_geometry(agent.range_)\n print('define agent{} ellipse 4 4 white {} {}'.format(id_, x, y))\n print('define agentr{0} ellipse {1} {1} white {2} {3}'.format(\n id_, r, x, y))\n self.change_agent_status(agent)", "def lives_counter(self):\n count = 15\n for row in self.board:\n for column in row:\n if column == HITSHIP:\n count -= 1\n self.lives = count\n return self.lives", "def count_persons(self):\n\n # full path of personData file\n path = self.pretty_print_path()\n\n data = self.person_data[[\"person_id\", \"telestatus\"]].copy()\n\n conditions = [(data[\"telestatus\"] == \"Not a Worker\"),\n (data[\"telestatus\"].isin([\"No Telecommute\",\n \"1 day a week\",\n \"2-3 days a week\",\n \"4+ days a week\",\n \"Work from Home\"]))]\n choices = [\"Non-worker\", \"Worker\"]\n data[\"telestatus\"] = np.select(conditions, choices, default=data[\"telestatus\"])\n\n counts = data.groupby([\"telestatus\"]).count()\n counts.loc['Total'] = counts.sum()\n counts.reset_index(inplace=True)\n counts['File'] = path # add file name\n\n results = counts.pivot(index=\"File\", columns=\"telestatus\", values=\"person_id\")\n\n # add percentage share\n counts[\"Total\"] = data.person_id.count()\n counts[\"share\"] = counts.person_id / counts.Total\n percent_share = counts.pivot(index=\"File\", columns=\"telestatus\", values=\"share\")\n\n results = results.append(percent_share, ignore_index=False, sort=True)\n\n cols = [\"Worker\",\n \"Non-worker\",\n \"Total\"]\n\n return results[cols]", "def count_unvisited(data):\n count = sum(n.count(\"n\") for n in data)\n return count", "def tx_counts(self) -> Dict[str, Dict[str, int]]:\n 
agent_pbk_to_name = self.game.configuration.agent_pbk_to_name\n result = {agent_name: 0 for agent_name in agent_pbk_to_name.values()}\n results = {\"seller\": result.copy(), \"buyer\": result.copy()}\n\n temp_game = Game(self.game.configuration, self.game.initialization)\n\n # compute the partial scores for every agent after every transaction\n # (remember that indexes of the transaction start from one, because index 0 is reserved for the initial scores)\n for idx, tx in enumerate(self.game.transactions):\n temp_game.settle_transaction(tx)\n results[\"seller\"][agent_pbk_to_name[tx.seller_pbk]] += 1\n results[\"buyer\"][agent_pbk_to_name[tx.buyer_pbk]] += 1\n\n return results", "def form_agents(n, r, a, b, agents):\n for a_ind, b_ind in izip(a, b):\n #util_fn = random.choice([util.Linear, util.CES, util.CobbDouglas])\n #util_fn = util_fn.rand(p, n)\n #util_fn = util.Linear.rand(n, r, a_ind)\n\n agent = Agent.rand(util.Linear, n, r, a_ind, b_ind)\n agents.add(agent)", "def part_one(answer_data):\n group_counts = []\n for answer_group in parse_groups(answer_data):\n group_counts.append(count_answer_set_union(answer_group))\n return sum(group_counts)", "def agency_parse(sents):\r\n agency_list = []\r\n for sent in sents:\r\n agency_out = {\"sent\": [], \"subj\": [], \"process\": [], \"dobj\": [], \"propj\": []}\r\n #output = sent + \"\\n\"\r\n parsed = nlp(sent)\r\n for token in parsed:\r\n agency_out[\"sent\"].append(token.text)\r\n if token.dep_ == \"nsubj\":\r\n agency_out[\"subj\"].append(token.text)\r\n elif token.dep_ == \"ROOT\":\r\n agency_out[\"process\"].append(token.text)\r\n elif token.dep_ == \"dobj\":\r\n agency_out[\"dobj\"].append(token.text)\r\n elif token.dep_ == \"propj\":\r\n agency_out[\"propj\"].append(token.text)\r\n agency_list.append(agency_out)\r\n return agency_list\r\n\r\n #displacy.serve(nlp(sent), style=\"dep\")\r", "def get_nucliators_num_and_proba(self):\n XY = self.XY\n TIMES = self.die_times\n # CHEN'S IMPLEMENTATION\n # nucliators = np.array([True for i in range(len(TIMES))])\n # leaders = np.array([-1 for i in range(len(TIMES))])\n # cells_idx_sorted_by_times = np.arange(0, len(TIMES), 1)\n # for cell_idx in cells_idx_sorted_by_times:\n # # nucliators[cell_idx] = True\n # cell_death = TIMES[cell_idx]\n # neighbors_prior_death = [True for i in range(len(self.neighbors_list[cell_idx]))]\n # for neighbor_idx in self.neighbors_list[cell_idx]:\n # # if nucliators[cell_idx] == True:\n # # break\n # neighbor_death = TIMES[neighbor_idx]\n # if cell_death > neighbor_death:# and leaders[cell_idx] == -1:\n # nucliators[cell_idx] = False\n # # leaders[cell_idx] = cell_idx\n # elif cell_death == neighbor_death and not nucliators[neighbor_idx]:\n # nucliators[cell_idx] = False\n # leaders[cell_idx] = cell_idx\n # else:\n # nucliators[cell_idx] = True\n # # if leaders[neighbor_idx] != -1:\n # # leaders[cell_idx] = leaders[neighbor_idx]\n #\n # self.nucliators = nucliators\n # self.nucliators_num = nucliators.sum()\n # self.nucliation_proba = self.nucliators_num / len(XY)\n\n # MY IMPLEMENTATION\n self.nucliators = self.nucliators_counter.calc_nucliators()\n self.nucliators_num = self.nucliators.sum()\n self.nucliation_proba = self.nucliators_num / len(self.XY)", "def count_genotypes(genotypeList,StateGenPosData, x, y):\r\n allMos = 0\r\n nonEggs = 0\r\n Adults = 0\r\n for i in range(len(genotypeList)):\r\n gt = genotypeList[i]\r\n b = sum(1 for item in StateGenPosData if not 'new' in item[0] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y))\r\n c = 
sum(1 for item in StateGenPosData if 'adult' in item[0] and 'XX' in item[1] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y))\r\n d = sum(1 for item in StateGenPosData if 'adult' in item[0] and gt in item[1] and item[2]==(x,y))\r\n## for item in StateGenPosData:\r\n## print(item[0],item[1],item[2])\r\n## if 'adult' in item[0] and gt in item[1] and item[2]==(x,y):\r\n## d+=1\r\n## print('yay')\r\n## if not 'new' in item[0] and not 'egg' in item[0] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y):\r\n## c+=1\r\n## if not 'new' in item[0] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y):\r\n## b+=1\r\n allMos = allMos + b\r\n nonEggs = nonEggs + c\r\n Adults = Adults + d\r\n return allMos, nonEggs, Adults", "def write_csv(self, agent_num):\n data = []\n data.append(self.t)\n data.append(self.w_EDR)\n data.append(self.w_RESOURCE)\n data.append(self.w_DISTANCE)\n data.append(agent_num)\n for task_num, task in enumerate(self.tasks):\n vectorized_task_loc = self.get_vectorized_location(task.getloc())\n is_occupied = self.agent_locations[0][vectorized_task_loc] # 1 if occupied\n data.append(is_occupied)\n # data.extend(np.ndarray.tolist(self.agent_locations)) # Feature 1\n data.extend(np.ndarray.tolist(self.is_task_finished)) # Feature 2\n data.extend(np.ndarray.tolist(self.is_task_enabled)) # Feature 3\n data.extend(np.ndarray.tolist(self.is_task_alive)) # Feature 4\n data.extend(np.ndarray.tolist(self.travel_time_constraint_satisfied[agent_num])) # Feature 5\n data.extend(self.is_agent_idle[agent_num]) # Feature 6\n data.extend(np.ndarray.tolist(self.agent_distances[agent_num])) # Feature 7\n for task_num, task in enumerate(self.tasks):\n vectorized_task_loc = self.get_vectorized_location(task.getloc())\n tasks_in_each_square = self.how_many_tasks_in_each_square[0][vectorized_task_loc] # 1 if occupied\n data.append(tasks_in_each_square)\n # data.extend(np.ndarray.tolist(self.how_many_tasks_in_each_square)) # Feature 8\n data.extend(np.ndarray.tolist(self.orientation[agent_num])) # Feature 9\n data.extend(np.ndarray.tolist(self.task_deadlines)) # Feature 10\n data.extend(np.ndarray.tolist(self.is_task_in_progress)) # Feature 11\n data.extend(np.ndarray.tolist(self.orientation[agent_num] * self.agent_distances[agent_num])) # Feature 12\n data.append(self.task_to_schedule) # Output\n self.naive_total_data.append(data)\n # with open('1_schedule.csv', 'a') as outfile:\n # writer = csv.writer(outfile)\n # writer.writerow(data)", "def count_male_teams(self):\n return len(self.df['Adult male 11v11 (16-45)'].dropna())", "def get_coil_list(br_data_df, agent_full_name, agent_directory):\r\n agent_df = pd.read_csv(f'{agent_full_name}.csv', header=0, delimiter=\",\", engine='python')\r\n agents_df = agents_data()\r\n br_data_df['new_col'] = br_data_df['agent_type'].astype(str) ### esto no sé si debería cambiarlo\r\n br_data_df = br_data_df.loc[br_data_df['new_col'] == \"coil\"]\r\n to = str()\r\n ca_location_1 = agent_df.loc[0, 'location_1']\r\n br_data_df['location_ca'] = str(ca_location_1) ### location 1!!!!\r\n br_data_df['dash'] = \"-\"\r\n br_data_df[\"from_to\"] = br_data_df[\"location_ca\"] + br_data_df[\"dash\"] + br_data_df[\"location\"]\r\n to = \"location_\" + ca_location_1 # location 1!!!!!\r\n active_users_location_df = br_data_df\r\n ca_locations_dist_df = locations_min_distances()\r\n ca_locations_dist_df = ca_locations_dist_df[['id_min', to]]\r\n tr_list = br_data_df['from_to'].tolist()\r\n values = []\r\n keys = []\r\n for i in 
tr_list:\r\n a = ca_locations_dist_df.loc[ca_locations_dist_df[to] == i]\r\n id_loop = a.loc[a.index[-1], 'id_min']\r\n tr_to_loop = a.loc[a.index[-1], to]\r\n keys.append(id_loop)\r\n values.append(tr_to_loop)\r\n segment = dict(zip(keys, values))\r\n segment_df = pd.DataFrame([segment])\r\n segment_df = segment_df.T\r\n indexes = segment_df.index.values.tolist()\r\n segment_df = segment_df.rename(columns={0: \"segment\"})\r\n segment_df.insert(loc=0, column='id_min', value=indexes)\r\n segment_df = segment_df.sort_values(by=['id_min'])\r\n segment_df = segment_df.reset_index(drop=True) # segment_df contains the location of active tr and id_name sorted by shortest distance to them\r\n tr_list = active_users_location_df['agent'].tolist()\r\n jid_names = pd.DataFrame()\r\n for i in tr_list:\r\n a = agents_df.loc[agents_df['Name'] == i]\r\n jid_names = jid_names.append(a)\r\n active_users_location_df = active_users_location_df.rename(columns={'from_to': 'segment'})\r\n print(f'active_users_location_df: {active_users_location_df}')\r\n print(f'active_users_location_df: {segment_df}')\r\n results = active_users_location_df.merge(segment_df, on='segment')\r\n results = results.rename(columns={'agent': 'Name'})\r\n results = results.merge(jid_names, on='Name')\r\n results = results.sort_values(by=['id_min'])\r\n results = results[['Name', 'location', 'segment', 'id_min', 'User name']]\r\n return results", "def __node_rep(self):\n node_list_dict = {}\n for (i, beam) in enumerate(self.beams):\n if str(beam['n1']) not in node_list_dict.keys():\n node_list_dict[str(beam['n1'])] = 1\n else:\n node_list_dict[str(beam['n1'])] += 1\n if str(beam['n2']) not in node_list_dict.keys():\n node_list_dict[str(beam['n2'])] = 1\n else:\n node_list_dict[str(beam['n2'])] += 1\n return node_list_dict", "def agents_cleanup(agents, n) -> set:\n return set(agent for agent in agents if agent[0] < n and agent[1] < n)", "def break_count(self):\n return len(self.link_ids) + len(self.crossring_cleavages)", "def parse_counts(data):\r\n \r\n results = {}\r\n region_year = np.stack([data[0], data[4].astype('datetime64[Y]').astype(int) + 1970], axis=0)\r\n region_years, counts = np.unique(region_year, return_counts=True, axis=1)\r\n region_years_counts = list(zip(region_years[1], region_years[0], counts))\r\n results['years'] = np.unique(region_year[1])\r\n results['regions'] = np.unique(region_year[0])\r\n for year in results['years']:\r\n results[year] = np.array(get_counts_for_year(region_years_counts, year))\r\n return results", "def number_of_data_nodes(self):\n return int(self._data['number_of_data_nodes'])", "def counterdict(self):\n vas = []\n file = self.read1()\n for line in file:\n line = line.strip()\n string = re.sub(\"[^0-9a-zA-Z]\", \" \", line).split(\" \")\n for s_i in string:\n if s_i != \"\":\n vas.append(s_i)\n for ele in enumerate(vas):\n print(ele)\n logging.debug(\"Starting with to\")", "def generate_feature_counts(traj, mdp):\n #count each time a state was visited \n counts = Counter({feature:0 for feature in mdp.features})\n for state,action in traj:\n counts[mdp.observe_features(state)] += 1\n \n return [counts[feature] for feature in mdp.features]", "def get_country_count():\n numbers=country_populations.split('\\n')\n count_numbers= len(numbers)-1\n return count_numbers", "def _construct_agents(self, agent_coords, static_map):\n team_blue = []\n team_red = []\n\n Class = {\n TEAM1_UAV : (AerialVehicle, TEAM1_BACKGROUND),\n TEAM2_UAV : (AerialVehicle, TEAM2_BACKGROUND),\n TEAM1_UGV : (GroundVehicle, 
TEAM1_BACKGROUND),\n TEAM2_UGV : (GroundVehicle, TEAM2_BACKGROUND),\n TEAM1_UGV2: (GroundVehicle_Tank, TEAM1_BACKGROUND),\n TEAM2_UGV2: (GroundVehicle_Tank, TEAM2_BACKGROUND),\n TEAM1_UGV3: (GroundVehicle_Scout, TEAM1_BACKGROUND),\n TEAM2_UGV3: (GroundVehicle_Scout, TEAM2_BACKGROUND),\n TEAM1_UGV4: (GroundVehicle_Clocking, TEAM1_BACKGROUND),\n TEAM2_UGV4: (GroundVehicle_Clocking, TEAM2_BACKGROUND),\n }\n\n for element, coords in agent_coords.items():\n if coords is None: continue\n for coord in coords:\n Vehicle, team_id = Class[element]\n cur_ent = Vehicle(coord, static_map, team_id, element)\n if team_id == TEAM1_BACKGROUND:\n team_blue.append(cur_ent)\n elif team_id == TEAM2_BACKGROUND:\n team_red.append(cur_ent)\n\n return team_blue, team_red", "def number_agents_required(self):\n return self._get('number_agents_required')", "def num_ad_pruning(edge_list=\n path+'connected-component-analysis-round2/network-profiling-data/cid6_analysis/cid6-edge-list',\n worker_ads_file = path+'connected-component-analysis-round2/network-profiling-data/cid6_analysis/worker-ads-int-dict.json'):\n G = nx.read_edgelist(edge_list, delimiter='\\t')\n worker_ints = json.load(open(worker_ads_file, 'r'))\n print nx.info(G)\n threshold = 16\n count = 0\n forbidden_phones = set()\n # with codecs.open(edge_phone_count, 'r', 'utf-8') as f:\n # for line in f:\n # obj = json.loads(line[0:-1])\n # if int(obj.keys()[0]) >= threshold:\n # forbidden_phones = forbidden_phones.union(set(obj[obj.keys()[0]]))\n # with codecs.open(phone_edge_list, 'r', 'utf-8') as f:\n # for line in f:\n # fields = re.split('\\t', line[0:-1])\n # phones = set(fields[2:])\n # if len(phones.intersection(forbidden_phones)) != 0:\n # count += 1\n # G.remove_edge(fields[0], fields[1])\n H = nx.Graph()\n for e in G.edges:\n if e[0] not in worker_ints or e[1] not in worker_ints:\n raise Exception\n else:\n if len(worker_ints[e[0]]) < threshold and len(worker_ints[e[1]]) < threshold:\n H.add_edge(e[0], e[1])\n else:\n count += 1\n print str(count),' edges pruned from graph'\n print nx.info(H)\n ccs = sorted(nx.connected_components(H), key=len, reverse=True)\n print len(ccs)\n print len(ccs[0])", "def split(self, how, nr_agents, **kwargs):\n if how == 'random':\n self.random_split(nr_agents)\n elif how == 'uniform':\n self.uniform_split(nr_agents)\n elif how == 'non_iid_uniform':\n self.non_iid_split(nr_agents, kwargs['class_per_node'], random=False)\n elif how == 'non_iid_random':\n self.non_iid_split(nr_agents, kwargs['class_per_node'], random=True)\n\n return self.get_data()", "def obstacle_count(self):\n self.wide_scan()\n found_something = False\n counter = 0\n for distance in self.scan:\n if distance and distance < 200 and not found_something:\n found_something = True\n counter += 1\n print(\"Object # %d found, I think\" % counter)\n if distance and distance > 200 and found_something:\n found_something = False\n print(\"\\n----I SEE %d OBJECTS----\\n\" % counter)", "def get_number_of_persons(self):\n self.__load_persons_from_file_into_memory()\n return super().get_number_of_persons()", "def _count_occupied_seats(grid: List[List[str]]) -> int:\n total = 0\n for row in grid:\n total += row.count('#')\n return total", "def iterate_data(self):\n if \"single\" in self.dataset_name:\n # Index 0 for list of sentence lengths, index 1 for list of token lengths\n self.stat_dict = {'question': [[], []], 'summary': [[], []], 'article': [[], []]}\n for answer_id in self.data:\n summary = self.data[answer_id]['summary']\n articles = 
self.data[answer_id]['articles']\n question = self.data[answer_id]['question']\n if args.tokenize:\n self._get_token_cnts(summary, 'summary')\n self._get_token_cnts(articles, 'article')\n self._get_token_cnts(question, 'question')\n self._write_stats(\"token_counts\")\n\n if \"multi\" in self.dataset_name:\n self.stat_dict = {'question': [[], []], 'summary': [[], []], 'article': [[], []]}\n for q_id in self.data:\n summary = self.data[q_id]['summary']\n question = self.data[q_id]['question']\n if args.tokenize:\n self._get_token_cnts(summary, 'summary')\n self._get_token_cnts(question, 'question')\n question = self.data[q_id]['question']\n for answer_id in self.data[q_id]['articles']:\n articles = self.data[q_id]['articles'][answer_id][0]\n if args.tokenize:\n self._get_token_cnts(articles, 'article')\n self._write_stats(\"token_counts\")\n\n if self.dataset_name == \"complete_dataset\":\n self.stat_dict = {'urls': [], 'sites': []}\n article_dict = {}\n print(\"Counting answers, sites, unique urls, and tokenized counts of unique articles\")\n answer_cnt = 0\n for q_id in self.data:\n for a_id in self.data[q_id]['answers']:\n answer_cnt += 1\n url = self.data[q_id]['answers'][a_id]['url']\n article = self.data[q_id]['answers'][a_id]['article']\n if url not in article_dict:\n article_dict[url] = article\n self.stat_dict['urls'].append(url)\n assert \"//\" in url, url\n site = url.split(\"//\")[1].split(\"/\")\n self.stat_dict['sites'].append(site[0])\n print(\"# of Answers:\", answer_cnt)\n print(\"Unique articles: \", len(article_dict)) # This should match up with count written to file\n self._write_stats(\"full collection\")\n\n # Get token/sent averages of unique articles\n if args.tokenize:\n self.stat_dict = {'article': [[], []]}\n for a in article_dict:\n self._get_token_cnts(article_dict[a], 'article')\n self._write_stats(\"token_counts\")", "def num_trials(self):", "def obstacle_count(self):\n #scan area in front of robot\n self.scan()\n #Figure ot how many obstacles there were\n see_an_object = False\n count = 0", "def num_atoms(self):\n return self.h5['{}/{}'.format(SETTINGS, N_ATOMS)][()]", "def analyze_count(data):\n\n dsct_vk = pd.unique(data['vk'])\n dsct_itemid = pd.unique(data['itemid'])\n\n print 'number of user:', dsct_vk.shape\n print 'number of items:', dsct_itemid.shape\n print 'the number of ratings:', data.shape\n\n print 'unique actions:', pd.unique(data['action'])\n print 'the number of action 0:', np.sum(data['action'] == 0)\n print 'the number of action 1:', np.sum(data['action'] == 1)\n print 'the number of action 2:', np.sum(data['action'] == 2)\n print 'the number of action 3:', np.sum(data['action'] == 3)\n print 'the number of action 4:', np.sum(data['action'] == 4)\n \n time_range_item = data.groupby('itemid')['real_time'].aggregate(sum_unique)\n print 'Max Range:', np.max(time_range_item)\n print 'Mean Range:', np.mean(time_range_item)\n print 'Median Range:', np.median(time_range_item)", "def count_vario(dist_param, picker_param):\n orig = '/home/zby/MAGISTERKA/MGR/results/oryginal.clustered.t'\n cl_orig = read_clustered(orig)\n name_tag = ''\n ndist = dist_param[1:]\n npick = picker_param[1:]\n for index in drange(5, 20, 0.5):\n name_tag = \"{}_{}_{}\".format(index, npick, ndist)\n try:\n clust2 = read_clustered(tfidf_name('merged.stem{}.stop.clustered.t', name_tag))\n except:\n print(\"no data for {}\".format(name_tag))\n continue\n var, norm = variation_of_information(cl_orig, clust2)\n print(\" {} VOI is {}\".format(name_tag, norm))" ]
[ "0.6075499", "0.60177237", "0.5733644", "0.56019175", "0.551722", "0.5478866", "0.5476945", "0.5474742", "0.5418446", "0.53685576", "0.5309423", "0.530098", "0.530098", "0.5294951", "0.5268969", "0.525845", "0.52577895", "0.52215326", "0.5189219", "0.51605994", "0.5125644", "0.511004", "0.5101211", "0.509693", "0.50928366", "0.5087729", "0.50860715", "0.50832605", "0.5072151", "0.5067862", "0.503516", "0.5019617", "0.5005804", "0.5004667", "0.49878612", "0.4984268", "0.49823648", "0.49646842", "0.49611843", "0.4944255", "0.4932886", "0.4927993", "0.48934382", "0.48871496", "0.4884752", "0.4828771", "0.48284742", "0.4817431", "0.48141375", "0.48135474", "0.4810663", "0.4810349", "0.4809624", "0.48071045", "0.48035923", "0.47908017", "0.47819605", "0.47791788", "0.47784987", "0.47728813", "0.47676563", "0.47675204", "0.47654507", "0.4762921", "0.47573784", "0.47572064", "0.47559813", "0.47544587", "0.4751393", "0.4746854", "0.47431841", "0.4737365", "0.47354034", "0.47284865", "0.47255155", "0.47244155", "0.4719856", "0.47093183", "0.47091562", "0.47077122", "0.47023708", "0.4698323", "0.46970066", "0.46959394", "0.46941033", "0.4687605", "0.46863377", "0.4685985", "0.46841893", "0.4665245", "0.4664503", "0.46627915", "0.46622643", "0.4659357", "0.46557048", "0.4642666", "0.46353564", "0.46343347", "0.463386", "0.46287486", "0.4623294" ]
0.0
-1
Different ways to split data between nodes.
def split(self, how, nr_agents, **kwargs):
    if how == 'random':
        self.random_split(nr_agents)
    elif how == 'uniform':
        self.uniform_split(nr_agents)
    elif how == 'non_iid_uniform':
        self.non_iid_split(nr_agents, kwargs['class_per_node'], random=False)
    elif how == 'non_iid_random':
        self.non_iid_split(nr_agents, kwargs['class_per_node'], random=True)

    return self.get_data()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __split_node(self, cur_node):\n temp = self.Node(cur_node.data_list[len(cur_node.data_list) / 2:], cur_node.next_node)\n cur_node.data_list = cur_node.data_list[:len(cur_node.data_list) / 2]\n cur_node.next_node = temp\n\n if cur_node == self.tail:\n self.tail = cur_node.next_node", "def split(self):\n left = BPlusNode(self.order)\n right = BPlusNode(self.order)\n mid = self.order // 2\n\n left.keys = self.keys[:mid]\n left.values = self.values[:mid]\n\n right.keys = self.keys[mid:]\n right.values = self.values[mid:]\n\n # When the node is split, set the parent key to the left-most key of the right child node.\n self.keys = [right.keys[0]]\n self.values = [left, right]\n self.leaf = False", "def split_network(self):\n disconnect_nodes(self.nodes[1], 2)\n disconnect_nodes(self.nodes[2], 1)\n self.sync_all([self.nodes[:2], self.nodes[2:]])", "def split(self, X):", "def split(self, place_leaf_splitted):\n raise NotImplementedError", "def convert_split(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n axis = op.input(\"AxisTensor\")\n if axis:\n axis = g.get_node(axis[0])\n axis, infered = try_infer_value(axis, g.get_params())\n if infered:\n axis = axis.tolist()[0]\n else:\n axis = op.attr(\"axis\")\n\n sections = op.input(\"SectionsTensorList\")\n if sections:\n tmp_section = []\n for i in sections:\n i = g.get_node(i)\n i, infered = try_infer_value(i, g.get_params())\n if infered:\n i = i.tolist()\n else:\n raise ValueError(\"Dynamic Split not yet supported.\")\n tmp_section.extend(i)\n sections = tmp_section\n else:\n sections = op.attr(\"sections\")\n if sections:\n indices = []\n split_index = 0\n for i in sections[:-1]:\n if i == -1:\n input_shape = infer_shape(x)[axis]\n i = input_shape - np.sum(sections) - 1\n split_index += i\n indices.append(split_index)\n else:\n indices = op.attr(\"num\")\n\n out = _op.split(x, indices, axis)\n for i, out_i in enumerate(out):\n g.add_node(op.output(\"Out\")[i], out_i)", "def split_data(basedir, data_split=0.80):\n manip = data_manipulator(basedir)\n manip.train_test_split(data_split=data_split)", "def split(self, block: ghidra.program.model.mem.MemoryBlock, addr: ghidra.program.model.address.Address) -> None:\n ...", "def train_test_data_split(node_features, labels, train_ratio=0.8):\n num_graph = node_features.shape[0]\n train_test_split = int(train_ratio*num_graph)\n x_train = node_features[:train_test_split,:,:] \n y_train = labels[:train_test_split,:,:] \n x_test = node_features[train_test_split:,:,:] \n y_test = labels[train_test_split:,:,:]\n np.save(\"data/node_features_train.npy\", x_train)\n np.save(\"data/node_features_test.npy\", x_test)\n np.save(\"data/labels_train.npy\", y_train)\n np.save(\"data/labels_test.npy\", y_test)\n return x_train, x_test, y_train, y_test", "def split_node(node: saldag.OpNode):\n\n # Only dealing with single child case for now\n assert (len(node.children) <= 1)\n clone = copy.deepcopy(node)\n clone.out_rel.rename(node.out_rel.name + \"_obl\")\n clone.parents = set()\n clone.children = set()\n clone.is_mpc = True\n child = next(iter(node.children), None)\n saldag.insert_between(node, child, clone)", "def split_data(x, weight = 0.9):\n offset = int(len(x) * weight)\n return x[:offset], x[offset:]", "def _create_split(cls, onnx_node, inputs, opset_version):\n axis = onnx_node.getattr(\"axis\", 0)\n split = onnx_node.getattr(\"split\", None)\n num_output = len(onnx_node.outputs)\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(axis, split, 
num_output)", "def split(self, node):\n # check node was not already split\n if node.has_children:\n raise SplitError(\"BUG: node was already split\")\n\n # early stopping (only if enough nodes already)\n if self.n_clusters >= self.min_leaves:\n # make a leaf if too small to split\n if node.size <= 2 * self.min_leaf_size:\n return None, None\n # special case: make a leaf if too deep already\n if len(node.name) > self.max_depth:\n # int(node.name, 2) is too big to be represented as a long (int64)\n # if len(node.name > 62)\n sys.stderr.write('# WARNING: early stopping too deep branch'\n ' {}\\n'.format(node.name))\n sys.stderr.flush()\n return None, None\n\n # bi-partition the node's samples\n if self.split_type == \"kmeans\":\n left, right = self._split_kmeans(node)\n else:\n left, right = self._split_threshold(node)\n\n # check if we have two leaves or none\n if (left is None and right is not None) or (left is not None and right is None):\n raise SplitError(\n \"BUG: both children should be simultaneously\"\n \"either None or not\")\n\n # check the post-conditions\n if left is None or right is None:\n # node is a leaf\n if node.has_children:\n raise SplitError(\"BUG: leaf node marked with (empty) children\")\n # check if it must have been split instead of being a leaf\n if node.size > self.max_leaf_size:\n # force the split\n left, right = self._split_forced(node)\n msg = 'WARNING: forced to split a must-split node that was'\n msg += ' too big to be a leaf ({0} > max_leaf_size={1})\\n'\n sys.stderr.write(msg.format(node.size, self.max_leaf_size))\n if self.n_clusters < self.min_leaves:\n # force the split\n left, right = self._split_forced(node)\n msg = 'WARNING: forced to split a must-split node that had'\n msg += ' not enough clusters ({0} < min_leaves={1})\\n'\n sys.stderr.write(msg.format(self.n_clusters, self.min_leaves))\n\n # finalize the split\n if node.has_children:\n # update the labels of right child only (left keeps the same)\n self.labels[right.ids] = self.n_clusters\n self.n_clusters += 1\n\n return left, right", "def split(self):\n pos_median = len(self.keys)/2\n key_median = self.keys[pos_median]\n value_median = self.values[pos_median]\n keys_left = self.keys[:pos_median]\n keys_right = self.keys[pos_median+1:]\n values_left = self.values[:pos_median]\n values_right = self.values[pos_median+1:]\n if self.is_leaf():\n refs_left = []\n refs_right = []\n else:\n refs_left = self.refs[:pos_median]\n refs_right = self.refs[pos_median+1:]\n\n # Update the current node.\n self.keys = keys_left\n self.values = values_left\n self.refs = refs_left\n\n # Create a new sibling with the right data.\n sibling = BTreeNode(self.m)\n sibling.parent = self.parent\n sibling.keys = keys_right\n sibling.values = values_right\n sibling.refs = refs_right\n for ref in sibling.refs:\n ref.parent = sibling\n\n # If the current node is root, we need to create a new root.\n if self.is_root():\n new_root = BTreeNode(self.m)\n new_root.keys = [key_median]\n new_root.values = [value_median]\n new_root.refs = [self, sibling]\n self.parent = new_root\n sibling.parent = new_root\n else:\n index = self.parent.refs.index(self)\n self.parent.keys.insert(index+1, key_median)\n self.parent.values.insert(index+1, value_median)\n self.parent.refs.insert(index+1, sibling)\n\n if self.parent.is_full():\n self.parent.split()", "def split(T):\r\n mid = T.max_data//2\r\n if T.isLeaf:\r\n leftChild = BTree(T.data[:mid],max_data=T.max_data) \r\n rightChild = BTree(T.data[mid+1:],max_data=T.max_data) \r\n else:\r\n 
leftChild = BTree(T.data[:mid],T.child[:mid+1],T.isLeaf,max_data=T.max_data) \r\n rightChild = BTree(T.data[mid+1:],T.child[mid+1:],T.isLeaf,max_data=T.max_data) \r\n return T.data[mid], leftChild, rightChild", "def perform_node_split(self, node: RTreeNode[T], group1: List[RTreeEntry[T]], group2: List[RTreeEntry[T]])\\\n -> RTreeNode[T]:\n node.entries = group1\n split_node = RTreeNode(self, node.is_leaf, parent=node.parent, entries=group2)\n self._fix_children(node)\n self._fix_children(split_node)\n return split_node", "def test_split_data(self):\n Xlists = tuple([[np.zeros((200,9)) for b in range(14)] for c in range(9)])\n ybinarylists = [np.zeros((14,12)) for c in range(9)]\n indices = slice(7, 9)\n x_test, y_test = tutorial_pamap2.split_data(Xlists, ybinarylists, \\\n indices)\n test = y_test[0].shape == (12,) and x_test[0].shape == (200, 9)\n assert test", "def split_data(data, test_size):\r\n ntest = int(round(len(data) * (1 - test_size)))+1\r\n \r\n train, test = data[:ntest], data[ntest:]\r\n \r\n return train,test", "def split_data(x, y, ratio, index=None):\n m = x.shape[0]\n splitter = np.cumsum(ratio)\n train_start = 0\n val_start = batch_size * ((splitter[0] * m) // batch_size)\n test_start = batch_size * ((splitter[1] * m) // batch_size)\n test_end = batch_size * ((splitter[2] * m) // batch_size)\n\n val_start = int(val_start)\n test_start = int(test_start)\n test_end = int(test_end)\n\n if index is not None:\n split = ( x[train_start:val_start, :], y[train_start:val_start, :],\n index[train_start:val_start],\n x[val_start:test_start, :], y[val_start:test_start, :],\n index[val_start:test_start],\n x[test_start:test_end, :], y[test_start:test_end, :],\n index[test_start:test_end]\n )\n\n\n\n else:\n split = ( x[train_start:val_start, :], y[train_start:val_start, :],\n x[val_start:test_start, :], y[val_start:test_start, :],\n x[test_start:test_end, :], y[test_start:test_end, :]\n )\n\n return split", "def __split_dataset(self):\n self.train, self.valid, _, _ = train_test_split(self.data, self.data, test_size=0.2)\n self.valid, self.test, _, _ = train_test_split(self.valid, self.valid, test_size=0.5)", "def partition_data(self):\n\n _header_ = self._header_ + 'partition_data(): '\n\n if self.verbose:\n print(_header_ + 'Partitioning data ...')\n\n network = self._useful_network()\n\n if self.nidx_train:\n # The only reason that allows .nidx to not be empty would be that a training Data was copied over\n # hence, the training node indices are retained and need to be excluded\n print(_header_ + 'Excluding %d training nodes transfered from training dataset ...' 
% len(self.nidx_train))\n nidx = set(self.nidx2lidx.keys()) - set(self.nidx_train)\n self.nidx_exclude += self.nidx_train\n self.nidx_train = []\n else:\n nidx = set(self.nidx2lidx.keys())\n\n for l in nidx:\n if l in network:\n if self.node_labels[l]:\n self.nidx_train.append(l)\n else:\n self.nidx_exclude.append(l)\n\n if self.verbose:\n print(_header_ + 'Found %d nodes' % len(self.nidx2lidx))\n print(' %d nodes with labels of interest' % len(self.nidx_train))\n print(' %d nodes can be used to predict' % len(self.nidx_pred))\n print(' %d nodes cannot be mapped due to lack of mappable links' % len(self.nidx_exclude))\n\n return self", "def test_split_adds_children(mock_amg):\n\n mock_amg.cells[0].split()\n assert mock_amg.cells[0].children['bl'] is mock_amg.cells[-4]\n assert mock_amg.cells[0].children['br'] is mock_amg.cells[-3]\n assert mock_amg.cells[0].children['tl'] is mock_amg.cells[-2]\n assert mock_amg.cells[0].children['tr'] is mock_amg.cells[-1]", "def chunk(self, shape, split) -> NotImplementedError:\n raise NotImplementedError()", "def split_test_data():\n outputvis = ROOT_DIR + 'test_imaging/test_split_1eb.ms'\n targ = TARGETS['NGC1333IRAS4A']\n spw = '{0}:236~276'.format(SPWS[targ.name]['NH3_11'].spw_id)\n split(\n vis=get_vis_name(targ),\n outputvis=outputvis,\n field=targ.name,\n spw=spw,\n )", "def _split_forced(self, node):\n # compute the split\n _vec = 0\n sorted_idxs = np.argsort(self.E[node.ids, _vec]).squeeze()\n n = len(sorted_idxs) // 2\n _lids = node.ids[sorted_idxs[:n]]\n _rids = node.ids[sorted_idxs[n:]]\n # compute the score of the new tubes only\n _sl = self.get_tube_score(_lids)\n _sr = self.get_tube_score(_rids)\n # register the split\n node.has_children = True\n node.thresh = np.median(self.E[node.ids, _vec]) # arbitrary\n # Note: median would not ensure equal size (because of duplicate values)\n left = SpectralNode(_lids, _vec, score=_sl, name=node.name + \"0\")\n right = SpectralNode(_rids, _vec, score=_sr, name=node.name + \"1\")\n\n return left, right", "def partition(self, data, labels):\n\t\traise Exception(\"Not implmented\")", "def nextSplit(self):\n pass", "def split_data(self, model_data, tuning=True):\n pass", "def _create_split(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n helper.make_attribute('split', op.parts),\n ])\n return node", "def splitData(df, split):\n train = df.iloc[:int(len(df)*split)]\n test = df.iloc[int(len(df)*split):]\n \n return train, test", "def split_dataset(x_test, y_test, dev_ratio):\n test_size = len(x_test)\n print(test_size)\n dev_size = (int)(test_size * dev_ratio)\n print(dev_size)\n x_dev = x_test[:dev_size]\n x_test = x_test[dev_size:]\n y_dev = y_test[:dev_size]\n y_test = y_test[dev_size:]\n return x_test, y_test", "def splitter (data1, data2):\n flow_data = list()\n fare_record_data = list()\n\n for line in data1:\n line = [line[2:6],line[6:10],line[10:15],line[15:18],line[18],line[19],line[36:39],line[20:28],line[28:36],line[42:49]]\n flow_data.append(line)\n\n flow = pd.DataFrame(flow_data, columns=[\"ORIGIN_CODE\",\"DESTINATION_CODE\",\"ROUTE_CODE\",\"STATUS_CODE\",\"USAGE_CODE\",\"DIRECTION\",\"TOC\",\"VALID_UNTIL\",\"VALID_FROM\",\"FLOW_ID\"])\n flow['ROUTE_CODE'] = flow['ROUTE_CODE'].astype(object)\n flow.index.name=\"flow_idx\"\n\n for line in data2:\n line=[line[2:9],line[9:12],line[12:20]]\n fare_record_data.append(line)\n\n fare_record = pd.DataFrame(fare_record_data, 
columns=[\"FLOW_ID\",\"TICKET_CODE\",\"FARE\"])\n fare_record.index.name = \"fare_record_idx\"\n\n return flow,fare_record", "def test_n_group_split(self):\n # Test 2 groups like HalfSplitter first\n hs = NGroupPartitioner(2)\n\n for isreversed, splitter in enumerate((hs, hs)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in hs.generate(self.data) ]\n self.assertTrue(len(splits) == 2)\n\n for i, p in enumerate(splits):\n self.assertTrue( len(p) == 2 )\n self.assertTrue( p[0].nsamples == 50 )\n self.assertTrue( p[1].nsamples == 50 )\n\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n\n # check if it works on pure odd and even chunk ids\n moresplits = [ list(spl.generate(p)) for p in hs.generate(splits[0][0])]\n\n for split in moresplits:\n self.assertTrue(split[0] != None)\n self.assertTrue(split[1] != None)\n\n # now test more groups\n s5 = NGroupPartitioner(5)\n\n # get the splits\n for isreversed, s5splitter in enumerate((s5, s5)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in s5splitter.generate(self.data) ]\n\n # must have 10 splits\n self.assertTrue(len(splits) == 5)\n\n # check split content\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [2, 3, 4, 5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [2, 3])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 4, 5, 6, 7, 8, 9])\n # ...\n assert_array_equal(splits[4][1-isreversed].sa['chunks'].unique,\n [8, 9])\n assert_array_equal(splits[4][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4, 5, 6, 7])\n\n\n # Test for too many groups\n def splitcall(spl, dat):\n return list(spl.generate(dat))\n s20 = NGroupPartitioner(20)\n self.assertRaises(ValueError,splitcall,s20,self.data)", "def split_data(self, data):\n\n train_df, test_df = train_test_split(data, test_size=self.test_size, \n random_state=0, \n stratify=data[self.outcome_name])\n\n # print(\"Splitting data into training with \", train_df.shape, \"sampes and \",\n # test_df.shape, \"testing samples\")\n\n return train_df, test_df", "def split_data(dataset, ratio = 0.9):\n cutoff_row = int(dataset.shape[0] * ratio)\n return (dataset[:cutoff_row], dataset[cutoff_row:])", "def create_split(self) -> NoReturn:\n raise NotImplementedError", "def split_data(paragraphs):\n para_count = len(paragraphs)\n training_index = int(para_count * 0.7)\n validation_index = int(para_count * 0.9)\n training_data = paragraphs[:training_index]\n validation_data = paragraphs[training_index:validation_index]\n test_data = paragraphs[validation_index:]\n return training_data, validation_data, test_data", "def split_dataset(dataset):\n sequence = dataset.split()\n return sequence", "def split_dataset(dataset):\n sequence = dataset.split()\n return sequence", "def data_splitting(data_features, data_targets, test_size):\n data_features_train, data_features_test, data_targets_train, data_targets_test = \\\n train_test_split(data_features,\n data_targets,\n test_size = 
test_size)\n\n return data_features_train, data_features_test, data_targets_train, data_targets_test", "def data_splitting(data_features, data_targets, test_size):\n data_features_train, data_features_test, data_targets_train, data_targets_test = \\\n train_test_split(data_features,\n data_targets,\n test_size = test_size)\n\n return data_features_train, data_features_test, data_targets_train, data_targets_test", "def split_data(test_data, split_ratio):\n split_index = int(split_ratio * len(test_data))\n \n # randomly permute the values in place\n random.shuffle(test_data)\n \n # take slices of the determined size\n training_set = copy.copy(test_data[:split_index])\n test_data = copy.copy(test_data[split_index:])\n\n return training_set, test_data", "def split_data(self, data, ratio=0.7, shuffle=True, seed=0):\n if shuffle:\n random.seed(seed) # fix to default seed 0\n random.shuffle(data)\n\n size = int(len(data) * ratio)\n data_1 = data[:size]\n data_2 = data[size:]\n\n return data_1, data_2", "def partition(self, data, labels):\n\t\tfor i in range(self.splits):\n\t\t\tyield self.makePartition(len(labels))", "def addSplit(self):\n pass", "def split(self, splits, catchall=False):\r\n raise NotImplementedError()", "def split_dataset(self, split):\n trunk_pos_size = math.ceil((1 - split) * len(self.Pos))\n trunk_neg_size = math.ceil((1 - split) * len(self.Neg))\n trunk_num = int(1 / (1 - split))\n pos_temp = list()\n neg_temp = list()\n for index in range(trunk_num):\n pos_temp.append(self.Pos[index * trunk_pos_size:(index + 1) *\n trunk_pos_size])\n neg_temp.append(self.Neg[index * trunk_neg_size:(index + 1) *\n trunk_neg_size])\n self.test = pos_temp.pop(2) + neg_temp.pop(2)\n # self.train = [i for item in pos_temp + neg_temp for i in item]\n self.train = []\n for item in pos_temp + neg_temp:\n for i in item:\n self.train.append(i)\n\n random.shuffle(self.train)\n random.shuffle(self.test)", "def segment(data):", "def testSplit(self):\n\n protocol_factory = TBinaryProtocol.TBinaryProtocolAcceleratedFactory()\n bigstring = \"\".join(chr(byte) for byte in range(ord(\"a\"), ord(\"z\")+1))\n\n databuf = TTransport.TMemoryBuffer()\n prot = protocol_factory.getProtocol(databuf)\n prot.writeI32(42)\n prot.writeString(bigstring)\n prot.writeI16(24)\n data = databuf.getvalue()\n cutpoint = len(data)/2\n parts = [ data[:cutpoint], data[cutpoint:] ]\n\n framed_buffer = TTransport.TMemoryBuffer()\n framed_writer = TTransport.TFramedTransport(framed_buffer)\n for part in parts:\n framed_writer.write(part)\n framed_writer.flush()\n self.assertEquals(len(framed_buffer.getvalue()), len(data) + 8)\n\n # Recreate framed_buffer so we can read from it.\n framed_buffer = TTransport.TMemoryBuffer(framed_buffer.getvalue())\n framed_reader = TTransport.TFramedTransport(framed_buffer)\n prot = protocol_factory.getProtocol(framed_reader)\n self.assertEqual(prot.readI32(), 42)\n self.assertEqual(prot.readString(), bigstring)\n self.assertEqual(prot.readI16(), 24)", "def split_chunk(chunk, *a, **kw):\n return split_chunk(chunk, *a, **kw)", "def partition(self, data, labels):\n\t\treturn self.kfold.split(labels)", "def split_tiles(module_data):\n raise NotImplementedError", "def split_data(dataset_x, dataset_y, split_ratio):\n num_examples = len(dataset_x)\n training_x = dataset_x[:int(num_examples*split_ratio)]\n training_y = dataset_y[:int(num_examples*split_ratio)]\n\n validation_x = dataset_x[int(num_examples*split_ratio): num_examples]\n validation_y = dataset_y[int(num_examples*split_ratio): num_examples]\n\n 
training_y = np.asarray(training_y, dtype='float32')\n validation_y = np.asarray(validation_y, dtype='float32')\n return training_x, training_y, validation_x, validation_y", "def split(data):\n return data[:len(data) // 2], data[len(data) // 2:]", "def test_split_data():\n from parrot import process_input_data as pid\n\n data_file = os.path.abspath(\"../data/seq_class_dataset.tsv\")\n train, val, test = pid.split_data(data_file, datatype='sequence',\n problem_type='classification', num_classes=3)\n\n assert (len(train) == 210) and (len(val) == 45) and (len(test) == 45) and (len(train[0]) == 3)", "def split_data(data_dir, split_sum, split_count):\n with open(data_dir + \"data.json\") as raw_file:\n raw_data = json.load(raw_file)[\"list_string\"]\n\n raw_data_dict = {}\n raw_data_dict[\"pos\"] = \"\"\n raw_data_dict[\"negation_label\"] = 0\n raw_data_dict[\"error_label\"] = 0\n raw_data_dict[\"semantic_label\"] = 0\n\n for i in range(0, split_sum, split_count):\n raw_data_split = []\n raw_data_range = raw_data[i:i+split_count]\n\n for pos in raw_data_range:\n raw_data_dict[\"pos\"] = pos\n raw_data_split.append(raw_data_dict.copy())\n\n split_dir = data_dir + \"data_\" + str(i) + \"_\" + str(i+split_count) + \".json\"\n with open(split_dir, \"w\") as out_file:\n json.dump(raw_data_split, out_file, indent=4)", "def split_data(data, labels):\r\n # Split the data into train and test\r\n X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.30, random_state = 42)\r\n return(X_train, y_train, X_test, y_test)", "def DataSplit(self, data):\n train_X,test_X,train_y,test_y=train_test_split(data[0],data[1], random_state=2)\n valid_X,valid_y=train_test_split(data[0],data[1],random_state=2,test_size=0.15)[1],train_test_split(data[0],data[1],random_state=2,test_size=0.15)[3]\n return (train_X,test_X,valid_X,train_y,test_y,valid_y)", "def split_edge(self,j,**node_args):\n nA,nC=self.edges['nodes'][j]\n assert np.all( self.edge_to_cells(j) < 0 )\n\n edge_data=rec_to_dict(self.edges[j].copy())\n \n self.delete_edge(j)\n\n # choose midpoint as default\n loc_args=dict(x= 0.5*(self.nodes['x'][nA] + self.nodes['x'][nC]))\n loc_args.update(node_args) # but args will overwrite\n\n nB=self.add_node(**loc_args)\n edge_data['nodes'][1]=nB\n self.add_edge(_index=j,**edge_data)\n # this way we get the same cell marks, too.\n # this helps in tracking marks like UNDEFINED vs. 
UNPAVED\n edge_data['nodes']=[nB,nC]\n jnew=self.add_edge(**edge_data)\n return jnew,nB", "def split_data_into_train_and_test(raw_training_data):\n train_set, test_set = train_test_split(raw_training_data, test_size=0.2, random_state=42)\n return train_set, test_set", "def separate(self):\n print(\"start dataset separating\")\n sum = 0\n for i in tqdm(range(len(self.itemlen))):\n il = self.itemlen[i]\n if il < 3:\n sum += il\n continue\n rarr = list(range(sum, sum+il))\n random.shuffle(rarr)\n self.train.append({\n 'input': self.input[rarr[0]],\n 'label': self.label[i]\n })\n self.val.append({\n 'input': self.input[rarr[1]],\n 'label': self.label[i]\n })\n for j in range(2, len(rarr)):\n self.test.append({\n 'input': self.input[rarr[j]],\n 'label': self.label[i]\n })\n sum += il", "def split_data(x, y, ratio, seed=1):\n # set seed\n np.random.seed(seed)\n packed = np.vstack([y,x]).T\n np.random.shuffle(packed)\n N = y.shape[0]\n eightyN = int(ratio*N)\n xTrain = packed[0:eightyN,1]\n yTrain = packed[0:eightyN,0]\n xTest = packed[eightyN:N, 1]\n yTest = packed[eightyN:N,0]\n # ***************************************************\n # INSERT YOUR CODE HERE\n # split the data based on the given ratio: TODO\n # ***************************************************\n return xTrain, yTrain, xTest, yTest", "def partition(self, data, labels):\n\t\t#TODO remove\n\t\tprint(\"label shape {}\".format(labels.shape))\n\t\treturn self.kfold.split(data[0], labels)", "def train_val_test_split(data):\n raise NotImplementedError", "def split_data(X, scaling, ids, y, split_ratio=0.2):\r\n split = int(X.shape[0] * split_ratio) # index must be int\r\n X_test = X[:split, :, :, :]\r\n scaling_test = scaling[:split, :]\r\n ids_test = ids[:split]\r\n y_test = y[:split, :]\r\n X_train = X[split:, :, :, :]\r\n scaling_train = scaling[split:, :]\r\n ids_train = y[split:]\r\n y_train = y[split:, :]\r\n\r\n return X_train, scaling_train, ids_train, y_train, X_test, scaling_test, ids_test, y_test", "def _build_from_chunks(self, data_node):\n result = ''\n\n if not data_node:\n return ''\n\n master_data = data_node[0]\n result = \"{}{}\".format(result, self._decode(master_data['value']))\n # if data is not in chunks, then return the first node's value\n if 'tags' not in master_data or 'chunks' not in master_data['tags']:\n return result\n\n # join the values in chunks\n last_chunk = int(master_data['tags']['chunks'])\n for chunk_id in range(1, last_chunk):\n slave_data = data_node[chunk_id]\n result = \"{}{}\".format(result, self._decode(slave_data['value']))\n return result", "def split_data(self):\r\n print('split data')\r\n np.random.shuffle(self.dataList)\r\n l = len(self.dataList)/self.fold\r\n self.dataList = [self.dataList[i*l: (i+1)*l] for i in range(self.fold-1)] + [self.dataList[(self.fold-1)*l:]] # each element in the list is splitted data list\r", "def operator_splitting(data, func_transport, func_collision):\n # executing time step\n func_transport(data)\n func_collision(data)\n assert np.all(data.state >= 0)\n data.t += 1\n return", "def split_data(train_percentage, *data):\n train = [entry[0:int(train_percentage * len(entry))] for entry in data]\n val = [entry[int(train_percentage * len(entry)):] for entry in data]\n return train, val", "def split(self, x):\r\n if x >= self.n2.x or x <= self.n1.x: return [self]\r\n n_intermediate = Node.MiddleNode(x=x)\r\n bar1 = BeamElement(nodes=[self.n1, n_intermediate], section=self.section, material=self.material)\r\n bar2 = BeamElement(nodes=[n_intermediate, self.n2], 
section=self.section, material=self.material)\r\n return [bar1, bar2]", "def split(self, user, number=2, piece='a', comment=None, force_refresh=True):\n if comment is None:\n comment = 'Split sample into {0} pieces'.format(number)\n\n process = Process.objects.create(title='Split Sample',\n comment=comment,\n user=user,\n type_id='split-process')\n nodes = []\n\n branch = self.get_piece(piece)\n for i in range(number):\n if i == 0:\n new_piece = piece\n else:\n new_piece = self._get_next_piece()\n # Note: Issue #248 in django-mptt causes the tree to not be properly\n # updated when inserting objects if parent is set. Workaround\n # is to set parent_id instead. This fixes methods such as\n # MPTTModel.get_descendants(). Since you are referencing an\n # object that has changed in the database (process_tree),\n # the lft and rght items are not updated properly. Workarounds\n # include manually updating the root node or requerying for\n # the sample object which will force a refresh.\n nodes.append(self._insert_node(process, new_piece, i + 1, branch))\n if force_refresh: # workaround to force the root node to update\n self.refresh_tree()\n return nodes", "def test_add_empty_nodes_with_label_when_splitting(self):\n print \"----- test_add_empty_nodes_with_label_when_splitting -----\"\n sel_axis = (lambda axis: axis)\n \n #create tree, first node splits in x direction\n tree = kdtree.createNewTree([[0.5, 0.5]],axis = 0, sel_axis= sel_axis)\n kdtree.visualize(tree)\n \n point_left = [0.4, 0.5]\n tree.split2(point_left, axis = 0)\n kdtree.visualize(tree)\n \n point1 = [0.3, 0.5]\n found_node = tree.get_path_to_leaf(point1)[-1]\n correct_node1 = 3\n self.assertEqual(found_node.label, correct_node1, \"Not correct node found\")\n \n point_right = [0.6, 0.5]\n tree.split2(point_right, axis = 1)\n kdtree.visualize(tree)\n \n point2 = [0.6, 0.7]\n found_node = tree.get_path_to_leaf(point2)[-1]\n correct_node2 = 6\n self.assertEqual(found_node.label, correct_node2, \"Not correct node found\")\n \n print \"----- end: test_add_empty_nodes_with_label_when_splitting -----\"", "def test_split(self):\n array = np.arange(1000)\n df = DataFlow.from_numpy(array)\n\n # first, test throw errors on invalid arguments\n def assert_invalid_arg(**kwargs):\n with self.assertRaises(ValueError):\n df.split(**kwargs)\n assert_invalid_arg(partitions=[])\n assert_invalid_arg(partitions=[1000, 1])\n assert_invalid_arg(partitions=[1000, -1])\n assert_invalid_arg(partitions=[1, 2])\n assert_invalid_arg(portions=[])\n assert_invalid_arg(portions=[1.0, 0.1])\n assert_invalid_arg(portions=[1.0, -1])\n assert_invalid_arg(portions=[0.1, 0.2])\n\n # next, test split without shuffling\n df1, df2, df3 = df.split(partitions=[700, 200, 100])\n np.testing.assert_array_equal(df1.all()[0], array[:700])\n np.testing.assert_array_equal(df2.all()[0], array[700:900])\n np.testing.assert_array_equal(df3.all()[0], array[900:1000])\n df1, df2, df3 = df.split(portions=[-1, 0.2, 0.1])\n np.testing.assert_array_equal(df1.all()[0], array[:700])\n np.testing.assert_array_equal(df2.all()[0], array[700:900])\n np.testing.assert_array_equal(df3.all()[0], array[900:1000])\n\n # finally, test split with shuffling\n df1, df2 = df.split(portions=[0.5, -1], shuffle=True)\n self.assertEquals(len(df1), 500)\n self.assertEquals(len(df2), 500)\n df_array = np.concatenate([df1.all()[0], df2.all()[0]], axis=0)\n self.assertFalse(np.all(df_array == array))\n np.testing.assert_array_equal(np.sort(df_array), array)", "def split_next(self):\n # Consider the node with the 
highest loss reduction (a.k.a. gain)\n node = heappop(self.splittable_nodes)\n\n tic = time()\n (sample_indices_left,\n sample_indices_right,\n right_child_pos) = self.splitter.split_indices(node.split_info,\n node.sample_indices)\n self.total_apply_split_time += time() - tic\n\n depth = node.depth + 1\n n_leaf_nodes = len(self.finalized_leaves) + len(self.splittable_nodes)\n n_leaf_nodes += 2\n\n left_child_node = TreeNode(depth,\n sample_indices_left,\n node.split_info.sum_gradient_left,\n node.split_info.sum_hessian_left,\n parent=node)\n right_child_node = TreeNode(depth,\n sample_indices_right,\n node.split_info.sum_gradient_right,\n node.split_info.sum_hessian_right,\n parent=node)\n left_child_node.sibling = right_child_node\n right_child_node.sibling = left_child_node\n node.right_child = right_child_node\n node.left_child = left_child_node\n\n # set start and stop indices\n left_child_node.partition_start = node.partition_start\n left_child_node.partition_stop = node.partition_start + right_child_pos\n right_child_node.partition_start = left_child_node.partition_stop\n right_child_node.partition_stop = node.partition_stop\n\n self.n_nodes += 2\n\n if self.max_depth is not None and depth == self.max_depth:\n self._finalize_leaf(left_child_node)\n self._finalize_leaf(right_child_node)\n return left_child_node, right_child_node\n\n if (self.max_leaf_nodes is not None\n and n_leaf_nodes == self.max_leaf_nodes):\n self._finalize_leaf(left_child_node)\n self._finalize_leaf(right_child_node)\n self._finalize_splittable_nodes()\n return left_child_node, right_child_node\n\n if left_child_node.n_samples < self.min_samples_leaf * 2:\n self._finalize_leaf(left_child_node)\n if right_child_node.n_samples < self.min_samples_leaf * 2:\n self._finalize_leaf(right_child_node)\n\n # Compute histograms of childs, and compute their best possible split\n # (if needed)\n should_split_left = left_child_node.value is None # node isn't a leaf\n should_split_right = right_child_node.value is None\n if should_split_left or should_split_right:\n\n # We will compute the histograms of both nodes even if one of them\n # is a leaf, since computing the second histogram is very cheap\n # (using histogram subtraction).\n n_samples_left = left_child_node.sample_indices.shape[0]\n n_samples_right = right_child_node.sample_indices.shape[0]\n if n_samples_left < n_samples_right:\n smallest_child = left_child_node\n largest_child = right_child_node\n else:\n smallest_child = right_child_node\n largest_child = left_child_node\n\n # We use the brute O(n_samples) method on the child that has the\n # smallest number of samples, and the subtraction trick O(n_bins)\n # on the other one.\n tic = time()\n smallest_child.histograms = \\\n self.histogram_builder.compute_histograms_brute(\n smallest_child.sample_indices)\n largest_child.histograms = \\\n self.histogram_builder.compute_histograms_subtraction(\n node.histograms, smallest_child.histograms)\n self.total_compute_hist_time += time() - tic\n\n tic = time()\n if should_split_left:\n self._compute_best_split_and_push(left_child_node)\n if should_split_right:\n self._compute_best_split_and_push(right_child_node)\n self.total_find_split_time += time() - tic\n\n return left_child_node, right_child_node", "def split_samples(data):\n\n training_samples = data[0:9497]\n test_samples = data[9497:11300]\n\n return training_samples, test_samples", "def split_dataset(dataset, train_percentage, feature_headers, target_header):\r\n\r\n # Split dataset into train and test dataset\r\n 
train_x, test_x, train_y, test_y = train_test_split(dataset[feature_headers], dataset[target_header],train_size=train_percentage)\r\n return train_x, test_x, train_y, test_y", "def split(bn, node, Z=None):\n # no estoy seguro si hace falta crear nuevas variables, pero mas\n # vale prevenir que lamentar\n for v in bn.V:\n if v.name == node:\n node = v\n break\n if Z is None:\n Z = bn.G[node]\n\n V = [RandomVariable(var.name, var.Domain) for var in bn.V]\n\n # mapeo de variables viejas a las nuevas. Sirve mas adelante para\n # crear la lista de arcos\n old2new = dict(zip(bn.V, V))\n\n # crear el nuevo nodo y agregarlo a la lista de variables\n newnode = RandomVariable(node.name + \"*\", node.Domain)\n V.append(newnode)\n\n # crear la lista de arcos\n E = []\n for parent, children in bn.G.iteritems():\n for child in children:\n if parent == node and child in Z:\n E.append((newnode, old2new[child]))\n else:\n E.append((old2new[parent], old2new[child]))\n\n # calcular los CPTs\n\n # para cada variable no afectada por el split + variable papa: copiar cpts\n for v in bn.V:\n if not (v in Z):\n old2new[v].cpt = Factor([old2new[e] for e in v.cpt.domain()], v.cpt.getfunction())\n\n # para la variable creada por el split: cpt uniforme\n newnode.cpt = Factor([newnode], [1.0/len(newnode.domain()) for e in newnode.domain()])\n\n # para cada variable hijo afectada por el split: cpt igual a anterior, pero\n # con lista de papas cambiada\n def cp(e):\n if e == node:\n return newnode\n\n return old2new[e]\n\n for v in Z:\n old2new[v].cpt = Factor([cp(e) for e in v.cpt.domain()], v.cpt.getfunction())\n \n name = bn.name + \" splitted\"\n beta = 1\n return DBN(V,E,name,\"\"), newnode", "def split_data(self, data, start, interval, shuffle=True, seed=0):\n if shuffle:\n random.seed(seed) # fix to default seed 0\n random.shuffle(data)\n\n data_1 = np.append(data[:start],data[start+interval:])\n data_2 = data[start:start+interval]\n\n return data_1, data_2", "def data_split(data, labels, train_ratio=0.5, rand_seed=42):\n\n assert 0 <= train_ratio <= 1, \"Error: training set ratio must be between 0 and 1\"\n\n x_train, x_temp, y_train, y_temp = train_test_split(data,\n labels,\n train_size=train_ratio,\n random_state=rand_seed)\n\n x_val, x_test, y_val, y_test = train_test_split(x_temp,\n y_temp,\n train_size=0.5,\n random_state=rand_seed)\n\n return x_train, x_val, x_test, y_train, y_val, y_test", "def splitEdge(self, edge, index, link):\n #print 'Splitting edge', edge\n node = Node(len(self.nodes))\n if link:\n suffix_link = SuffixLink(node)\n self.latest_node.addSuffixLink(suffix_link)\n # copy out existing destination and bounds\n old_dest = edge.getDestination()\n old_bounds = edge.getSuffix()\n old_start = old_bounds[0]\n # Adjust edge finish point to n + index, connect new node\n edge.setBound(stop = old_start + index)\n edge.setDestination(node) \n # Create new edge representing the remains of the old edge\n offcut = Edge(self.target[old_start + index], old_start + index, \n old_bounds[1], old_dest)\n # Add the offcut edge as a child of the new node\n node.addChild(offcut) \n # Create new edge for the current pos\n n = self.pos.getVal()\n new_edge = Edge(self.target[n], n, self.pos)\n node.addChild(new_edge)\n self.nodes.append(node)\n self.latest_node = node\n # rule 1 - a split from the root node\n if self.active_node == self.root:\n # active length decrements\n self.active_length -= 1\n pos = self.pos.getVal()\n # active edge changes\n self.active_edge = self.target[pos - self.active_length]\n # active 
node remains root\n else:\n if self.active_node.hasSuffixLink():\n # Set the active node to the link destination\n link = self.active_node.getSuffixLink()\n self.active_node = link.getDestination()\n else:\n self.active_node = self.root # set active node to root", "def test_twoing(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 1)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([2]), set([0, 1])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:1, 1:1, 2:0})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 0.48)", "def split_dataset(dataset, train_percentage, feature_headers, target_header):\r\n\r\n train_x, test_x, train_y, test_y = train_test_split(dataset[feature_headers], dataset[target_header],\r\n train_size=train_percentage, random_state=42)\r\n return train_x, test_x, train_y, test_y", "def split_X_y(data):\n return data.drop(columns=\"price\"), data[\"price\"]", "def split_data(x, y, ratio, seed=1):\n # number of value\n num_points = len(y)\n # compute the index that split the datas\n split = int(np.floor(num_points * ratio))\n\n # set the seed to the given value\n np.random.seed(seed)\n # compute random indexes for training and testing\n rand_indexes = np.random.permutation(num_points)\n index_training = rand_indexes[:split]\n index_testing = rand_indexes[split:]\n\n return x[index_training], y[index_training], x[index_testing], y[index_testing]", "def test_twoing(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 0)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 0.5)", "def split(self, attribute):\n if attribute not in self.attributes:\n raise KeyError('Attribute not present in node')\n \n self.split_attr = attribute\n \n # list() is used to make a copy of the list instead of pointing to the same list\n child_attributes = list(self.attributes)\n child_attributes.remove(attribute)\n \n child1_ancestors = list(self.ancestors)\n child0_ancestors = list(self.ancestors)\n child1_ancestors.append(attribute_value(attribute, 1))\n child0_ancestors.append(attribute_value(attribute, 0))\n \n self.val1 = Node(child_attributes, child1_ancestors, self.data, self.heuristic)\n self.val0 = Node(child_attributes, child0_ancestors, self.data, self.heuristic)", "def split_data(x, y, ratio, seed=1):\n # set seed\n np.random.seed(seed)\n # ***************************************************\n # INSERT YOUR CODE HERE\n # split the data based on the given ratio: TODO\n # ***************************************************\n \n def split_data(x, y, ratio, seed=1):\n \"\"\"split the dataset based on the split ratio.\"\"\"\n # set seed\n np.random.seed(seed)\n # ***************************************************\n # INSERT YOUR CODE HERE\n # split the data based on the given ratio: TODO\n # 
***************************************************\n trainDataLen = round(len(y)*ratio)\n \n trainDataID = random.sample(range(len(y)), trainDataLen)\n \n # USing bool value to obtaint he remainling data for validation data set\n validDataID = np.array(range(len(y))) + 1\n validDataID[trainDataID] = 0\n validDataID = validDataID >0\n \n \n # obtain the trainning data\n trainDataX = x[trainDataID]\n trainDataY = y[trainDataID]\n \n # obtain the validation data\n validDataX = x[validDataID]\n validDataY = y[validDataID] \n \n return trainDataX,trainDataY, validDataX, validDataY\n \n #raise NotImplementedError", "def split_data(self,test=False):\n shuffle_index = torch.randperm(self.train_target.shape[0])\n load = shuffle_index.shape[0]\n train_input_shuffle = self.train_input[shuffle_index]\n train_target_shuffle = self.train_target[shuffle_index]\n train_classes_shuffle = self.train_classes[shuffle_index]\n index_train = self.index_for_equal_class(train_target_shuffle[:load//2])\n train_input = train_input_shuffle[index_train]\n train_target = train_target_shuffle[index_train]\n train_classes = train_classes_shuffle[index_train]\n if not test:\n index_test = self.index_for_equal_class( train_target_shuffle[load//2:]) + load//2\n test_input = train_input_shuffle[index_test]\n test_target = train_target_shuffle[index_test]\n test_classes = train_classes_shuffle[index_test]\n else:\n index_test = self.index_for_equal_class(self.test_target)\n test_input = self.test_input[index_test]\n test_target = self.test_target[index_test]\n test_classes = self.test_classes[index_test]\n train_input, mean, std = normalize(train_input)\n test_input, _, _ = normalize(test_input,mean,std)\n return train_input, train_target, train_classes ,test_input ,test_target ,test_classes", "def dataSplit(self,df):\n X = df['message']\n y = df['label']\n return X, y", "def split_records(self, data):\n byte_array = bytearray(data)\n size = len(byte_array)\n split_data = [bytearray()]\n for index, byte in enumerate(byte_array):\n if index != size-1 and byte == 143 and byte_array[index+1] == 142:\n print(\"found delimeter byte 143,142 b'8f8e'\")\n split_data[-1].append(byte)\n split_data.append(bytearray())\n print(\"start new record\")\n else:\n split_data[-1].append(byte)\n return split_data", "def split(self, node, width, height):\r\n node.used = True\r\n node.down = SquareAlgorithmNode(x=node.x,\r\n y=node.y + height,\r\n width=node.width,\r\n height=node.height - height)\r\n node.right = SquareAlgorithmNode(x=node.x + width,\r\n y=node.y,\r\n width=node.width - width,\r\n height=height)\r\n return node", "def splits(cls, exts, fields, root='/Users/yingliu/PycharmProjects/Seq2SeqSemantic/data/',\n train='train2', validation='val2', test='test2', **kwargs):\n return super(SPDataset, cls).splits(\n exts, fields, path=root,\n train = train, validation = validation, test = test, **kwargs)", "def split_to_windows(self, data):\n return", "def split_dataset(dataset, train_percentage, feature_headers, target_header):\n \n # Split dataset into train and test dataset\n train_x, test_x, train_y, test_y = train_test_split(dataset[feature_headers], dataset[target_header],\n train_size=train_percentage)\n return train_x, test_x, train_y, test_y", "def split(x, y, t):\n return (Shape(t, x.memory - y.memory, x.cores - y.cores, x.disk - y.disk),\n NodeReservation(Shape(x.wallTime - t, x.memory, x.cores, x.disk)))", "def _split_data(self, x, y):\n\tindices = range(self.N)\n\tnp.random.shuffle(indices)\n\ttrain_idx, test_idx = 
indices[:self.TRAIN_SIZE], indices[self.TRAIN_SIZE:]\n\treturn (x[train_idx,:], y[train_idx,:], x[test_idx,:], y[test_idx,:])", "def _chunk_data(self):\n for n in range(0, len(self.data) + 1, len(self.data) //\n self.num_of_chunks):\n yield self.data[0 + n:len(self.data) // self.num_of_chunks + n]", "def split(self):\n return self.dataset_split", "def split_at_nodes(shp):\n nodes = find_nodes(shp)\n nodeIds = list(nodes)\n nodeIds.sort()\n nodeIds = dict([(node,i) for i,node in enumerate(nodeIds)])\n \n for road in shp:\n vrts = road.vertices\n midVrts = set(road.vertices[1:-1]) #we know end points are nodes\n midNodes = midVrts.intersection(nodes) # find any nodes in the middle of the feature.\n midIdx = [vrts.index(node) for node in midNodes] # Get their indices\n midIdx.sort()\n if midIdx:\n #print vrts\n starts = [0]+midIdx\n stops = [x+1 for x in midIdx]+[None]\n for start,stop in zip(starts,stops):\n feat = pysal.cg.Chain(vrts[start:stop])\n rec = (nodeIds[feat.vertices[0]],nodeIds[feat.vertices[-1]],False)\n yield feat,rec\n else:\n rec = (nodeIds[road.vertices[0]],nodeIds[road.vertices[-1]],False)\n yield road,rec", "def splits(cls, text_field, label_field, root='../data',\n train='train.txt', validation='valid.txt', test='test.txt'):\n print(\"root path for relation dataset: {}\".format(root))\n path = cls.download_or_unzip(root)\n prefix_fname = 'annotated_fb_data_'\n return super(SimpleQaRelationDataset, cls).splits(\n os.path.join(path, prefix_fname), train, validation, test,\n format='TSV', fields=[('subject', None), ('relation', label_field), (object, None), ('question', text_field)]\n )", "def splitPlace(self):\r\n \r\n \r\n \r\n nodeSortedIter = sorted(self.G.degree_iter(),key=itemgetter(1),reverse=True)\r\n \r\n placeCnt = 0\r\n \r\n for node in nodeSortedIter:\r\n if placeCnt<self.cells/2:\r\n self.sitesA.append(node[0])\r\n self.G.node[node[0]][\"part\"] = 'A'\r\n else:\r\n self.sitesB.append(node[0])\r\n self.G.node[node[0]][\"part\"] = 'B'\r\n placeCnt+=1" ]
[ "0.6493564", "0.6465813", "0.63918084", "0.62302977", "0.6212308", "0.6188923", "0.6174351", "0.61242145", "0.61228055", "0.60877246", "0.6079287", "0.6019898", "0.60130966", "0.598436", "0.59603816", "0.5949242", "0.59408516", "0.59373665", "0.59328663", "0.59160835", "0.59151304", "0.5910113", "0.589416", "0.5870395", "0.58603656", "0.58574766", "0.58439744", "0.5838773", "0.583524", "0.58242244", "0.5822505", "0.58004266", "0.57806057", "0.57481104", "0.5731326", "0.5730209", "0.57283646", "0.57283485", "0.57283485", "0.57109267", "0.57109267", "0.5702897", "0.5687889", "0.5681845", "0.56662863", "0.5660321", "0.56530136", "0.5652939", "0.56390643", "0.5637234", "0.5620496", "0.5614657", "0.56062615", "0.5600912", "0.55961883", "0.5588937", "0.55884653", "0.55880845", "0.55818874", "0.55603915", "0.55557764", "0.55508935", "0.55370677", "0.55366176", "0.5535357", "0.55242467", "0.5516525", "0.55140597", "0.5504245", "0.55015266", "0.5488301", "0.5487308", "0.5485011", "0.54679024", "0.54678696", "0.5466651", "0.54509526", "0.54499584", "0.5448844", "0.54464996", "0.54450345", "0.54408836", "0.54400194", "0.54327023", "0.5432028", "0.5428624", "0.54273444", "0.5423394", "0.541983", "0.54152036", "0.54145694", "0.5409789", "0.54029006", "0.5402304", "0.53903955", "0.53873795", "0.538704", "0.53869796", "0.5386451", "0.5375477", "0.5374407" ]
0.0
-1
Give each Node random splits of data. Nodes will have different amounts of data.
def random_split(self, nr_agents):
    np.random.seed(self.random_seed)

    # Get random indices
    indices = sorted(np.random.randint(0, high=self.samples.shape[0], size=nr_agents - 1).tolist())
    indices = [0] + indices
    indices += [self.samples.shape[0]]

    self.samples = self.partition(self.samples, indices, nr_agents)
    self.labels = self.partition(self.labels, indices, nr_agents)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_test_split(self):\n random.seed(self.args.seed)\n nodes = [node for node in range(self.ncount)]\n random.shuffle(nodes)\n self.train_nodes = torch.LongTensor(nodes[0:self.args.training_size])\n self.validation_nodes = torch.LongTensor(nodes[self.args.training_size:self.args.training_size+self.args.validation_size])\n self.test_nodes = torch.LongTensor(nodes[self.args.training_size+self.args.validation_size:])", "def split(self):\n\n ratio_c = 1 - self.ratio\n self.train, self.test = self.df.randomSplit([self.ratio, ratio_c], seed=12345)", "def non_iid_split(self, nr_agents, class_per_node, random):\n unique = list(set(self.labels.tolist()))\n len_unique = len(unique)\n\n # Create array that assigns a class to specific nodes\n # Use 'np.arange' to ensure every class is represented before repeating\n # A row represents nr_agents, a column represents classes per node\n agent_class_master = np.arange(start=0, stop=nr_agents * class_per_node) % len_unique\n np.random.shuffle(agent_class_master)\n agent_class_master = agent_class_master.reshape(nr_agents, class_per_node)\n\n # Split data by labels\n sample_list = [[] for _ in range(len_unique)]\n for i in range(len(self.labels)):\n sample_list[self.labels[i]].append(self.samples[i])\n\n # By class creates uniform or random indices splits to partition data to agents evenly\n class_count = np.bincount(agent_class_master.ravel())\n class_indices = {}\n for i in range(len(class_count)):\n if random:\n indices = sorted(np.random.randint(0, high=len(sample_list[i]), size=class_count[i] - 1).tolist())\n indices = [0] + indices\n indices += [len(sample_list[i])]\n class_indices[i] = indices\n else:\n class_indices[i] = np.linspace(start=0, stop=len(sample_list[i]), num=class_count[i] + 1,\n dtype=int).tolist()\n\n # Main loop that partitions data by the assigned class and proper amount\n all_agents = []\n all_class = []\n for agent in agent_class_master:\n agent_data = []\n agent_class = []\n for cls in agent:\n # Proportioned indices for data and grab correctly indexed data\n temp_indices = class_indices[cls]\n data_for_agent = sample_list[cls][temp_indices[0]:temp_indices[1] - 1]\n\n # Add data and class to this agents list\n agent_data = agent_data + data_for_agent\n agent_class = agent_class + [cls for _ in range(len(data_for_agent))]\n\n # Drop first index since we used that data, forces next person to use next index\n class_indices[cls] = temp_indices[1:]\n\n # Append agents data and class labels in order\n all_agents.append(torch.stack(agent_data))\n all_class.append(torch.tensor(agent_class))\n\n self.samples = all_agents\n self.labels = all_class", "def partition(data, n):\n splits = []\n remaining = data.copy(deep=True)\n for i in range(n):\n split = remaining.sample(frac=1/(n-i), random_state=10)\n splits.append(split)\n remaining = remaining.drop(split.index)\n return splits", "def split_data(self):\r\n print('split data')\r\n np.random.shuffle(self.dataList)\r\n l = len(self.dataList)/self.fold\r\n self.dataList = [self.dataList[i*l: (i+1)*l] for i in range(self.fold-1)] + [self.dataList[(self.fold-1)*l:]] # each element in the list is splitted data list\r", "def randsplit(data, sections=2):\n ret = [[] for i in range(sections)]\n for item in data:\n ret[random.randrange(sections)].append(item)\n return ret", "def train_test_split(dataset, split):\r\n train = list()\r\n train_size = split * len(dataset)\r\n dataset_copy = list(dataset) \r\n while len(train) < train_size:\r\n index = randrange(len(dataset_copy))\r\n 
train.append(dataset_copy.pop(index))\r\n return train, dataset_copy", "def load_random_data(self, parts, nodes, max_nodes):\n\n self.parts = parts\n self.nodes = nodes\n self.max_nodes = max_nodes\n\n if self.verbose:\n print 'Generating random data using nodes:' + str(nodes) + \\\n ' parts:' + str(parts) + ' max nodes:' + str(max_nodes)\n\n node_list = []\n node_list.extend(range(1, nodes))\n\n # for each part we want to add a random number of nodes from the node list\n for i in range(1, parts):\n self.data_dict[i] = random.sample(node_list, random.randint(2, max_nodes))", "def get_split_data(self):\n X, y, _, _ = self.get_subsets()\n return train_test_split(X, y, test_size=0.3, random_state=42)", "def splitData(data, class_label, seed, ratio):\n\t\n\trandom.seed(seed)\n\tsubset = data.clone()\n\tsize_data = subset.data.shape[0]\n\tn = int(np.floor(size_data * ratio)) # number of datasets in train\n\tindex = random.sample(range(1, size_data), n)\n\tsplit_list = [item for item in [0] for i in range(size_data)]\n\t\n\tfor i in index:\n\t\tsplit_list[i]=1\n\t\n\treturn split_list #returns list of indeces where 0 is test and 1 is training data ", "def mnist_custom_split(split_ratio=0.8, random_seed=0, shuffle_dataset=True, dataset='mnist'):\n if dataset[:5] == 'mnist':\n dataset = datasets.MNIST(definitions.DATA_PATH)\n elif dataset[:6] == 'hmnist':\n dataset = datasets.DatasetFolder(definitions.HMNIST_DATA_FOLDER, data_loader, ALL_EXTS),\n elif dataset[:8] == 'diamonds':\n dataset = datasets.DatasetFolder(definitions.DIAMONDS_DATA_FOLDER, data_loader, ALL_EXTS),\n else:\n print('[ERROR] Unknown dataset for split_and_train! => %s' % dataset)\n exit(1)\n\n dataset_size = len(dataset)\n\n indices = list(range(dataset_size))\n split = int(np.floor(split_ratio * dataset_size))\n logger.debug('Split dataset {}'.format(split))\n if shuffle_dataset:\n np.random.seed(random_seed)\n np.random.shuffle(indices)\n # ==> Mistakes\n # train_indices, val_indices = indices[split:], indices[:split]\n train_indices, val_indices = indices[:split], indices[split:]\n\n # Creating PT data samplers and loaders:\n train_sampler = torch.utils.data.SubsetRandomSampler(train_indices)\n valid_sampler = torch.utils.data.SubsetRandomSampler(val_indices)\n\n return train_sampler, valid_sampler", "def _cluster(self):\n self._not_included = self.data\n self.leaves = []\n flag = int(rand() * len(self.data))\n flag = self._generate(flag)\n while len(self._not_included) > 0:\n flag = self._generate(flag)\n if flag == -1:\n break\n pass\n self._remember.append({\n 'threshold': self._base_threshold,\n 'result': len(self.leaves)\n })\n print(len(self._remember), {\n 'threshold': self._base_threshold,\n 'result': len(self.leaves)\n })\n return", "def get_random(everything_path, split_ratio=0.5, seed=0):\n # Loading data\n with h5py.File(everything_path, 'r') as data:\n labels = data['labels'].value\n \n # Splitting classes\n fri_i = np.where(labels==1)[0]\n frii_i = np.where(labels==2)[0]\n rand_i = np.where(labels==0)[0]\n\n # Shuffling\n fri_i = shuffle(fri_i, random_state=seed)\n frii_i = shuffle(frii_i, random_state=seed)\n rand_i = shuffle(rand_i, random_state=seed)\n\n # Splitting into training and testing sets\n cut = int(np.round(split_ratio * fri_i.shape[0]))\n train_fri = fri_i[cut:]\n test_fri = fri_i[:cut]\n\n cut = int(np.round(split_ratio * frii_i.shape[0]))\n train_frii = frii_i[cut:]\n test_frii = frii_i[:cut]\n\n cut = int(np.round(split_ratio * rand_i.shape[0]))\n train_rand = rand_i[cut:]\n test_rand = 
rand_i[:cut]\n\n train_i = np.concatenate((train_fri, train_frii, train_rand), axis=0)\n test_i = np.concatenate((test_fri, test_frii, test_rand), axis=0)\n\n return train_i, test_i", "def split_data(test_data, split_ratio):\n split_index = int(split_ratio * len(test_data))\n \n # randomly permute the values in place\n random.shuffle(test_data)\n \n # take slices of the determined size\n training_set = copy.copy(test_data[:split_index])\n test_data = copy.copy(test_data[split_index:])\n\n return training_set, test_data", "def test_n_group_split(self):\n # Test 2 groups like HalfSplitter first\n hs = NGroupPartitioner(2)\n\n for isreversed, splitter in enumerate((hs, hs)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in hs.generate(self.data) ]\n self.assertTrue(len(splits) == 2)\n\n for i, p in enumerate(splits):\n self.assertTrue( len(p) == 2 )\n self.assertTrue( p[0].nsamples == 50 )\n self.assertTrue( p[1].nsamples == 50 )\n\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n\n # check if it works on pure odd and even chunk ids\n moresplits = [ list(spl.generate(p)) for p in hs.generate(splits[0][0])]\n\n for split in moresplits:\n self.assertTrue(split[0] != None)\n self.assertTrue(split[1] != None)\n\n # now test more groups\n s5 = NGroupPartitioner(5)\n\n # get the splits\n for isreversed, s5splitter in enumerate((s5, s5)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in s5splitter.generate(self.data) ]\n\n # must have 10 splits\n self.assertTrue(len(splits) == 5)\n\n # check split content\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [2, 3, 4, 5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [2, 3])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 4, 5, 6, 7, 8, 9])\n # ...\n assert_array_equal(splits[4][1-isreversed].sa['chunks'].unique,\n [8, 9])\n assert_array_equal(splits[4][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4, 5, 6, 7])\n\n\n # Test for too many groups\n def splitcall(spl, dat):\n return list(spl.generate(dat))\n s20 = NGroupPartitioner(20)\n self.assertRaises(ValueError,splitcall,s20,self.data)", "def separate(self):\n print(\"start dataset separating\")\n sum = 0\n for i in tqdm(range(len(self.itemlen))):\n il = self.itemlen[i]\n if il < 3:\n sum += il\n continue\n rarr = list(range(sum, sum+il))\n random.shuffle(rarr)\n self.train.append({\n 'input': self.input[rarr[0]],\n 'label': self.label[i]\n })\n self.val.append({\n 'input': self.input[rarr[1]],\n 'label': self.label[i]\n })\n for j in range(2, len(rarr)):\n self.test.append({\n 'input': self.input[rarr[j]],\n 'label': self.label[i]\n })\n sum += il", "def split(self, num_or_size_splits, shuffle=False):\n raise NotImplementedError", "def split_data(self):\n np.random.seed(seed=self.seed)\n indices = np.random.permutation(self.predictor_vars.shape[0])\n split_row = round(self.predictor_vars.shape[0] * self.train_split)\n train_idx, test_idx = indices[:split_row], 
indices[split_row:]\n self.predictor_vars_train, self.predictor_vars_test = (\n self.predictor_vars[train_idx, :],\n self.predictor_vars[test_idx, :],\n )\n self.response_var_train, self.response_var_test = (\n self.response_var[train_idx],\n self.response_var[test_idx],\n )", "def split_dataset(self, split):\n trunk_pos_size = math.ceil((1 - split) * len(self.Pos))\n trunk_neg_size = math.ceil((1 - split) * len(self.Neg))\n trunk_num = int(1 / (1 - split))\n pos_temp = list()\n neg_temp = list()\n for index in range(trunk_num):\n pos_temp.append(self.Pos[index * trunk_pos_size:(index + 1) *\n trunk_pos_size])\n neg_temp.append(self.Neg[index * trunk_neg_size:(index + 1) *\n trunk_neg_size])\n self.test = pos_temp.pop(2) + neg_temp.pop(2)\n # self.train = [i for item in pos_temp + neg_temp for i in item]\n self.train = []\n for item in pos_temp + neg_temp:\n for i in item:\n self.train.append(i)\n\n random.shuffle(self.train)\n random.shuffle(self.test)", "def _split(self, split, randomise=False, **kwargs):\r\n # Copy split to prevent modifying outside arguments\r\n split = split.copy()\r\n # Compute total\r\n total = sum(split.values())\r\n # If split contains floats, convert to integers\r\n if isinstance(total, float):\r\n assert_msg = 'Not enough data! ' \\\r\n + f'Split requires a total of {total*100}%. ' \\\r\n + 'Split should not exceed 100%.'\r\n assert total <= 1, assert_msg\r\n # Add 'rest' subset if not all data is used in split\r\n if total < 1:\r\n split['rest'] = 1 - total\r\n split = self._float_split_to_int(split)\r\n total = sum(split.values())\r\n # Create subsets based off integer values\r\n if isinstance(total, int):\r\n assert_msg = 'Not enough data! ' \\\r\n + f'Split requires a total of {total} data entries ' \\\r\n + f'but only {len(self.data)} are available.'\r\n assert total <= len(self.data), assert_msg\r\n # Add 'rest' subset if not all data is used in split\r\n if total < len(self.data):\r\n split['rest'] = len(self.data) - total\r\n # Create subsets\r\n index = 0\r\n for name, length in split.items():\r\n subset_name = f'{self.name}.{name}'\r\n subset_data = self.data[index:index + length]\r\n subset_seed = self.seed\r\n if self.seed is not None:\r\n subset_seed += sum([ord(c) for c in name]) + length\r\n subset = self._make_subset(subset_name,\r\n subset_data,\r\n randomise=randomise,\r\n seed=subset_seed,\r\n **kwargs\r\n )\r\n setattr(self, name, subset)\r\n index += length\r\n # Replace data with references to subsets\r\n self.data = []\r\n for name in split.keys():\r\n self.data.append(getattr(self, name, None))\r\n # Indicate that this is a superset\r\n self.is_superset = True", "def split_data(x, y, ratio, seed=1):\n # set seed\n np.random.seed(seed)\n packed = np.vstack([y,x]).T\n np.random.shuffle(packed)\n N = y.shape[0]\n eightyN = int(ratio*N)\n xTrain = packed[0:eightyN,1]\n yTrain = packed[0:eightyN,0]\n xTest = packed[eightyN:N, 1]\n yTest = packed[eightyN:N,0]\n # ***************************************************\n # INSERT YOUR CODE HERE\n # split the data based on the given ratio: TODO\n # ***************************************************\n return xTrain, yTrain, xTest, yTest", "def train_test_data_split(node_features, labels, train_ratio=0.8):\n num_graph = node_features.shape[0]\n train_test_split = int(train_ratio*num_graph)\n x_train = node_features[:train_test_split,:,:] \n y_train = labels[:train_test_split,:,:] \n x_test = node_features[train_test_split:,:,:] \n y_test = labels[train_test_split:,:,:]\n 
np.save(\"data/node_features_train.npy\", x_train)\n np.save(\"data/node_features_test.npy\", x_test)\n np.save(\"data/labels_train.npy\", y_train)\n np.save(\"data/labels_test.npy\", y_test)\n return x_train, x_test, y_train, y_test", "def shuffle(self):\n self.train_nodes = np.random.permutation(self.train_nodes)\n self.batch_num = 0", "def shuffle(self):\n self.train_nodes = np.random.permutation(self.train_nodes)\n self.batch_num = 0", "def uniform_split(self, nr_agents):\n indices = np.linspace(start=0, stop=self.samples.shape[0], num=nr_agents + 1, dtype=int).tolist()\n\n self.samples = self.partition(self.samples, indices, nr_agents)\n self.labels = self.partition(self.labels, indices, nr_agents)", "def split(self, how, nr_agents, **kwargs):\n if how == 'random':\n self.random_split(nr_agents)\n elif how == 'uniform':\n self.uniform_split(nr_agents)\n elif how == 'non_iid_uniform':\n self.non_iid_split(nr_agents, kwargs['class_per_node'], random=False)\n elif how == 'non_iid_random':\n self.non_iid_split(nr_agents, kwargs['class_per_node'], random=True)\n\n return self.get_data()", "def train_test_split(filename: str, split=0.5) -> tuple:\n training_set = []\n test_set = []\n content = load_from_csv(filename)\n for _, value in enumerate(content):\n if random.random() < split:\n training_set.append(value)\n else:\n test_set.append(value)\n return training_set, test_set", "def chunks(data, n):\n newn = int(len(data) / n) # chunk size \n \n for i in range(0, n-1):\n test_chunk = data[i*newn:i*newn+newn]\n train_chunk = [el for el in data if el not in test_chunk]\n yield train_chunk, test_chunk\n \n test_chunk = data[n*newn-newn:]\n train_chunk = [el for el in data if el not in test_chunk]\n \n yield train_chunk, test_chunk", "def dataset_splits(self):\n # 10% evaluation data\n return [{\n \"split\": problem.DatasetSplit.TRAIN,\n \"shards\": 799,\n }, {\n \"split\": problem.DatasetSplit.EVAL,\n \"shards\": 1,\n }]", "def make_random_split(\n self, data=None, test_rate=0.1, n_negative=100, by_user=False, n_test=10\n ):\n if data is None:\n data = self.load_interaction()\n data = filter_user_item(data, min_u_c=3, min_i_c=3)\n\n if not isinstance(data, pd.DataFrame):\n raise RuntimeError(\"data is not a type of DataFrame\")\n\n result = split_data(\n data,\n split_type=\"random\",\n test_rate=test_rate,\n n_negative=n_negative,\n save_dir=self.processed_path,\n by_user=by_user,\n n_test=n_test,\n )\n return result", "def shuffle(self):\n self.train_edges = np.random.permutation(self.train_edges)\n self.nodes = np.random.permutation(self.nodes)\n self.batch_num = 0", "def dataset_splits(self):\n return [{\n \"split\": problem.DatasetSplit.TRAIN,\n \"shards\": 80,\n }, {\n \"split\": problem.DatasetSplit.EVAL,\n \"shards\": 2,\n }]", "def split_dataset(dataset, Ntotal, val_frac,\n batch_size, num_workers,\n random_seed=0, shuffle=True, balance=False):\n \n Nval = math.floor(Ntotal*val_frac)\n train_ds, val_ds = ch.utils.data.random_split(dataset, \n [Ntotal - Nval, Nval], \n generator=ch.Generator().manual_seed(random_seed))\n if balance: \n val_ds = balance_dataset(val_ds)\n split_datasets = [train_ds, val_ds]\n \n split_loaders = []\n for ds in split_datasets:\n split_loaders.append(ch.utils.data.DataLoader(ds, \n num_workers=num_workers, \n batch_size=batch_size, \n shuffle=shuffle))\n return split_datasets, split_loaders", "def split_dataset(dataset, n, seed=0):\n assert(n <= len(dataset))\n keys = list(range(len(dataset)))\n np.random.RandomState(seed).shuffle(keys)\n keys_1 = 
keys[:n]\n keys_2 = keys[n:]\n return _SplitDataset(dataset, keys_1), _SplitDataset(dataset, keys_2)", "def split_dataset(dataset, n, seed=0):\n assert n <= len(dataset)\n keys = list(range(len(dataset)))\n np.random.RandomState(seed).shuffle(keys)\n keys_1 = keys[:n]\n keys_2 = keys[n:]\n return _SplitDataset(dataset, keys_1), _SplitDataset(dataset, keys_2)", "def create_dataset_splits(n, p=1.0):\n\tperm = np.random.permutation(n).tolist()\n\tidx = int(p * n)\n\treturn perm[:idx]", "def split(bn, node, Z=None):\n # no estoy seguro si hace falta crear nuevas variables, pero mas\n # vale prevenir que lamentar\n for v in bn.V:\n if v.name == node:\n node = v\n break\n if Z is None:\n Z = bn.G[node]\n\n V = [RandomVariable(var.name, var.Domain) for var in bn.V]\n\n # mapeo de variables viejas a las nuevas. Sirve mas adelante para\n # crear la lista de arcos\n old2new = dict(zip(bn.V, V))\n\n # crear el nuevo nodo y agregarlo a la lista de variables\n newnode = RandomVariable(node.name + \"*\", node.Domain)\n V.append(newnode)\n\n # crear la lista de arcos\n E = []\n for parent, children in bn.G.iteritems():\n for child in children:\n if parent == node and child in Z:\n E.append((newnode, old2new[child]))\n else:\n E.append((old2new[parent], old2new[child]))\n\n # calcular los CPTs\n\n # para cada variable no afectada por el split + variable papa: copiar cpts\n for v in bn.V:\n if not (v in Z):\n old2new[v].cpt = Factor([old2new[e] for e in v.cpt.domain()], v.cpt.getfunction())\n\n # para la variable creada por el split: cpt uniforme\n newnode.cpt = Factor([newnode], [1.0/len(newnode.domain()) for e in newnode.domain()])\n\n # para cada variable hijo afectada por el split: cpt igual a anterior, pero\n # con lista de papas cambiada\n def cp(e):\n if e == node:\n return newnode\n\n return old2new[e]\n\n for v in Z:\n old2new[v].cpt = Factor([cp(e) for e in v.cpt.domain()], v.cpt.getfunction())\n \n name = bn.name + \" splitted\"\n beta = 1\n return DBN(V,E,name,\"\"), newnode", "def random_split(dataset, lengths):\n if sum(lengths) != len(dataset):\n raise ValueError(\"Sum of input lengths does not equal the length of the input dataset!\")\n\n indices = randperm(sum(lengths))\n return [Subset(dataset, indices[offset - length:offset]) for offset, length in zip(_accumulate(lengths), lengths)]", "def _split_generators(self, dl_manager: tfds.download.DownloadManager):\n \n download_server = environ.get('ROAD_OBSTACLE_URL')\n if download_server is None:\n raise RuntimeError('Please specify server URL as ROAD_OBSTACLE_URL env variable.')\n\n download_url = download_server + \"/dataset_RoadObstacle_0.0.3.zip\"\n download_dir = dl_manager.download_and_extract(download_url)\n\n data_dir = Path(download_dir) / 'dataset_RoadObstacle'\n\n splits = json.loads((data_dir / 'splits.json').read_text())\n\n make_split_entry = lambda name, key: SplitGenerator(\n name=name, \n gen_kwargs = dict(data_dir=str(data_dir), split=key)\n )\n\n return [\n make_split_entry(tfds.Split.TEST, 'full')\n ] + [\n make_split_entry(k, k)\n for k in sorted(splits.keys())\n ]", "def split_data(data, prob):\n\tresults = [], []\n\tfor row in data:\n\t\tresults[0 if random.random() < prob else 1].append(row)\n\treturn results", "def split_data(data, prob):\n\tresults = [], []\n\tfor row in data:\n\t\tresults[0 if random.random() < prob else 1].append(row)\n\treturn results", "def gen_splits(n_splits, test_size, X, Y, groups=None, random_state=0):\n from sklearn.model_selection import GroupShuffleSplit\n\n gss = GroupShuffleSplit(\n 
n_splits=n_splits, test_size=test_size, random_state=random_state\n )\n train_test_splits = list(gss.split(X, Y, groups=groups))\n split_indices = list(range(n_splits))\n return train_test_splits, split_indices", "def __generate_random_nodes(self,k=3):\n if k < 3:\n k = 3\n\n k = min(k,len(self.G.nodes()))\n self.__logger.info(\"RANDOM_NODES: try to generate a set of {} nodes sampled with uniform distribution\".format(k))\n \n return random.sample(self.G.nodes(),k)", "def run_trial(self, num_nodes): \n #compute the neighbors for the newly-created node\n new_node_neighbors = set()\n for dummy_idx in range(num_nodes):\n new_node_neighbors.add(random.choice(self._node_numbers))\n # update the list of node numbers so that each node number \n # appears in the correct ratio\n self._node_numbers.append(self._num_nodes)\n self._node_numbers.extend(list(new_node_neighbors)) \n #update the number of nodes\n self._num_nodes += 1\n return list(new_node_neighbors)", "def generate(self, num_leafs):\n leafs = self.get_leafs()\n for _ in range(num_leafs):\n box = leafs[np.random.choice(len(leafs))]\n leafs.remove(box)\n ch0, ch1 = box.split()\n self.add_edge(box, ch0)\n self.add_edge(box, ch1)\n leafs.append(ch0)\n leafs.append(ch1)", "def split_data(data, prob):\n results = [], []\n for row in data:\n results[0 if random.random() < prob else 1].append(row)\n return results", "def splitData(self,k,seed,data=None,M = 8):\n self.testdata = {}\n self.traindata = {}\n data = data or self.data\n random.seed(seed)\n for user,item, record in self.data:\n if random.randint(0,M) == k:\n self.testdata.setdefault(user,{})\n self.testdata[user][item] = record \n else:\n self.traindata.setdefault(user,{})\n self.traindata[user][item] = record", "def __split_dataset(self):\n self.train, self.valid, _, _ = train_test_split(self.data, self.data, test_size=0.2)\n self.valid, self.test, _, _ = train_test_split(self.valid, self.valid, test_size=0.5)", "def split_data(num_samples, num_splits):\n\n kf = sklearn.model_selection.KFold(n_splits=num_splits, random_state=0);\n return kf.split(range(num_samples))", "def split_simplified_json_acceptor_dataset(dataset: SimpleJsonAcceptorDataset, split_list):\n import numpy as np\n # create a list of lengths [0.1, 0.4, 0.5] -> [100, 500, 1000(=len_data)]\n split_list = np.multiply(np.cumsum(split_list), len(dataset)).astype(\"int\").tolist()\n # list of shuffled indices to sample randomly\n shuffled_idx = list(range(len(dataset)))\n shuffle(shuffled_idx)\n # split the data itself\n new_data = [[] for _ in range(len(split_list))]\n for sub_data_idx, (start, end) in enumerate(zip([0] + split_list[:-1], split_list)):\n for i in range(start, end):\n new_data[sub_data_idx].append(dataset.__getitem__(shuffled_idx[i]))\n # create sub sets\n sub_datasets = []\n for i in range(len(new_data)):\n ready_dict = {\n \"_idx_to_chr\": dataset._idx_to_chr,\n \"_chr_embed\": dataset._chr_embed,\n \"data\": new_data[i]\n }\n sub_datasets.append(SimpleJsonAcceptorDataset(dataset._size, ready=ready_dict))\n return sub_datasets", "def split_data(x, y, ratio, seed=1):\n # number of value\n num_points = len(y)\n # compute the index that split the datas\n split = int(np.floor(num_points * ratio))\n\n # set the seed to the given value\n np.random.seed(seed)\n # compute random indexes for training and testing\n rand_indexes = np.random.permutation(num_points)\n index_training = rand_indexes[:split]\n index_testing = rand_indexes[split:]\n\n return x[index_training], y[index_training], x[index_testing], 
y[index_testing]", "def buckets(data, n):\n # Shuffle all datasets to get a more consistent workload for all threads.\n random.shuffle(data)\n\n for i in range(0, len(data), n):\n yield data[i:i + n]", "def split_data(self, data, ratio=0.7, shuffle=True, seed=0):\n if shuffle:\n random.seed(seed) # fix to default seed 0\n random.shuffle(data)\n\n size = int(len(data) * ratio)\n data_1 = data[:size]\n data_2 = data[size:]\n\n return data_1, data_2", "def get_test_split(self, fraction=0.1):\n rng = np.random.default_rng(42)\n test_size = int(round(len(self.all_asset_ids) * fraction))\n test_ids = rng.choice(self.all_asset_ids, size=test_size, replace=False)\n train_ids = [i for i in self.all_asset_ids if i not in test_ids]\n return train_ids, test_ids", "def split(self, X=None, y=None, groups=None):\n\n for train_index in [0,1]:\n train_indices=np.where(self.test_fold==train_index)[0]\n test_indices=np.where(self.test_fold==(train_index+1)%2)[0]\n if self.shuffle:\n self.rng.shuffle(train_indices)\n self.rng.shuffle(test_indices)\n yield train_indices, test_indices", "def prepare_data_for_g(self):\n\n paths = []\n for i in self.root_nodes:\n if np.random.rand() < config.update_ratio:\n sample, paths_from_i = self.sample(i, self.trees[i], config.n_sample_gen, for_d=False)\n if paths_from_i is not None:\n paths.extend(paths_from_i)\n # for each root, we generate 20 samples, each sample is equal to one path from root to that sample\n # So, we will get maximum (num_root x 20) paths\n # path is a list with length = (N x num_sample), with num_sample = 20\n # paths =[[path_root1_to_sample1],[path_root1_to_sample2],....,[path_root1_to_sample20],\n # [path_root2_to_sample1],[path_root2_to_sample2],....,[path_root2_to sample20]\n # .\n # .\n # [path_rootN_to_sample1],[path_rootN_to_sample2],....,[path_rootN_to_sample20]]\n # get_node_pairs_from_path\n\n node_pairs = list(map(self.get_node_pairs_from_path, paths))\n # node_pairs = [[node pairs for path_root1_to_sample1],[node pairs for path_root1_to_sample2],....,[node pairs for path_root1_to_sample20],\n # [node_pairs for path_root2_to_sample1],[node pairs for path_root2_to_sample2],....,[node pairs for path_root2_to sample20],\n # .\n # .\n # [node pairs for path_rootN_to_sample1],[node pairs for path_rootN_to_sample2],....,[node pairs for path_rootN_to_sample20]]\n\n node_1 = []\n node_2 = []\n for i in range(len(node_pairs)):\n for pair in node_pairs[i]:\n node_1.append(pair[0])\n node_2.append(pair[1])\n # reward = self.sess.run(self.discriminator.reward,\n # feed_dict={self.discriminator.node_id: np.array(node_1),\n # self.discriminator.node_neighbor_id: np.array(node_2)})\n reward = self.discriminator.forward(node_1, node_2)\n return node_1, node_2, reward", "def split_data(self,test=False):\n shuffle_index = torch.randperm(self.train_target.shape[0])\n load = shuffle_index.shape[0]\n train_input_shuffle = self.train_input[shuffle_index]\n train_target_shuffle = self.train_target[shuffle_index]\n train_classes_shuffle = self.train_classes[shuffle_index]\n index_train = self.index_for_equal_class(train_target_shuffle[:load//2])\n train_input = train_input_shuffle[index_train]\n train_target = train_target_shuffle[index_train]\n train_classes = train_classes_shuffle[index_train]\n if not test:\n index_test = self.index_for_equal_class( train_target_shuffle[load//2:]) + load//2\n test_input = train_input_shuffle[index_test]\n test_target = train_target_shuffle[index_test]\n test_classes = train_classes_shuffle[index_test]\n else:\n index_test = 
self.index_for_equal_class(self.test_target)\n test_input = self.test_input[index_test]\n test_target = self.test_target[index_test]\n test_classes = self.test_classes[index_test]\n train_input, mean, std = normalize(train_input)\n test_input, _, _ = normalize(test_input,mean,std)\n return train_input, train_target, train_classes ,test_input ,test_target ,test_classes", "def random_nodes(self):\n SAMPLE_BUFFER_SIZE = 1000\n samples = []\n log('log: ')\n log('log: random_nodes()')\n while True:\n # if there are some left, try returning those\n if len(samples) > 0:\n # make sure the video has neighbors\n log('log: ensuring node has neighbors')\n video_id = samples.pop()\n video_node = Node(video_id)\n if len(video_node.neighbors) == 0:\n continue\n \n log('log: END random_nodes()')\n yield video_node\n else:\n # refill the buffer\n log('log: filling up buffer for random_nodes')\n samples = DB.sample(\n SAMPLE_BUFFER_SIZE,\n {\n \"related_videos\": { \"$exists\": True },\n \"basic_info\": { \"$exists\": True },\n \"frames.0\": { \"$exists\": True },\n }\n )\n log('log: buffer filled')\n \n # sanity check\n if len(samples) == 0:\n print('log: len(samples) == 0 AFTER retriving from the database, something is broken')\n break", "def split_data(x, y, ratio, seed=1):\n np.random.seed(seed)\n\n N = len(y)\n rat = int(np.floor(ratio*N))\n idx = np.random.choice(np.arange(len(x)), N, replace=False)\n \n x_ = x[idx]\n y_ = y[idx]\n \n train_x = x_[:rat]\n test_x = x_[rat:]\n \n train_y = y_[:rat]\n test_y = y_[rat:]\n \n return train_x, train_y, test_x, test_y", "def split(self, count):\n num_child_modules = self.num_modules - 1\n\n # Sample module counts at random\n module_counts = combinatorics.uniform_non_negative_integers_with_sum(\n count, num_child_modules)\n\n if num_child_modules == 0:\n if self.entropy > 0:\n raise ValueError('Unused entropy')\n entropies = np.zeros(count)\n else:\n entropies = self.entropy * np.random.dirichlet(\n np.maximum(1e-9, module_counts))\n\n sample_args = []\n for i, num_modules in enumerate(module_counts):\n child_sample_args = SampleArgs(\n num_modules=num_modules, entropy=entropies[i])\n sample_args.append(child_sample_args)\n\n return sample_args", "def split_data(dataset, test_size=0.5):\n shuffled_data = np.random.RandomState(seed=721).permutation(dataset)\n train_set = shuffled_data[: int(len(dataset) * (1 - test_size)), :]\n test_set = shuffled_data[int(len(dataset) * (1 - test_size)):, :]\n return train_set, test_set", "def _dataset_split_generators(self):\n raise NotImplementedError()", "def split_data(basedir, data_split=0.80):\n manip = data_manipulator(basedir)\n manip.train_test_split(data_split=data_split)", "def split_network(self):\n disconnect_nodes(self.nodes[1], 2)\n disconnect_nodes(self.nodes[2], 1)\n self.sync_all([self.nodes[:2], self.nodes[2:]])", "def random_split(X, test_rate=0.3):\n n_sample = X.shape[0]\n test_size = int(n_sample * test_rate)\n train_size = n_sample - test_size\n all_indices = list(range(n_sample))\n np.random.shuffle(all_indices) \n all_indices = np.array(all_indices)\n return all_indices[:train_size], all_indices[train_size:]", "def split(self, test_size=0.25, random_state=None):\n self.train_index, self.test_index = ms.train_test_split(\n self.data.index, test_size=test_size, random_state=random_state)", "def splitList(itms, numGr):\n\ttcount = len(itms)\n\tcItems = list(itms)\n\tsz = int(len(cItems) / numGr)\n\tgroups = list()\n\tcount = 0\n\tfor i in range(numGr):\n\t\tif (i == numGr - 1):\n\t\t\tcsz = tcount - 
count\n\t\telse:\n\t\t\tcsz = sz + randint(-2, 2)\n\t\t\tcount += csz\n\t\tgr = list()\n\t\tfor j in range(csz):\n\t\t\tit = selectRandomFromList(cItems)\n\t\t\tgr.append(it)\t\n\t\t\tcItems.remove(it)\t\n\t\tgroups.append(gr)\n\treturn groups", "def prepare_data_for_d(self):\n\n center_nodes = []\n neighbor_nodes = []\n labels = []\n for i in self.root_nodes:\n if np.random.rand() < config.update_ratio:\n # self.graph[i] = [neighbors of i]\n pos = self.graph[i]\n neg, _ = self.sample(i, self.trees[i], len(pos), for_d=True)\n # print(\"tree_i_d: \", self.trees[i])\n # print(\"neg_samples: \", neg)\n # print(\"neg is: \", neg)\n if len(pos) != 0 and neg is not None:\n # positive samples\n center_nodes.extend([i] * len(pos))\n neighbor_nodes.extend(pos)\n labels.extend([1] * len(pos))\n\n # negative samples\n center_nodes.extend([i] * len(pos))\n neighbor_nodes.extend(neg)\n labels.extend([0] * len(neg))\n # print(\"cen: \", center_nodes)\n return center_nodes, neighbor_nodes, labels", "def split_data(x, y, ratio, seed=1):\n \"\"\"\n Assemble the 3 label vectors with the original ordering \n Input:\n - x (ndarray) : binary prediction for set 1\n - y (ndarray) : binary prediction for set 2\n - ratio (ndarray) : binary prediction for set 3\n - seed (float) : indices of the data points in set 1 \n Output: \n - train_x (ndarray) : binary prediction for set 1\n - train_y (ndarray) : binary prediction for set 2\n - test_x (ndarray) : binary prediction for set 3\n - test_y (ndarray) : indices of the data points in set 1\n \"\"\"\n # set seed and shuffle the indices\n np.random.seed(seed)\n shuffle_indices = np.random.permutation(np.arange(len(y)))\n shuffled_y = y[shuffle_indices]\n shuffled_x = x[shuffle_indices]\n \n #splits the set according to the ratio on the shuffled set\n ratio_idx = int(np.floor(ratio*len(y)))\n train_y = shuffled_y[:ratio_idx]\n train_x = shuffled_x[:ratio_idx]\n test_y = shuffled_y[ratio_idx:]\n test_x = shuffled_x[ratio_idx:]\n return train_x, train_y, test_x, test_y", "def split(self, fractions=[0.8, 0.2]):\n\n if sum(fractions) > 1.0 or sum(fractions) <= 0:\n raise ValueError(\"the sum of fractions argument should be between 0 and 1\")\n\n # random indices\n idx = np.arange(self.n_samples)\n np.random.shuffle(idx)\n\n # insert zero\n fractions.insert(0, 0)\n\n # gte limits of the subsets\n limits = (np.cumsum(fractions) * self.n_samples).astype(np.int32)\n\n subsets = []\n # create output dataset\n for i in range(len(fractions) - 1):\n subsets.append(\n Dataset(self.inputs[idx[limits[i]:limits[i + 1]]], self.targets[idx[limits[i]:limits[i + 1]]]))\n\n return subsets", "def split_to_train_test(split_ratio, input_data):\n\n data = input_data.drop_duplicates()\n data = data.sample(frac = 1)\n data = np.r_[data]\n rows, columns = data.shape\n a = int(rows*split_ratio)\n train_data = data[0: a]\n test_data = data[a: rows+1]\n\n return train_data, test_data", "def partition(data: list, parts: list, *args: float) -> list:\n random.seed(42)\n partition_names = parts\n random.shuffle(data)\n n = len(data)\n rem, a, b = n, 0, 0\n parts = []\n\n for p in args:\n b = a + int(n*p)\n parts.append(data[a:b])\n rem -= (b - a)\n a = b\n # end\n\n parts.append(data[-rem:])\n return parts", "def random_partition(n, n_data):\n all_idxs = np.arange(n_data)\n np.random.shuffle(all_idxs)\n idxs1 = all_idxs[:n]\n idxs2 = all_idxs[n:]\n return idxs1, idxs2", "def random_assignment_of_clusters(self, data):\n return np.random.randint(low=0, high=self.n_clusters, size=len(data[:, 0]))", "def 
split(self, train_fraction=0.8, val_fraction=0.2, test_fraction=0, seed=1):\n if self.is_initialized():\n return\n self.ensure_fraction_sum(train_fraction, val_fraction, test_fraction)\n np.random.seed(seed)\n self.samples = sorted(self.samples)\n np.random.shuffle(self.samples)\n train_idx = ceil(train_fraction*(len(self.samples)))\n val_idx = train_idx + ceil(val_fraction*(len(self.samples)))\n test_idx = val_idx + ceil(test_fraction*(len(self.samples)))\n indices = list(range(len(self.samples)))\n self.indices[TRAIN_SUBSET] = indices[:train_idx]\n self.indices[VAL_SUBSET] = indices[train_idx:val_idx]\n self.indices[TEST_SUBSET] = indices[val_idx:test_idx]", "def build_toy_dataset(N):\n y_data = np.random.uniform(-10.5, 10.5, N)\n r_data = np.random.normal(size=N) # random noise\n x_data = np.sin(0.75 * y_data) * 7.0 + y_data * 0.5 + r_data * 1.0\n x_data = x_data.reshape((N, 1))\n return train_test_split(x_data, y_data, random_state=42)", "def _split_kmeans(self, node):\n\n # bi-partition with k-means until children have enough samples or max outliers is reached\n n_outliers = 0\n ids = node.ids\n left, right = None, None\n\n # define the score to improve upon\n if self.n_clusters >= self.min_leaves and node.size <= self.max_leaf_size:\n # require an improvement of children\n best_score = node.score\n # limit outliers to smallest cluster possible\n max_outliers = self.min_leaf_size\n else:\n # just take the best split (even if children are worse)\n best_score = None\n # no limit on outliers: always split\n max_outliers = np.inf\n\n # iterate until valid split or reached max outliers\n while n_outliers < max_outliers:\n labels = get_kmeans_split(self.E[ids])\n if labels is None:\n # could not split\n break\n # compute the split\n _lids = ids[labels == 0]\n _rids = ids[labels == 1]\n # check if the tubes are not too small\n _nl, _nr = len(_lids), len(_rids)\n if _nl + _nr != len(ids):\n raise SplitError(\"BUG in kmeans\")\n if _nl >= self.min_leaf_size and _nr >= self.min_leaf_size:\n # both children are large enough\n _sl = self.get_tube_score(_lids)\n _sr = self.get_tube_score(_rids)\n # get the score of this split\n score = min(_sl, _sr)\n # check if the split improves (each child has better score than the parent)\n if best_score is None or score > best_score:\n # register the split (vec is used to store depth in the tree)\n node.has_children = True\n best_score = score\n left = SpectralNode(\n _lids, node.vec + 1, score=_sl, name=node.name + \"0\")\n right = SpectralNode(\n _rids, node.vec + 1, score=_sr, name=node.name + \"1\")\n break\n elif _nl < self.min_leaf_size and _nr >= self.min_leaf_size:\n # left children is too small: add as outlier\n self.labels[_lids] = -1\n n_outliers += _nl\n # carry on with this subset\n ids = _rids\n elif _nr < self.min_leaf_size and _nl >= self.min_leaf_size:\n # right children is too small: add as outlier\n self.labels[_rids] = -1\n n_outliers += _nr\n # carry on with this subset\n ids = _lids\n else:\n # both too small: node is a leaf\n #msg = 'Both children are too small:'\n #msg+= ' too many outliers ({0} >= max_outliers={1})'.format(n_outliers, max_outliers)\n #msg+= ' or too small node size ({0})'.format(node.size)\n #raise SplitError(msg)\n break\n\n return left, right", "def split_data(x, y, ratio, seed=1):\n # set seed\n np.random.seed(seed)\n # ***************************************************\n # INSERT YOUR CODE HERE\n # split the data based on the given ratio: TODO\n # ***************************************************\n \n def 
split_data(x, y, ratio, seed=1):\n \"\"\"split the dataset based on the split ratio.\"\"\"\n # set seed\n np.random.seed(seed)\n # ***************************************************\n # INSERT YOUR CODE HERE\n # split the data based on the given ratio: TODO\n # ***************************************************\n trainDataLen = round(len(y)*ratio)\n \n trainDataID = random.sample(range(len(y)), trainDataLen)\n \n # USing bool value to obtaint he remainling data for validation data set\n validDataID = np.array(range(len(y))) + 1\n validDataID[trainDataID] = 0\n validDataID = validDataID >0\n \n \n # obtain the trainning data\n trainDataX = x[trainDataID]\n trainDataY = y[trainDataID]\n \n # obtain the validation data\n validDataX = x[validDataID]\n validDataY = y[validDataID] \n \n return trainDataX,trainDataY, validDataX, validDataY\n \n #raise NotImplementedError", "def load_random_split(\n self,\n test_rate=0.1,\n random=False,\n n_negative=100,\n by_user=False,\n n_test=10,\n download=False,\n force_redo=False,\n ):\n processed_random_split_path = os.path.join(self.processed_path, \"random\")\n if not os.path.exists(processed_random_split_path):\n os.mkdir(processed_random_split_path)\n\n parameterized_path = generate_parameterized_path(\n test_rate=test_rate, random=random, n_negative=n_negative, by_user=by_user\n )\n download_path = processed_random_split_path\n processed_random_split_path = os.path.join(\n processed_random_split_path, parameterized_path\n )\n if force_redo:\n self.make_random_split(\n test_rate=test_rate,\n random=random,\n n_negative=n_negative,\n by_user=by_user,\n n_test=n_test,\n )\n elif not os.path.exists(processed_random_split_path):\n if (\n download\n and test_rate == 0.1\n and random is False\n and n_negative == 100\n and by_user is False\n ):\n # default parameters, can be downloaded from Onedrive\n folder = OneDrive(\n url=self.processed_random_split_url, path=download_path\n )\n folder.download()\n un_zip(processed_random_split_path + \".zip\", download_path)\n else:\n # make\n self.make_random_split(\n test_rate=test_rate,\n random=random,\n n_negative=n_negative,\n by_user=by_user,\n n_test=n_test,\n )\n\n # load data from local storage\n return load_split_data(processed_random_split_path, n_test=n_test)", "def data_split(data, labels, train_ratio=0.5, rand_seed=42):\n\n assert 0 <= train_ratio <= 1, \"Error: training set ratio must be between 0 and 1\"\n\n x_train, x_temp, y_train, y_temp = train_test_split(data,\n labels,\n train_size=train_ratio,\n random_state=rand_seed)\n\n x_val, x_test, y_val, y_test = train_test_split(x_temp,\n y_temp,\n train_size=0.5,\n random_state=rand_seed)\n\n return x_train, x_val, x_test, y_train, y_val, y_test", "def split(self, params):\n\n if \"train_df\" in params.keys():\n self.df = params[\"train_df\"]\n if \"test_df\" in params.keys():\n self.df = pd.concat([self.df, params[\"test_df\"]])\n if \"n_splits\" in params.keys():\n self.n_splits = params[\"n_splits\"]\n if \"shuffle\" in params.keys():\n self.shuffle = params[\"shuffle\"]\n if \"random_state\" in params.keys():\n self.random_state = params[\"random_state\"]\n\n self.__validate_input()\n\n n_samples = num_of_samples(self.df)\n\n if self.n_splits > n_samples:\n raise ValueError(\n f\"Cannot have number of splits {self.n_splits} > number of\"\n f\" samples {n_samples}\"\n )\n\n indices = np.arange(n_samples)\n for test_indices in self.__iter_test_indices(n_samples):\n train_indices = indices[np.logical_not(test_indices)]\n test_indices = 
indices[test_indices]\n yield train_indices, test_indices", "def get_data():\n transform = Compose([paddle.vision.Resize(32),\n Normalize(mean=[127.5], std=[127.5], data_format='CHW'),\n paddle.vision.transforms.Transpose()])\n train_data = paddle.vision.datasets.Cifar10(mode='train', transform=transform)\n l = len(train_data)\n return paddle.io.random_split(train_data, [l // 2, l - l // 2])", "def bootstrap_data(self):\n for i in range(self.bootstraps):\n df_i = self.training_df.groupby(\n self.random_effect, group_keys=False\n ).apply(\n lambda x: x.sample(len(x), replace=True)\n )\n self.models.append(self.convert(df=df_i))", "def split_set(self, new_train_factor=None):\n if new_train_factor is not None:\n self._train_factor = NNData.percentage_limiter(new_train_factor)\n\n # calculates the number of examples and the number of training examples\n number_of_examples = len(self._features)\n number_of_training_examples = int(self._train_factor * number_of_examples)\n\n # generates a list of random indirect indices for the training examples\n self._train_indices = random.sample(range(number_of_examples), number_of_training_examples)\n self._train_indices = sorted(self._train_indices)\n\n # generates a list of random indirect indices for the testing examples\n self._test_indices = [number for number in range(number_of_examples) if not (number in self._train_indices)]\n self._test_indices = sorted(self._test_indices)", "def _split_flattened(data, split_ratio, seed=default.DEFAULT_CV_RANDOM_SEED):\n\n check.argument_numeric(split_ratio, 0, 1)\n\n pc = np.sum(data.values != 0)\n gs_count = int(split_ratio * pc)\n idx = _make_shuffled_index(pc, seed=seed)\n\n pr_idx = data.values[data.values != 0].copy()\n gs_idx = data.values[data.values != 0].copy()\n\n pr_idx[idx[0:gs_count]] = 0\n gs_idx[idx[gs_count:]] = 0\n\n gs = data.values.copy()\n pr = data.values.copy()\n\n gs[gs != 0] = gs_idx\n pr[pr != 0] = pr_idx\n\n priors_data = pd.DataFrame(pr, index=data.index, columns=data.columns)\n gold_standard = pd.DataFrame(gs, index=data.index, columns=data.columns)\n\n return priors_data, gold_standard", "def _split_data(self, x, y):\n\tindices = range(self.N)\n\tnp.random.shuffle(indices)\n\ttrain_idx, test_idx = indices[:self.TRAIN_SIZE], indices[self.TRAIN_SIZE:]\n\treturn (x[train_idx,:], y[train_idx,:], x[test_idx,:], y[test_idx,:])", "def generate_samples(self, data_dir, tmp_dir, dataset_split):\n files = self.source_data_files(data_dir, tmp_dir, dataset_split)\n vocab = _extract_vocab_data(files)\n\n # Determine the number of instances to generate\n if dataset_split == problem.DatasetSplit.TRAIN:\n num_instances = self.num_train_instances\n else:\n num_instances = self.num_eval_instances\n\n for _ in range(num_instances):\n instance_size = random.randint(self.min_size, self.max_size)\n tokens = random.choices(vocab, k=instance_size)\n instance = ''.join(tokens)\n yield {'inputs': instance, 'targets': instance}", "def compute_splits(self, G, nw_name='test', samp_frac=0.01, split_id=0, verbose=False):\n # Sample the required number of node pairs from the graph\n train_E, train_E_false = stt.random_edge_sample(nx.adjacency_matrix(G, nodelist=range(len(G.nodes))),\n samp_frac, nx.is_directed(G))\n\n # Raise an error if no edges were selected while sampling matrix entries (both edges and non-edges are required)\n if len(train_E) == 0:\n raise ValueError(\"Sampling fraction {} on {} network is too low, no edges were selected.\".format(samp_frac,\n nw_name))\n\n # Set class attributes to new values\n 
self.set_splits(TG=G, train_E=train_E, train_E_false=train_E_false, samp_frac=samp_frac,\n directed=nx.is_directed(G), nw_name=nw_name, split_id=split_id, verbose=verbose)\n\n return train_E, train_E_false", "def split_simple_json_language_model_dataset(dataset: SimpleJsonLanguageModelDataset, split_list):\n import numpy as np\n # create a list of lengths [0.1, 0.4, 0.5] -> [100, 500, 1000(=len_data)]\n split_list = np.multiply(np.cumsum(split_list), len(dataset)).astype(\"int\").tolist()\n # list of shuffled indices to sample randomly\n shuffled_idx = list(range(len(dataset)))\n shuffle(shuffled_idx)\n # split the data itself\n new_data = [[] for _ in range(len(split_list))]\n for sub_data_idx, (start, end) in enumerate(zip([0] + split_list[:-1], split_list)):\n for i in range(start, end):\n new_data[sub_data_idx].append(dataset.__getitem__(shuffled_idx[i]))\n # create sub sets\n sub_datasets = []\n for i in range(len(new_data)):\n ready_dict = {\n \"_labels\": dataset._labels,\n \"_label_to_idx\": dataset._label_to_idx,\n \"_chr_embed\": dataset._chr_embed,\n \"_idx_to_chr\": dataset._idx_to_chr,\n \"data\": new_data[i],\n }\n sub_datasets.append(SimpleJsonLanguageModelDataset(dataset._size, ready=ready_dict))\n return sub_datasets", "def data_feeder_2():\n return random.sample(range(100), 10)", "def _randomize(self):\n return self.graph", "def totem_random():\n random_head()\n random_head()\n random_head()", "def partition(self, data, labels):\n\t\tfor i in range(self.splits):\n\t\t\tyield self.makePartition(len(labels))", "def train_test_split(X, y, test_size=0.33, random_state=None, shuffle=True):\n\n copyX = copy.deepcopy(X)\n copyY = copy.deepcopy(y)\n if random_state is not None:\n # TODO: seed your random number generator\n #Seed random number generator\n np.random.seed(random_state)\n \n if shuffle: \n # TODO: shuffle the rows in X and y before splitting\n # be sure to maintain the parallel order of X and y!!\n # note: the unit test for train_test_split() does not test\n # your use of random_state or shuffle, but you should still \n # implement this and check your work yourself\n copyX, copyY = myutils.randomize_in_place(copyX,copyY)\n\n #Define Variables\n X_train = []\n X_test = []\n y_train = []\n y_test = []\n prop_sum = 0.0\n numTest = 0\n proportion = 1.0/float(len(X))\n\n #Determine how many values to put in test set\n while(prop_sum < test_size):\n numTest = numTest + 1\n prop_sum = prop_sum + proportion\n \n #Put values in train/test sets\n for i in range(len(X)):\n if(test_size>=1):\n if(i<=len(X)-1-test_size):\n X_train.append(copyX[i])\n y_train.append(copyY[i])\n else:\n X_test.append(copyX[i])\n y_test.append(copyY[i])\n else:\n if(i<=len(X)-1-numTest):\n X_train.append(copyX[i])\n y_train.append(copyY[i])\n else:\n X_test.append(copyX[i])\n y_test.append(copyY[i])\n\n return X_train, X_test, y_train, y_test", "def split_test_data():\n outputvis = ROOT_DIR + 'test_imaging/test_split_1eb.ms'\n targ = TARGETS['NGC1333IRAS4A']\n spw = '{0}:236~276'.format(SPWS[targ.name]['NH3_11'].spw_id)\n split(\n vis=get_vis_name(targ),\n outputvis=outputvis,\n field=targ.name,\n spw=spw,\n )", "def _create_train_val_split(\n self, data_size, shuffle = False, seed = None\n ):\n val_size = int(np.round(data_size * self._val_fraction))\n val_size = max(1, val_size) if self._val_fraction > 0 else 0\n train_size = data_size - val_size\n train_split = np.concatenate(\n [np.ones([train_size], dtype=np.int32),\n np.zeros([val_size], dtype=np.int32)])\n if shuffle:\n 
np.random.RandomState(seed).shuffle(train_split)\n return train_split", "def subsampleData(self, count):\n size = 0\n for block in self.blocks: size += len(block[1])\n subset = numpy.random.permutation(size)[:count]\n subset.sort()\n\n pos = 0\n index = 0\n ret = Dataset()\n for block in self.blocks:\n while subset[index]<(pos+len(block[1])):\n loc = subset[index] - pos\n ret.add(block[0][loc,:], block[1][loc])\n index += 1\n if index==subset.shape[0]: return ret\n pos += len(block[1])\n \n return ret", "def split_train_test(data: DF, test_ratio: float, random_state: tp.Optional[int] = None):\n if random_state:\n np.random.seed(random_state)\n shuffled_indices = np.random.permutation(len(data))\n test_set_size = int(len(data) * test_ratio)\n test_indices = shuffled_indices[:test_set_size]\n train_indices = shuffled_indices[test_set_size:]\n return data.iloc[train_indices], data.iloc[test_indices]", "def test_split(self):\n array = np.arange(1000)\n df = DataFlow.from_numpy(array)\n\n # first, test throw errors on invalid arguments\n def assert_invalid_arg(**kwargs):\n with self.assertRaises(ValueError):\n df.split(**kwargs)\n assert_invalid_arg(partitions=[])\n assert_invalid_arg(partitions=[1000, 1])\n assert_invalid_arg(partitions=[1000, -1])\n assert_invalid_arg(partitions=[1, 2])\n assert_invalid_arg(portions=[])\n assert_invalid_arg(portions=[1.0, 0.1])\n assert_invalid_arg(portions=[1.0, -1])\n assert_invalid_arg(portions=[0.1, 0.2])\n\n # next, test split without shuffling\n df1, df2, df3 = df.split(partitions=[700, 200, 100])\n np.testing.assert_array_equal(df1.all()[0], array[:700])\n np.testing.assert_array_equal(df2.all()[0], array[700:900])\n np.testing.assert_array_equal(df3.all()[0], array[900:1000])\n df1, df2, df3 = df.split(portions=[-1, 0.2, 0.1])\n np.testing.assert_array_equal(df1.all()[0], array[:700])\n np.testing.assert_array_equal(df2.all()[0], array[700:900])\n np.testing.assert_array_equal(df3.all()[0], array[900:1000])\n\n # finally, test split with shuffling\n df1, df2 = df.split(portions=[0.5, -1], shuffle=True)\n self.assertEquals(len(df1), 500)\n self.assertEquals(len(df2), 500)\n df_array = np.concatenate([df1.all()[0], df2.all()[0]], axis=0)\n self.assertFalse(np.all(df_array == array))\n np.testing.assert_array_equal(np.sort(df_array), array)", "def train(self):\n max_tuple = self.max_gain()\n # If that gain is 0 then every node should be a pure leaf (hopefully) and you can stop\n while max_tuple.gain != 0:\n max_tuple.node.split(max_tuple.attribute)\n max_tuple = self.max_gain()" ]
[ "0.68305767", "0.6814471", "0.6748061", "0.6691443", "0.66873974", "0.65587986", "0.6541015", "0.6525626", "0.64753836", "0.64283437", "0.63601255", "0.6316696", "0.63140213", "0.6251376", "0.6247065", "0.62162656", "0.6213909", "0.6207955", "0.61947703", "0.6160189", "0.6145648", "0.61302125", "0.6110836", "0.6110836", "0.6079177", "0.6077904", "0.60741174", "0.6067357", "0.60639167", "0.6063279", "0.6044902", "0.6036848", "0.603137", "0.6019551", "0.60139656", "0.600476", "0.5997319", "0.59969264", "0.5976422", "0.59720355", "0.59720355", "0.5967787", "0.5967643", "0.59624374", "0.5960205", "0.59561735", "0.593473", "0.59157336", "0.5901273", "0.5899425", "0.58991057", "0.58989465", "0.5892114", "0.58903694", "0.58825946", "0.58823436", "0.5881913", "0.5879171", "0.587454", "0.58743685", "0.58700746", "0.58661115", "0.58591944", "0.58465177", "0.58148915", "0.58080167", "0.5800037", "0.5798051", "0.57967526", "0.5786677", "0.5778863", "0.5774194", "0.5769062", "0.57659227", "0.5745206", "0.5735132", "0.5712849", "0.57117295", "0.57083154", "0.56960464", "0.5691804", "0.5690876", "0.568905", "0.5688249", "0.56751966", "0.566264", "0.5661343", "0.5659315", "0.5653828", "0.56479555", "0.5643438", "0.56408614", "0.5639223", "0.5638671", "0.56379604", "0.56345314", "0.5633469", "0.5631919", "0.56245387", "0.5617137" ]
0.6804831
2
Give each Node uniform splits of data. Nodes will have same amounts of data.
def uniform_split(self, nr_agents):
    indices = np.linspace(start=0, stop=self.samples.shape[0], num=nr_agents + 1, dtype=int).tolist()

    self.samples = self.partition(self.samples, indices, nr_agents)
    self.labels = self.partition(self.labels, indices, nr_agents)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_data(self):\r\n print('split data')\r\n np.random.shuffle(self.dataList)\r\n l = len(self.dataList)/self.fold\r\n self.dataList = [self.dataList[i*l: (i+1)*l] for i in range(self.fold-1)] + [self.dataList[(self.fold-1)*l:]] # each element in the list is splitted data list\r", "def split_network(self):\n disconnect_nodes(self.nodes[1], 2)\n disconnect_nodes(self.nodes[2], 1)\n self.sync_all([self.nodes[:2], self.nodes[2:]])", "def __split_dataset(self):\n self.train, self.valid, _, _ = train_test_split(self.data, self.data, test_size=0.2)\n self.valid, self.test, _, _ = train_test_split(self.valid, self.valid, test_size=0.5)", "def split(self):\n\n ratio_c = 1 - self.ratio\n self.train, self.test = self.df.randomSplit([self.ratio, ratio_c], seed=12345)", "def split_data(basedir, data_split=0.80):\n manip = data_manipulator(basedir)\n manip.train_test_split(data_split=data_split)", "def separate(self):\n print(\"start dataset separating\")\n sum = 0\n for i in tqdm(range(len(self.itemlen))):\n il = self.itemlen[i]\n if il < 3:\n sum += il\n continue\n rarr = list(range(sum, sum+il))\n random.shuffle(rarr)\n self.train.append({\n 'input': self.input[rarr[0]],\n 'label': self.label[i]\n })\n self.val.append({\n 'input': self.input[rarr[1]],\n 'label': self.label[i]\n })\n for j in range(2, len(rarr)):\n self.test.append({\n 'input': self.input[rarr[j]],\n 'label': self.label[i]\n })\n sum += il", "def train_test_split(self):\n random.seed(self.args.seed)\n nodes = [node for node in range(self.ncount)]\n random.shuffle(nodes)\n self.train_nodes = torch.LongTensor(nodes[0:self.args.training_size])\n self.validation_nodes = torch.LongTensor(nodes[self.args.training_size:self.args.training_size+self.args.validation_size])\n self.test_nodes = torch.LongTensor(nodes[self.args.training_size+self.args.validation_size:])", "def chunks(data, n):\n newn = int(len(data) / n) # chunk size \n \n for i in range(0, n-1):\n test_chunk = data[i*newn:i*newn+newn]\n train_chunk = [el for el in data if el not in test_chunk]\n yield train_chunk, test_chunk\n \n test_chunk = data[n*newn-newn:]\n train_chunk = [el for el in data if el not in test_chunk]\n \n yield train_chunk, test_chunk", "def dataset_splits(self):\n # 10% evaluation data\n return [{\n \"split\": problem.DatasetSplit.TRAIN,\n \"shards\": 799,\n }, {\n \"split\": problem.DatasetSplit.EVAL,\n \"shards\": 1,\n }]", "def dataset_splits(self):\n return [{\n \"split\": problem.DatasetSplit.TRAIN,\n \"shards\": 80,\n }, {\n \"split\": problem.DatasetSplit.EVAL,\n \"shards\": 2,\n }]", "def partition(data, n):\n splits = []\n remaining = data.copy(deep=True)\n for i in range(n):\n split = remaining.sample(frac=1/(n-i), random_state=10)\n splits.append(split)\n remaining = remaining.drop(split.index)\n return splits", "def non_iid_split(self, nr_agents, class_per_node, random):\n unique = list(set(self.labels.tolist()))\n len_unique = len(unique)\n\n # Create array that assigns a class to specific nodes\n # Use 'np.arange' to ensure every class is represented before repeating\n # A row represents nr_agents, a column represents classes per node\n agent_class_master = np.arange(start=0, stop=nr_agents * class_per_node) % len_unique\n np.random.shuffle(agent_class_master)\n agent_class_master = agent_class_master.reshape(nr_agents, class_per_node)\n\n # Split data by labels\n sample_list = [[] for _ in range(len_unique)]\n for i in range(len(self.labels)):\n sample_list[self.labels[i]].append(self.samples[i])\n\n # By class creates uniform or 
random indices splits to partition data to agents evenly\n class_count = np.bincount(agent_class_master.ravel())\n class_indices = {}\n for i in range(len(class_count)):\n if random:\n indices = sorted(np.random.randint(0, high=len(sample_list[i]), size=class_count[i] - 1).tolist())\n indices = [0] + indices\n indices += [len(sample_list[i])]\n class_indices[i] = indices\n else:\n class_indices[i] = np.linspace(start=0, stop=len(sample_list[i]), num=class_count[i] + 1,\n dtype=int).tolist()\n\n # Main loop that partitions data by the assigned class and proper amount\n all_agents = []\n all_class = []\n for agent in agent_class_master:\n agent_data = []\n agent_class = []\n for cls in agent:\n # Proportioned indices for data and grab correctly indexed data\n temp_indices = class_indices[cls]\n data_for_agent = sample_list[cls][temp_indices[0]:temp_indices[1] - 1]\n\n # Add data and class to this agents list\n agent_data = agent_data + data_for_agent\n agent_class = agent_class + [cls for _ in range(len(data_for_agent))]\n\n # Drop first index since we used that data, forces next person to use next index\n class_indices[cls] = temp_indices[1:]\n\n # Append agents data and class labels in order\n all_agents.append(torch.stack(agent_data))\n all_class.append(torch.tensor(agent_class))\n\n self.samples = all_agents\n self.labels = all_class", "def partition(self, data, labels):\n\t\tfor i in range(self.splits):\n\t\t\tyield self.makePartition(len(labels))", "def test_n_group_split(self):\n # Test 2 groups like HalfSplitter first\n hs = NGroupPartitioner(2)\n\n for isreversed, splitter in enumerate((hs, hs)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in hs.generate(self.data) ]\n self.assertTrue(len(splits) == 2)\n\n for i, p in enumerate(splits):\n self.assertTrue( len(p) == 2 )\n self.assertTrue( p[0].nsamples == 50 )\n self.assertTrue( p[1].nsamples == 50 )\n\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n\n # check if it works on pure odd and even chunk ids\n moresplits = [ list(spl.generate(p)) for p in hs.generate(splits[0][0])]\n\n for split in moresplits:\n self.assertTrue(split[0] != None)\n self.assertTrue(split[1] != None)\n\n # now test more groups\n s5 = NGroupPartitioner(5)\n\n # get the splits\n for isreversed, s5splitter in enumerate((s5, s5)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in s5splitter.generate(self.data) ]\n\n # must have 10 splits\n self.assertTrue(len(splits) == 5)\n\n # check split content\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [2, 3, 4, 5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [2, 3])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 4, 5, 6, 7, 8, 9])\n # ...\n assert_array_equal(splits[4][1-isreversed].sa['chunks'].unique,\n [8, 9])\n assert_array_equal(splits[4][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4, 5, 6, 7])\n\n\n # Test for too many groups\n def splitcall(spl, dat):\n return 
list(spl.generate(dat))\n s20 = NGroupPartitioner(20)\n self.assertRaises(ValueError,splitcall,s20,self.data)", "def get_split_data(self):\n X, y, _, _ = self.get_subsets()\n return train_test_split(X, y, test_size=0.3, random_state=42)", "def split(self, fractions=[0.8, 0.2]):\n\n if sum(fractions) > 1.0 or sum(fractions) <= 0:\n raise ValueError(\"the sum of fractions argument should be between 0 and 1\")\n\n # random indices\n idx = np.arange(self.n_samples)\n np.random.shuffle(idx)\n\n # insert zero\n fractions.insert(0, 0)\n\n # gte limits of the subsets\n limits = (np.cumsum(fractions) * self.n_samples).astype(np.int32)\n\n subsets = []\n # create output dataset\n for i in range(len(fractions) - 1):\n subsets.append(\n Dataset(self.inputs[idx[limits[i]:limits[i + 1]]], self.targets[idx[limits[i]:limits[i + 1]]]))\n\n return subsets", "def split_data(x, y, ratio, seed=1):\n # set seed\n np.random.seed(seed)\n packed = np.vstack([y,x]).T\n np.random.shuffle(packed)\n N = y.shape[0]\n eightyN = int(ratio*N)\n xTrain = packed[0:eightyN,1]\n yTrain = packed[0:eightyN,0]\n xTest = packed[eightyN:N, 1]\n yTest = packed[eightyN:N,0]\n # ***************************************************\n # INSERT YOUR CODE HERE\n # split the data based on the given ratio: TODO\n # ***************************************************\n return xTrain, yTrain, xTest, yTest", "def split_dataset(self, split):\n trunk_pos_size = math.ceil((1 - split) * len(self.Pos))\n trunk_neg_size = math.ceil((1 - split) * len(self.Neg))\n trunk_num = int(1 / (1 - split))\n pos_temp = list()\n neg_temp = list()\n for index in range(trunk_num):\n pos_temp.append(self.Pos[index * trunk_pos_size:(index + 1) *\n trunk_pos_size])\n neg_temp.append(self.Neg[index * trunk_neg_size:(index + 1) *\n trunk_neg_size])\n self.test = pos_temp.pop(2) + neg_temp.pop(2)\n # self.train = [i for item in pos_temp + neg_temp for i in item]\n self.train = []\n for item in pos_temp + neg_temp:\n for i in item:\n self.train.append(i)\n\n random.shuffle(self.train)\n random.shuffle(self.test)", "def buckets(data, n):\n # Shuffle all datasets to get a more consistent workload for all threads.\n random.shuffle(data)\n\n for i in range(0, len(data), n):\n yield data[i:i + n]", "def split_data(test_data, split_ratio):\n split_index = int(split_ratio * len(test_data))\n \n # randomly permute the values in place\n random.shuffle(test_data)\n \n # take slices of the determined size\n training_set = copy.copy(test_data[:split_index])\n test_data = copy.copy(test_data[split_index:])\n\n return training_set, test_data", "def _chunk_data(self):\n for n in range(0, len(self.data) + 1, len(self.data) //\n self.num_of_chunks):\n yield self.data[0 + n:len(self.data) // self.num_of_chunks + n]", "def grow(self):\n while self.splittable_nodes:\n self.split_next()", "def split(self, num_or_size_splits, shuffle=False):\n raise NotImplementedError", "def _distribute_data_to_cluster(self):\n\n for data in self.data:\n _distances = self._calculate_distances(data)\n _cluster = self._get_closest_cluster(_distances)\n self.clusters[_cluster].append(data)", "def _create_split(cls, onnx_node, inputs, opset_version):\n axis = onnx_node.getattr(\"axis\", 0)\n split = onnx_node.getattr(\"split\", None)\n num_output = len(onnx_node.outputs)\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(axis, split, num_output)", "def _cluster(self):\n self._not_included = self.data\n self.leaves = []\n flag = int(rand() * len(self.data))\n 
flag = self._generate(flag)\n while len(self._not_included) > 0:\n flag = self._generate(flag)\n if flag == -1:\n break\n pass\n self._remember.append({\n 'threshold': self._base_threshold,\n 'result': len(self.leaves)\n })\n print(len(self._remember), {\n 'threshold': self._base_threshold,\n 'result': len(self.leaves)\n })\n return", "def _split_flattened(data, split_ratio, seed=default.DEFAULT_CV_RANDOM_SEED):\n\n check.argument_numeric(split_ratio, 0, 1)\n\n pc = np.sum(data.values != 0)\n gs_count = int(split_ratio * pc)\n idx = _make_shuffled_index(pc, seed=seed)\n\n pr_idx = data.values[data.values != 0].copy()\n gs_idx = data.values[data.values != 0].copy()\n\n pr_idx[idx[0:gs_count]] = 0\n gs_idx[idx[gs_count:]] = 0\n\n gs = data.values.copy()\n pr = data.values.copy()\n\n gs[gs != 0] = gs_idx\n pr[pr != 0] = pr_idx\n\n priors_data = pd.DataFrame(pr, index=data.index, columns=data.columns)\n gold_standard = pd.DataFrame(gs, index=data.index, columns=data.columns)\n\n return priors_data, gold_standard", "def split_data(self):\n np.random.seed(seed=self.seed)\n indices = np.random.permutation(self.predictor_vars.shape[0])\n split_row = round(self.predictor_vars.shape[0] * self.train_split)\n train_idx, test_idx = indices[:split_row], indices[split_row:]\n self.predictor_vars_train, self.predictor_vars_test = (\n self.predictor_vars[train_idx, :],\n self.predictor_vars[test_idx, :],\n )\n self.response_var_train, self.response_var_test = (\n self.response_var[train_idx],\n self.response_var[test_idx],\n )", "def random_split(self, nr_agents):\n np.random.seed(self.random_seed)\n # Get random indices\n indices = sorted(np.random.randint(0, high=self.samples.shape[0], size=nr_agents - 1).tolist())\n indices = [0] + indices\n indices += [self.samples.shape[0]]\n\n self.samples = self.partition(self.samples, indices, nr_agents)\n self.labels = self.partition(self.labels, indices, nr_agents)", "def batch_split(self) -> np.array:\n pass", "def split_dataset(dataset, Ntotal, val_frac,\n batch_size, num_workers,\n random_seed=0, shuffle=True, balance=False):\n \n Nval = math.floor(Ntotal*val_frac)\n train_ds, val_ds = ch.utils.data.random_split(dataset, \n [Ntotal - Nval, Nval], \n generator=ch.Generator().manual_seed(random_seed))\n if balance: \n val_ds = balance_dataset(val_ds)\n split_datasets = [train_ds, val_ds]\n \n split_loaders = []\n for ds in split_datasets:\n split_loaders.append(ch.utils.data.DataLoader(ds, \n num_workers=num_workers, \n batch_size=batch_size, \n shuffle=shuffle))\n return split_datasets, split_loaders", "def train_val_split(self):\n idx = np.arange(self.num_data)\n np.random.shuffle(idx)\n val_num = int(self.ratio * self.num_data)\n dev_num = int(self.dev_ratio * self.num_data)\n self.num_train = self.num_data - val_num\n\n self.val_data = self.data[idx[:val_num]]\n self.val_label = self.label[idx[:val_num]]\n \n self.train_data = self.data[idx[val_num:]]\n self.train_label = self.label[idx[val_num:]]\n\n self.dev_data = self.data[idx[:dev_num]]\n self.dev_label = self.label[idx[:dev_num]]", "def _split(self, split, randomise=False, **kwargs):\r\n # Copy split to prevent modifying outside arguments\r\n split = split.copy()\r\n # Compute total\r\n total = sum(split.values())\r\n # If split contains floats, convert to integers\r\n if isinstance(total, float):\r\n assert_msg = 'Not enough data! ' \\\r\n + f'Split requires a total of {total*100}%. 
' \\\r\n + 'Split should not exceed 100%.'\r\n assert total <= 1, assert_msg\r\n # Add 'rest' subset if not all data is used in split\r\n if total < 1:\r\n split['rest'] = 1 - total\r\n split = self._float_split_to_int(split)\r\n total = sum(split.values())\r\n # Create subsets based off integer values\r\n if isinstance(total, int):\r\n assert_msg = 'Not enough data! ' \\\r\n + f'Split requires a total of {total} data entries ' \\\r\n + f'but only {len(self.data)} are available.'\r\n assert total <= len(self.data), assert_msg\r\n # Add 'rest' subset if not all data is used in split\r\n if total < len(self.data):\r\n split['rest'] = len(self.data) - total\r\n # Create subsets\r\n index = 0\r\n for name, length in split.items():\r\n subset_name = f'{self.name}.{name}'\r\n subset_data = self.data[index:index + length]\r\n subset_seed = self.seed\r\n if self.seed is not None:\r\n subset_seed += sum([ord(c) for c in name]) + length\r\n subset = self._make_subset(subset_name,\r\n subset_data,\r\n randomise=randomise,\r\n seed=subset_seed,\r\n **kwargs\r\n )\r\n setattr(self, name, subset)\r\n index += length\r\n # Replace data with references to subsets\r\n self.data = []\r\n for name in split.keys():\r\n self.data.append(getattr(self, name, None))\r\n # Indicate that this is a superset\r\n self.is_superset = True", "def mnist_custom_split(split_ratio=0.8, random_seed=0, shuffle_dataset=True, dataset='mnist'):\n if dataset[:5] == 'mnist':\n dataset = datasets.MNIST(definitions.DATA_PATH)\n elif dataset[:6] == 'hmnist':\n dataset = datasets.DatasetFolder(definitions.HMNIST_DATA_FOLDER, data_loader, ALL_EXTS),\n elif dataset[:8] == 'diamonds':\n dataset = datasets.DatasetFolder(definitions.DIAMONDS_DATA_FOLDER, data_loader, ALL_EXTS),\n else:\n print('[ERROR] Unknown dataset for split_and_train! 
=> %s' % dataset)\n exit(1)\n\n dataset_size = len(dataset)\n\n indices = list(range(dataset_size))\n split = int(np.floor(split_ratio * dataset_size))\n logger.debug('Split dataset {}'.format(split))\n if shuffle_dataset:\n np.random.seed(random_seed)\n np.random.shuffle(indices)\n # ==> Mistakes\n # train_indices, val_indices = indices[split:], indices[:split]\n train_indices, val_indices = indices[:split], indices[split:]\n\n # Creating PT data samplers and loaders:\n train_sampler = torch.utils.data.SubsetRandomSampler(train_indices)\n valid_sampler = torch.utils.data.SubsetRandomSampler(val_indices)\n\n return train_sampler, valid_sampler", "def train_test_split(dataset, split):\r\n train = list()\r\n train_size = split * len(dataset)\r\n dataset_copy = list(dataset) \r\n while len(train) < train_size:\r\n index = randrange(len(dataset_copy))\r\n train.append(dataset_copy.pop(index))\r\n return train, dataset_copy", "def split_simplified_json_acceptor_dataset(dataset: SimpleJsonAcceptorDataset, split_list):\n import numpy as np\n # create a list of lengths [0.1, 0.4, 0.5] -> [100, 500, 1000(=len_data)]\n split_list = np.multiply(np.cumsum(split_list), len(dataset)).astype(\"int\").tolist()\n # list of shuffled indices to sample randomly\n shuffled_idx = list(range(len(dataset)))\n shuffle(shuffled_idx)\n # split the data itself\n new_data = [[] for _ in range(len(split_list))]\n for sub_data_idx, (start, end) in enumerate(zip([0] + split_list[:-1], split_list)):\n for i in range(start, end):\n new_data[sub_data_idx].append(dataset.__getitem__(shuffled_idx[i]))\n # create sub sets\n sub_datasets = []\n for i in range(len(new_data)):\n ready_dict = {\n \"_idx_to_chr\": dataset._idx_to_chr,\n \"_chr_embed\": dataset._chr_embed,\n \"data\": new_data[i]\n }\n sub_datasets.append(SimpleJsonAcceptorDataset(dataset._size, ready=ready_dict))\n return sub_datasets", "def split(self, train_fraction=0.8, val_fraction=0.2, test_fraction=0, seed=1):\n if self.is_initialized():\n return\n self.ensure_fraction_sum(train_fraction, val_fraction, test_fraction)\n np.random.seed(seed)\n self.samples = sorted(self.samples)\n np.random.shuffle(self.samples)\n train_idx = ceil(train_fraction*(len(self.samples)))\n val_idx = train_idx + ceil(val_fraction*(len(self.samples)))\n test_idx = val_idx + ceil(test_fraction*(len(self.samples)))\n indices = list(range(len(self.samples)))\n self.indices[TRAIN_SUBSET] = indices[:train_idx]\n self.indices[VAL_SUBSET] = indices[train_idx:val_idx]\n self.indices[TEST_SUBSET] = indices[val_idx:test_idx]", "def splitData(data, class_label, seed, ratio):\n\t\n\trandom.seed(seed)\n\tsubset = data.clone()\n\tsize_data = subset.data.shape[0]\n\tn = int(np.floor(size_data * ratio)) # number of datasets in train\n\tindex = random.sample(range(1, size_data), n)\n\tsplit_list = [item for item in [0] for i in range(size_data)]\n\t\n\tfor i in index:\n\t\tsplit_list[i]=1\n\t\n\treturn split_list #returns list of indeces where 0 is test and 1 is training data ", "def _dataset_split_generators(self):\n raise NotImplementedError()", "def split_data(self, data, ratio=0.7, shuffle=True, seed=0):\n if shuffle:\n random.seed(seed) # fix to default seed 0\n random.shuffle(data)\n\n size = int(len(data) * ratio)\n data_1 = data[:size]\n data_2 = data[size:]\n\n return data_1, data_2", "def train_test_data_split(node_features, labels, train_ratio=0.8):\n num_graph = node_features.shape[0]\n train_test_split = int(train_ratio*num_graph)\n x_train = node_features[:train_test_split,:,:] \n 
y_train = labels[:train_test_split,:,:] \n x_test = node_features[train_test_split:,:,:] \n y_test = labels[train_test_split:,:,:]\n np.save(\"data/node_features_train.npy\", x_train)\n np.save(\"data/node_features_test.npy\", x_test)\n np.save(\"data/labels_train.npy\", y_train)\n np.save(\"data/labels_test.npy\", y_test)\n return x_train, x_test, y_train, y_test", "def train(self):\n max_tuple = self.max_gain()\n # If that gain is 0 then every node should be a pure leaf (hopefully) and you can stop\n while max_tuple.gain != 0:\n max_tuple.node.split(max_tuple.attribute)\n max_tuple = self.max_gain()", "def randsplit(data, sections=2):\n ret = [[] for i in range(sections)]\n for item in data:\n ret[random.randrange(sections)].append(item)\n return ret", "def split(self, X=None, y=None, groups=None):\n\n for train_index in [0,1]:\n train_indices=np.where(self.test_fold==train_index)[0]\n test_indices=np.where(self.test_fold==(train_index+1)%2)[0]\n if self.shuffle:\n self.rng.shuffle(train_indices)\n self.rng.shuffle(test_indices)\n yield train_indices, test_indices", "def split(bn, node, Z=None):\n # no estoy seguro si hace falta crear nuevas variables, pero mas\n # vale prevenir que lamentar\n for v in bn.V:\n if v.name == node:\n node = v\n break\n if Z is None:\n Z = bn.G[node]\n\n V = [RandomVariable(var.name, var.Domain) for var in bn.V]\n\n # mapeo de variables viejas a las nuevas. Sirve mas adelante para\n # crear la lista de arcos\n old2new = dict(zip(bn.V, V))\n\n # crear el nuevo nodo y agregarlo a la lista de variables\n newnode = RandomVariable(node.name + \"*\", node.Domain)\n V.append(newnode)\n\n # crear la lista de arcos\n E = []\n for parent, children in bn.G.iteritems():\n for child in children:\n if parent == node and child in Z:\n E.append((newnode, old2new[child]))\n else:\n E.append((old2new[parent], old2new[child]))\n\n # calcular los CPTs\n\n # para cada variable no afectada por el split + variable papa: copiar cpts\n for v in bn.V:\n if not (v in Z):\n old2new[v].cpt = Factor([old2new[e] for e in v.cpt.domain()], v.cpt.getfunction())\n\n # para la variable creada por el split: cpt uniforme\n newnode.cpt = Factor([newnode], [1.0/len(newnode.domain()) for e in newnode.domain()])\n\n # para cada variable hijo afectada por el split: cpt igual a anterior, pero\n # con lista de papas cambiada\n def cp(e):\n if e == node:\n return newnode\n\n return old2new[e]\n\n for v in Z:\n old2new[v].cpt = Factor([cp(e) for e in v.cpt.domain()], v.cpt.getfunction())\n \n name = bn.name + \" splitted\"\n beta = 1\n return DBN(V,E,name,\"\"), newnode", "def partition_data(self):\n\n _header_ = self._header_ + 'partition_data(): '\n\n if self.verbose:\n print(_header_ + 'Partitioning data ...')\n\n network = self._useful_network()\n\n if self.nidx_train:\n # The only reason that allows .nidx to not be empty would be that a training Data was copied over\n # hence, the training node indices are retained and need to be excluded\n print(_header_ + 'Excluding %d training nodes transfered from training dataset ...' 
% len(self.nidx_train))\n nidx = set(self.nidx2lidx.keys()) - set(self.nidx_train)\n self.nidx_exclude += self.nidx_train\n self.nidx_train = []\n else:\n nidx = set(self.nidx2lidx.keys())\n\n for l in nidx:\n if l in network:\n if self.node_labels[l]:\n self.nidx_train.append(l)\n else:\n self.nidx_exclude.append(l)\n\n if self.verbose:\n print(_header_ + 'Found %d nodes' % len(self.nidx2lidx))\n print(' %d nodes with labels of interest' % len(self.nidx_train))\n print(' %d nodes can be used to predict' % len(self.nidx_pred))\n print(' %d nodes cannot be mapped due to lack of mappable links' % len(self.nidx_exclude))\n\n return self", "def _split_heads(self, x: torch.Tensor) -> torch.Tensor:\n depth = x.size(-1)\n split_x = torch.reshape(x, (\n x.size(0), x.size(1),\n self._hparams.num_heads, depth // self._hparams.num_heads))\n return split_x.permute((0, 2, 1, 3))", "def _split_data(self, x, y):\n\tindices = range(self.N)\n\tnp.random.shuffle(indices)\n\ttrain_idx, test_idx = indices[:self.TRAIN_SIZE], indices[self.TRAIN_SIZE:]\n\treturn (x[train_idx,:], y[train_idx,:], x[test_idx,:], y[test_idx,:])", "def partition(data: list, parts: list, *args: float) -> list:\n random.seed(42)\n partition_names = parts\n random.shuffle(data)\n n = len(data)\n rem, a, b = n, 0, 0\n parts = []\n\n for p in args:\n b = a + int(n*p)\n parts.append(data[a:b])\n rem -= (b - a)\n a = b\n # end\n\n parts.append(data[-rem:])\n return parts", "def split_data(self):\n self.train, self.val, self.test_x, self.test_y = [], [], [], []\n train_size = self.horizon\n # This assumes all countries have the same length.\n # The minus two gives space for the validation and test sets as they will overshoot.\n k_folds = len(self.countries[0].data)//self.horizon - 2\n for _ in range(k_folds):\n tr, v, te_x, te_y = self.cross_validate(train_size)\n self.train.append(tr), self.val.append(v), self.test_x.append(te_x), self.test_y.append(te_y)\n train_size += self.horizon", "def _create_split(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n helper.make_attribute('split', op.parts),\n ])\n return node", "def split_data(num_samples, num_splits):\n\n kf = sklearn.model_selection.KFold(n_splits=num_splits, random_state=0);\n return kf.split(range(num_samples))", "def split_train_val(self,ratio=.1):\n lim = int(np.ceil(len(self.train) * ratio))\n order = list(range(len(self.train)))\n np.random.shuffle(order)\n self.train_train = self.train.ix[order[lim:]]\n self.train_val = self.train.ix[order[:lim]]\n log(\"Split data into training/val: {} -> {} {}\".format(\n len(self.train),len(self.train_train),lim))", "def split_to_train_test(split_ratio, input_data):\n\n data = input_data.drop_duplicates()\n data = data.sample(frac = 1)\n data = np.r_[data]\n rows, columns = data.shape\n a = int(rows*split_ratio)\n train_data = data[0: a]\n test_data = data[a: rows+1]\n\n return train_data, test_data", "def split(self, node, width, height):\r\n node.used = True\r\n node.down = SquareAlgorithmNode(x=node.x,\r\n y=node.y + height,\r\n width=node.width,\r\n height=node.height - height)\r\n node.right = SquareAlgorithmNode(x=node.x + width,\r\n y=node.y,\r\n width=node.width - width,\r\n height=height)\r\n return node", "def split_data(self,test=False):\n shuffle_index = torch.randperm(self.train_target.shape[0])\n load = shuffle_index.shape[0]\n train_input_shuffle = self.train_input[shuffle_index]\n train_target_shuffle = 
self.train_target[shuffle_index]\n train_classes_shuffle = self.train_classes[shuffle_index]\n index_train = self.index_for_equal_class(train_target_shuffle[:load//2])\n train_input = train_input_shuffle[index_train]\n train_target = train_target_shuffle[index_train]\n train_classes = train_classes_shuffle[index_train]\n if not test:\n index_test = self.index_for_equal_class( train_target_shuffle[load//2:]) + load//2\n test_input = train_input_shuffle[index_test]\n test_target = train_target_shuffle[index_test]\n test_classes = train_classes_shuffle[index_test]\n else:\n index_test = self.index_for_equal_class(self.test_target)\n test_input = self.test_input[index_test]\n test_target = self.test_target[index_test]\n test_classes = self.test_classes[index_test]\n train_input, mean, std = normalize(train_input)\n test_input, _, _ = normalize(test_input,mean,std)\n return train_input, train_target, train_classes ,test_input ,test_target ,test_classes", "def split_data(x, y, ratio, seed=1):\n \"\"\"\n Assemble the 3 label vectors with the original ordering \n Input:\n - x (ndarray) : binary prediction for set 1\n - y (ndarray) : binary prediction for set 2\n - ratio (ndarray) : binary prediction for set 3\n - seed (float) : indices of the data points in set 1 \n Output: \n - train_x (ndarray) : binary prediction for set 1\n - train_y (ndarray) : binary prediction for set 2\n - test_x (ndarray) : binary prediction for set 3\n - test_y (ndarray) : indices of the data points in set 1\n \"\"\"\n # set seed and shuffle the indices\n np.random.seed(seed)\n shuffle_indices = np.random.permutation(np.arange(len(y)))\n shuffled_y = y[shuffle_indices]\n shuffled_x = x[shuffle_indices]\n \n #splits the set according to the ratio on the shuffled set\n ratio_idx = int(np.floor(ratio*len(y)))\n train_y = shuffled_y[:ratio_idx]\n train_x = shuffled_x[:ratio_idx]\n test_y = shuffled_y[ratio_idx:]\n test_x = shuffled_x[ratio_idx:]\n return train_x, train_y, test_x, test_y", "def _split_forced(self, node):\n # compute the split\n _vec = 0\n sorted_idxs = np.argsort(self.E[node.ids, _vec]).squeeze()\n n = len(sorted_idxs) // 2\n _lids = node.ids[sorted_idxs[:n]]\n _rids = node.ids[sorted_idxs[n:]]\n # compute the score of the new tubes only\n _sl = self.get_tube_score(_lids)\n _sr = self.get_tube_score(_rids)\n # register the split\n node.has_children = True\n node.thresh = np.median(self.E[node.ids, _vec]) # arbitrary\n # Note: median would not ensure equal size (because of duplicate values)\n left = SpectralNode(_lids, _vec, score=_sl, name=node.name + \"0\")\n right = SpectralNode(_rids, _vec, score=_sr, name=node.name + \"1\")\n\n return left, right", "def split_data(x, y, ratio, seed=1):\n # number of value\n num_points = len(y)\n # compute the index that split the datas\n split = int(np.floor(num_points * ratio))\n\n # set the seed to the given value\n np.random.seed(seed)\n # compute random indexes for training and testing\n rand_indexes = np.random.permutation(num_points)\n index_training = rand_indexes[:split]\n index_testing = rand_indexes[split:]\n\n return x[index_training], y[index_training], x[index_testing], y[index_testing]", "def split(self):\n\n split_fun = [Function(self.F_base) for i in range(self.nvdofs)]\n\n for i in range(self.nvdofs):\n split_fun[i].dat.data[:] = self.dat.data.reshape(-1, self.nvdofs)[:,i]\n return split_fun", "def make_splits(self):\n # produce fold/portion splits of the training indexes: these output indexes to the tr. 
indexes themselves\n if self.folds is not None:\n meta_trainval_idx = kfold_split(self.train_idx, self.folds, self.seed, self.labels, self.label_info)\n elif self.portion is not None:\n meta_trainval_idx = portion_split(self.train_idx, self.portion, self.seed, self.labels, self.label_info)\n else:\n meta_trainval_idx = [(np.arange(len(self.train_idx)), np.arange(0, dtype=np.int32))]\n # \"dereference\" the metaindexes to point to the data themselves\n self.trainval_idx = []\n for (tidx, vidx) in meta_trainval_idx:\n self.trainval_idx.append((self.train_idx[tidx], self.train_idx[vidx]))", "def evenly_partition_dataset(data, labels, nb_teachers):\n\n # This will floor the possible number of batches\n batch_len = int(len(data) / nb_teachers)\n\n nclasses = len(labels[0])\n print(\"Start Index Selection\")\n data_sel = [data[labels[:, j] == 1] for j in range(nclasses)]\n print(\"End Index Selection\")\n i = 0\n data_sel_id = [0] * len(labels[0])\n partition_data = []\n partition_labels = []\n\n while True:\n partition_data.append(data_sel[i][data_sel_id[i]])\n partition_labels.append(np_utils.to_categorical(i, nclasses))\n\n if len(partition_data) == batch_len:\n partition_data = np.asarray(partition_data)\n partition_labels = np.asarray(partition_labels)\n yield partition_data, partition_labels\n partition_data = []\n partition_labels = []\n\n data_sel_id[i] += 1\n if data_sel_id[i] == len(data_sel[i]):\n data_sel_id[i] = 0\n i = (i + 1) % nclasses", "def split_simple_json_language_model_dataset(dataset: SimpleJsonLanguageModelDataset, split_list):\n import numpy as np\n # create a list of lengths [0.1, 0.4, 0.5] -> [100, 500, 1000(=len_data)]\n split_list = np.multiply(np.cumsum(split_list), len(dataset)).astype(\"int\").tolist()\n # list of shuffled indices to sample randomly\n shuffled_idx = list(range(len(dataset)))\n shuffle(shuffled_idx)\n # split the data itself\n new_data = [[] for _ in range(len(split_list))]\n for sub_data_idx, (start, end) in enumerate(zip([0] + split_list[:-1], split_list)):\n for i in range(start, end):\n new_data[sub_data_idx].append(dataset.__getitem__(shuffled_idx[i]))\n # create sub sets\n sub_datasets = []\n for i in range(len(new_data)):\n ready_dict = {\n \"_labels\": dataset._labels,\n \"_label_to_idx\": dataset._label_to_idx,\n \"_chr_embed\": dataset._chr_embed,\n \"_idx_to_chr\": dataset._idx_to_chr,\n \"data\": new_data[i],\n }\n sub_datasets.append(SimpleJsonLanguageModelDataset(dataset._size, ready=ready_dict))\n return sub_datasets", "def split(self, how, nr_agents, **kwargs):\n if how == 'random':\n self.random_split(nr_agents)\n elif how == 'uniform':\n self.uniform_split(nr_agents)\n elif how == 'non_iid_uniform':\n self.non_iid_split(nr_agents, kwargs['class_per_node'], random=False)\n elif how == 'non_iid_random':\n self.non_iid_split(nr_agents, kwargs['class_per_node'], random=True)\n\n return self.get_data()", "def split_data(x, y, ratio, seed=1):\n np.random.seed(seed)\n\n N = len(y)\n rat = int(np.floor(ratio*N))\n idx = np.random.choice(np.arange(len(x)), N, replace=False)\n \n x_ = x[idx]\n y_ = y[idx]\n \n train_x = x_[:rat]\n test_x = x_[rat:]\n \n train_y = y_[:rat]\n test_y = y_[rat:]\n \n return train_x, train_y, test_x, test_y", "def split(self):\n st = time()\n tokens = self._build_args.tokens\n\n for token_split in IStorage._tokens_partitions(tokens, config.min_number_of_tokens,\n config.number_of_partitions):\n storage_id = uuid.uuid4()\n log.debug('assigning to %s %d tokens', str(storage_id), len(token_split))\n new_args = 
self._build_args._replace(tokens=token_split, storage_id=storage_id)\n self.__class__._store_meta(new_args)\n\n yield self.__class__.build_remotely(new_args)\n log.debug('completed split of %s in %f', self.__class__.__name__, time() - st)", "def split(self, params):\n\n if \"train_df\" in params.keys():\n self.df = params[\"train_df\"]\n if \"test_df\" in params.keys():\n self.df = pd.concat([self.df, params[\"test_df\"]])\n if \"n_splits\" in params.keys():\n self.n_splits = params[\"n_splits\"]\n if \"shuffle\" in params.keys():\n self.shuffle = params[\"shuffle\"]\n if \"random_state\" in params.keys():\n self.random_state = params[\"random_state\"]\n\n self.__validate_input()\n\n n_samples = num_of_samples(self.df)\n\n if self.n_splits > n_samples:\n raise ValueError(\n f\"Cannot have number of splits {self.n_splits} > number of\"\n f\" samples {n_samples}\"\n )\n\n indices = np.arange(n_samples)\n for test_indices in self.__iter_test_indices(n_samples):\n train_indices = indices[np.logical_not(test_indices)]\n test_indices = indices[test_indices]\n yield train_indices, test_indices", "def split_data(data):\n testing_set = data.applymap(lambda x: 0)\n\n taken_courses_flat = data.stack().to_frame()\n taken_courses_flat = taken_courses_flat[taken_courses_flat[0] == 1]\n\n for student in taken_courses_flat.index.get_level_values('PersonID').unique():\n courses = taken_courses_flat.loc[student]\n for course in courses.sample(frac=0.2, replace=False).index:\n testing_set.loc[student, course] = 1\n training_set = data - testing_set\n\n # Numpifies the data\n train_np = training_set.apply(axis=1, func=lambda x: x.astype(int)).as_matrix()\n test_np = testing_set.apply(axis=1, func=lambda x: x.astype(int)).as_matrix()\n\n # the indices of each user\n users = np.array(np.arange(data.shape[0])[np.newaxis].T, dtype=np.int32)\n\n return train_np, test_np, users", "def _split_kmeans(self, node):\n\n # bi-partition with k-means until children have enough samples or max outliers is reached\n n_outliers = 0\n ids = node.ids\n left, right = None, None\n\n # define the score to improve upon\n if self.n_clusters >= self.min_leaves and node.size <= self.max_leaf_size:\n # require an improvement of children\n best_score = node.score\n # limit outliers to smallest cluster possible\n max_outliers = self.min_leaf_size\n else:\n # just take the best split (even if children are worse)\n best_score = None\n # no limit on outliers: always split\n max_outliers = np.inf\n\n # iterate until valid split or reached max outliers\n while n_outliers < max_outliers:\n labels = get_kmeans_split(self.E[ids])\n if labels is None:\n # could not split\n break\n # compute the split\n _lids = ids[labels == 0]\n _rids = ids[labels == 1]\n # check if the tubes are not too small\n _nl, _nr = len(_lids), len(_rids)\n if _nl + _nr != len(ids):\n raise SplitError(\"BUG in kmeans\")\n if _nl >= self.min_leaf_size and _nr >= self.min_leaf_size:\n # both children are large enough\n _sl = self.get_tube_score(_lids)\n _sr = self.get_tube_score(_rids)\n # get the score of this split\n score = min(_sl, _sr)\n # check if the split improves (each child has better score than the parent)\n if best_score is None or score > best_score:\n # register the split (vec is used to store depth in the tree)\n node.has_children = True\n best_score = score\n left = SpectralNode(\n _lids, node.vec + 1, score=_sl, name=node.name + \"0\")\n right = SpectralNode(\n _rids, node.vec + 1, score=_sr, name=node.name + \"1\")\n break\n elif _nl < self.min_leaf_size and 
_nr >= self.min_leaf_size:\n # left children is too small: add as outlier\n self.labels[_lids] = -1\n n_outliers += _nl\n # carry on with this subset\n ids = _rids\n elif _nr < self.min_leaf_size and _nl >= self.min_leaf_size:\n # right children is too small: add as outlier\n self.labels[_rids] = -1\n n_outliers += _nr\n # carry on with this subset\n ids = _lids\n else:\n # both too small: node is a leaf\n #msg = 'Both children are too small:'\n #msg+= ' too many outliers ({0} >= max_outliers={1})'.format(n_outliers, max_outliers)\n #msg+= ' or too small node size ({0})'.format(node.size)\n #raise SplitError(msg)\n break\n\n return left, right", "def split_dataset(dataset, n, seed=0):\n assert(n <= len(dataset))\n keys = list(range(len(dataset)))\n np.random.RandomState(seed).shuffle(keys)\n keys_1 = keys[:n]\n keys_2 = keys[n:]\n return _SplitDataset(dataset, keys_1), _SplitDataset(dataset, keys_2)", "def split_dataset(dataset, n, seed=0):\n assert n <= len(dataset)\n keys = list(range(len(dataset)))\n np.random.RandomState(seed).shuffle(keys)\n keys_1 = keys[:n]\n keys_2 = keys[n:]\n return _SplitDataset(dataset, keys_1), _SplitDataset(dataset, keys_2)", "def test_split(self):\n array = np.arange(1000)\n df = DataFlow.from_numpy(array)\n\n # first, test throw errors on invalid arguments\n def assert_invalid_arg(**kwargs):\n with self.assertRaises(ValueError):\n df.split(**kwargs)\n assert_invalid_arg(partitions=[])\n assert_invalid_arg(partitions=[1000, 1])\n assert_invalid_arg(partitions=[1000, -1])\n assert_invalid_arg(partitions=[1, 2])\n assert_invalid_arg(portions=[])\n assert_invalid_arg(portions=[1.0, 0.1])\n assert_invalid_arg(portions=[1.0, -1])\n assert_invalid_arg(portions=[0.1, 0.2])\n\n # next, test split without shuffling\n df1, df2, df3 = df.split(partitions=[700, 200, 100])\n np.testing.assert_array_equal(df1.all()[0], array[:700])\n np.testing.assert_array_equal(df2.all()[0], array[700:900])\n np.testing.assert_array_equal(df3.all()[0], array[900:1000])\n df1, df2, df3 = df.split(portions=[-1, 0.2, 0.1])\n np.testing.assert_array_equal(df1.all()[0], array[:700])\n np.testing.assert_array_equal(df2.all()[0], array[700:900])\n np.testing.assert_array_equal(df3.all()[0], array[900:1000])\n\n # finally, test split with shuffling\n df1, df2 = df.split(portions=[0.5, -1], shuffle=True)\n self.assertEquals(len(df1), 500)\n self.assertEquals(len(df2), 500)\n df_array = np.concatenate([df1.all()[0], df2.all()[0]], axis=0)\n self.assertFalse(np.all(df_array == array))\n np.testing.assert_array_equal(np.sort(df_array), array)", "def random_split(dataset, lengths):\n if sum(lengths) != len(dataset):\n raise ValueError(\"Sum of input lengths does not equal the length of the input dataset!\")\n\n indices = randperm(sum(lengths))\n return [Subset(dataset, indices[offset - length:offset]) for offset, length in zip(_accumulate(lengths), lengths)]", "def split(x, y, t):\n return (Shape(t, x.memory - y.memory, x.cores - y.cores, x.disk - y.disk),\n NodeReservation(Shape(x.wallTime - t, x.memory, x.cores, x.disk)))", "def _create_train_val_split(\n self, data_size, shuffle = False, seed = None\n ):\n val_size = int(np.round(data_size * self._val_fraction))\n val_size = max(1, val_size) if self._val_fraction > 0 else 0\n train_size = data_size - val_size\n train_split = np.concatenate(\n [np.ones([train_size], dtype=np.int32),\n np.zeros([val_size], dtype=np.int32)])\n if shuffle:\n np.random.RandomState(seed).shuffle(train_split)\n return train_split", "def DataSplit(self, data):\n 
train_X,test_X,train_y,test_y=train_test_split(data[0],data[1], random_state=2)\n valid_X,valid_y=train_test_split(data[0],data[1],random_state=2,test_size=0.15)[1],train_test_split(data[0],data[1],random_state=2,test_size=0.15)[3]\n return (train_X,test_X,valid_X,train_y,test_y,valid_y)", "def prepare_data_for_g(self):\n\n paths = []\n for i in self.root_nodes:\n if np.random.rand() < config.update_ratio:\n sample, paths_from_i = self.sample(i, self.trees[i], config.n_sample_gen, for_d=False)\n if paths_from_i is not None:\n paths.extend(paths_from_i)\n # for each root, we generate 20 samples, each sample is equal to one path from root to that sample\n # So, we will get maximum (num_root x 20) paths\n # path is a list with length = (N x num_sample), with num_sample = 20\n # paths =[[path_root1_to_sample1],[path_root1_to_sample2],....,[path_root1_to_sample20],\n # [path_root2_to_sample1],[path_root2_to_sample2],....,[path_root2_to sample20]\n # .\n # .\n # [path_rootN_to_sample1],[path_rootN_to_sample2],....,[path_rootN_to_sample20]]\n # get_node_pairs_from_path\n\n node_pairs = list(map(self.get_node_pairs_from_path, paths))\n # node_pairs = [[node pairs for path_root1_to_sample1],[node pairs for path_root1_to_sample2],....,[node pairs for path_root1_to_sample20],\n # [node_pairs for path_root2_to_sample1],[node pairs for path_root2_to_sample2],....,[node pairs for path_root2_to sample20],\n # .\n # .\n # [node pairs for path_rootN_to_sample1],[node pairs for path_rootN_to_sample2],....,[node pairs for path_rootN_to_sample20]]\n\n node_1 = []\n node_2 = []\n for i in range(len(node_pairs)):\n for pair in node_pairs[i]:\n node_1.append(pair[0])\n node_2.append(pair[1])\n # reward = self.sess.run(self.discriminator.reward,\n # feed_dict={self.discriminator.node_id: np.array(node_1),\n # self.discriminator.node_neighbor_id: np.array(node_2)})\n reward = self.discriminator.forward(node_1, node_2)\n return node_1, node_2, reward", "def split_data(y, tx, train_ratio):\n N = len(y)\n indices = np.random.permutation(N)\n \n tx = tx[indices, :]\n y = y[indices]\n \n bound = int(N * train_ratio)\n return y[:bound], tx[:bound, :], y[bound:], tx[bound:, :]", "def split_data(data, labels, proportion):\n size = data.shape[0]\n np.random.seed(42)\n s = np.random.permutation(size)\n split_idx = int(proportion * size)\n return (data[s[:split_idx]], data[s[split_idx:]], labels[s[:split_idx]], labels[s[split_idx:]])", "def compute_nodeset(data):\n xset = NodeSet()\n for nodeset in data.split():\n xset.update(nodeset)\n return xset", "def split_data(x, weight = 0.9):\n offset = int(len(x) * weight)\n return x[:offset], x[offset:]", "def test_split_data(self):\n Xlists = tuple([[np.zeros((200,9)) for b in range(14)] for c in range(9)])\n ybinarylists = [np.zeros((14,12)) for c in range(9)]\n indices = slice(7, 9)\n x_test, y_test = tutorial_pamap2.split_data(Xlists, ybinarylists, \\\n indices)\n test = y_test[0].shape == (12,) and x_test[0].shape == (200, 9)\n assert test", "def splitPlace(self):\r\n \r\n \r\n \r\n nodeSortedIter = sorted(self.G.degree_iter(),key=itemgetter(1),reverse=True)\r\n \r\n placeCnt = 0\r\n \r\n for node in nodeSortedIter:\r\n if placeCnt<self.cells/2:\r\n self.sitesA.append(node[0])\r\n self.G.node[node[0]][\"part\"] = 'A'\r\n else:\r\n self.sitesB.append(node[0])\r\n self.G.node[node[0]][\"part\"] = 'B'\r\n placeCnt+=1", "def split_data(x, y, ratio, seed=1):\n # set seed\n np.random.seed(seed)\n # ***************************************************\n # INSERT YOUR CODE HERE\n # 
split the data based on the given ratio: TODO\n # ***************************************************\n ind = np.random.permutation(y.shape[0])\n threshold = int(y.shape[0]*ratio)\n return y[ind[:threshold]], x[ind[:threshold]], y[ind[threshold:]], x[ind[threshold:]]", "def data_split(data, labels, train_ratio=0.5, rand_seed=42):\n\n assert 0 <= train_ratio <= 1, \"Error: training set ratio must be between 0 and 1\"\n\n x_train, x_temp, y_train, y_temp = train_test_split(data,\n labels,\n train_size=train_ratio,\n random_state=rand_seed)\n\n x_val, x_test, y_val, y_test = train_test_split(x_temp,\n y_temp,\n train_size=0.5,\n random_state=rand_seed)\n\n return x_train, x_val, x_test, y_train, y_val, y_test", "def train_val_test_split(data):\n raise NotImplementedError", "def split_data(x, y, ratio, seed=1):\n # set seed\n np.random.seed(seed)\n # ***************************************************\n # INSERT YOUR CODE HERE\n # split the data based on the given ratio: TODO\n # ***************************************************\n \n def split_data(x, y, ratio, seed=1):\n \"\"\"split the dataset based on the split ratio.\"\"\"\n # set seed\n np.random.seed(seed)\n # ***************************************************\n # INSERT YOUR CODE HERE\n # split the data based on the given ratio: TODO\n # ***************************************************\n trainDataLen = round(len(y)*ratio)\n \n trainDataID = random.sample(range(len(y)), trainDataLen)\n \n # USing bool value to obtaint he remainling data for validation data set\n validDataID = np.array(range(len(y))) + 1\n validDataID[trainDataID] = 0\n validDataID = validDataID >0\n \n \n # obtain the trainning data\n trainDataX = x[trainDataID]\n trainDataY = y[trainDataID]\n \n # obtain the validation data\n validDataX = x[validDataID]\n validDataY = y[validDataID] \n \n return trainDataX,trainDataY, validDataX, validDataY\n \n #raise NotImplementedError", "def split_data(X, scaling, ids, y, split_ratio=0.2):\r\n split = int(X.shape[0] * split_ratio) # index must be int\r\n X_test = X[:split, :, :, :]\r\n scaling_test = scaling[:split, :]\r\n ids_test = ids[:split]\r\n y_test = y[:split, :]\r\n X_train = X[split:, :, :, :]\r\n scaling_train = scaling[split:, :]\r\n ids_train = y[split:]\r\n y_train = y[split:, :]\r\n\r\n return X_train, scaling_train, ids_train, y_train, X_test, scaling_test, ids_test, y_test", "def split_data(dataset, test_size=0.5):\n shuffled_data = np.random.RandomState(seed=721).permutation(dataset)\n train_set = shuffled_data[: int(len(dataset) * (1 - test_size)), :]\n test_set = shuffled_data[int(len(dataset) * (1 - test_size)):, :]\n return train_set, test_set", "def __init__(self):\r\n self.bucket = []\r\n for i in range(4096):\r\n self.bucket.append(Node(0,0))", "def split_all(self):\n for domino in self.dominoes[:]:\n self.split(domino)", "def splits(self) -> List[int]:\n if self._splits is None:\n self.RefreshStats()\n return self._splits", "def split(self):\n left = BPlusNode(self.order)\n right = BPlusNode(self.order)\n mid = self.order // 2\n\n left.keys = self.keys[:mid]\n left.values = self.values[:mid]\n\n right.keys = self.keys[mid:]\n right.values = self.values[mid:]\n\n # When the node is split, set the parent key to the left-most key of the right child node.\n self.keys = [right.keys[0]]\n self.values = [left, right]\n self.leaf = False", "def data_split(dataset, val_ratio=0.1, test_ratio=0.1, seed=1234):\n\n\t# How you grab the labels will depend on what type of Pytorch Dataset object 'dataset' is\n\t# 
(i.e. ImageFolder/DatasetFolder or not)\n\n\t# For fun, check the method resolution order (MRO) of 'dataset'\n\tprint('Dataset object\\'s inheritance: ', type(dataset).__mro__)\n\n\t# Determine what kind of Dataset object it is, then grab labels\n\t# Warning: currently this will break for anything other than an ImageFolder or CIFAR10 train set\n\tif isinstance(dataset, datasets.CIFAR10):\n\t\tlabels = dataset.train_labels\n\telif isinstance(dataset, datasets.ImageFolder):\n\t\tlabels = [img[1] for img in dataset.imgs]\n\telse:\n\t\terror('Dataset not supported yet')\n\n\t# Calculate class priors, (number in class)/(size of dataset)\n\tidcs = [i for i in range(len(dataset))]\n\tsamples_per_class = np.bincount(np.array(labels))\n\tpriors = samples_per_class/len(labels)\n\n\t# Number of samples in each class for val and test set \n\tval_per_class = np.ceil(samples_per_class*val_ratio).astype(np.int)\n\ttest_per_class = np.ceil(samples_per_class*test_ratio).astype(np.int)\n\n\t# Copy and shuffle the labels and corresponding indices to randomize before splitting\n\tshuffled_labels = list(labels)\n\tshuffled_idcs = list(idcs)\n\trandom.Random(seed).shuffle(shuffled_labels)\n\trandom.Random(seed).shuffle(shuffled_idcs)\n\n\t# Iterate through, grabbing indices for each class to place in validation set\n\t# until the desired number is reached\n\tval_idcs = []\n\tval_counts = np.zeros(val_per_class.shape)\n\n\tfor i, l in zip(shuffled_idcs, shuffled_labels):\n\t\t# Check if validation set quota has been reached yet for this class\n\t\tif val_counts[l] < val_per_class[l]:\n\t\t\tval_idcs.append(i)\n\t\t\tval_counts[l] += 1\n\n\t\t# Check if stopping point is reached\n\t\tif (val_counts == val_per_class).all():\n\t\t\tbreak\n\n\t# Repeat for test set\n\ttest_idcs = []\n\ttest_counts = np.zeros(test_per_class.shape)\n\tfor i, l in zip(shuffled_idcs, shuffled_labels):\n\t\t# Check if this index is already in val set\n\t\tif i in val_idcs:\n\t\t\tcontinue\n\n\t\t# Check if test set quota has been reached yet for this class\n\t\tif test_counts[l] < test_per_class[l]:\n\t\t\ttest_idcs.append(i)\n\t\t\ttest_counts[l] += 1\n\n\t\t# Check if stopping point is reached\n\t\tif (test_counts == test_per_class).all():\n\t\t\tbreak\n\n\t# Get train indices too (all the remaining samples not in val or test)\n\ttrain_idcs = [j for j in idcs if j not in val_idcs+test_idcs]\n\n\t# Split the data\n\ttrain = Subset(dataset, train_idcs)\n\tval = Subset(dataset, val_idcs)\n\ttest = Subset(dataset, test_idcs)\n\n\treturn train, val, test", "def __split_node(self, cur_node):\n temp = self.Node(cur_node.data_list[len(cur_node.data_list) / 2:], cur_node.next_node)\n cur_node.data_list = cur_node.data_list[:len(cur_node.data_list) / 2]\n cur_node.next_node = temp\n\n if cur_node == self.tail:\n self.tail = cur_node.next_node", "def split_data(dataset, ratio = 0.9):\n cutoff_row = int(dataset.shape[0] * ratio)\n return (dataset[:cutoff_row], dataset[cutoff_row:])", "def chunk(self, shape, split) -> NotImplementedError:\n raise NotImplementedError()", "def create_dataset_splits(n, p=1.0):\n\tperm = np.random.permutation(n).tolist()\n\tidx = int(p * n)\n\treturn perm[:idx]", "def getData(trainSize):\r\n return splitData([getReal(), getFake()], trainSize=trainSize)", "def compute_splits(self, G, nw_name='test', samp_frac=0.01, split_id=0, verbose=False):\n # Sample the required number of node pairs from the graph\n train_E, train_E_false = stt.random_edge_sample(nx.adjacency_matrix(G, nodelist=range(len(G.nodes))),\n samp_frac, 
nx.is_directed(G))\n\n # Raise an error if no edges were selected while sampling matrix entries (both edges and non-edges are required)\n if len(train_E) == 0:\n raise ValueError(\"Sampling fraction {} on {} network is too low, no edges were selected.\".format(samp_frac,\n nw_name))\n\n # Set class attributes to new values\n self.set_splits(TG=G, train_E=train_E, train_E_false=train_E_false, samp_frac=samp_frac,\n directed=nx.is_directed(G), nw_name=nw_name, split_id=split_id, verbose=verbose)\n\n return train_E, train_E_false" ]
[ "0.6593751", "0.64535356", "0.6275442", "0.624764", "0.6210491", "0.61586165", "0.61282647", "0.6122043", "0.6105798", "0.6105246", "0.6102763", "0.60595816", "0.6045628", "0.6025434", "0.60080785", "0.5965141", "0.5959221", "0.59428424", "0.5918808", "0.59182876", "0.59140563", "0.5881982", "0.58554405", "0.58533686", "0.5852365", "0.5845334", "0.58368915", "0.5830529", "0.5824757", "0.58202374", "0.5801989", "0.5790324", "0.5790206", "0.57861185", "0.5782661", "0.57761735", "0.5770978", "0.57354283", "0.5733996", "0.5722803", "0.5703382", "0.56619817", "0.5655152", "0.5650165", "0.5644863", "0.56384915", "0.56381243", "0.563119", "0.5626938", "0.56264746", "0.56262827", "0.56214964", "0.5615967", "0.56107634", "0.5605065", "0.5570426", "0.5549618", "0.5548596", "0.55476856", "0.5542822", "0.55334055", "0.55275124", "0.55263275", "0.55183744", "0.5501449", "0.54995865", "0.54970056", "0.5492551", "0.5478886", "0.54734105", "0.5469502", "0.5468563", "0.54679984", "0.5466191", "0.5463258", "0.54594827", "0.545736", "0.5451305", "0.54506654", "0.5450049", "0.5449962", "0.54399353", "0.543551", "0.5426258", "0.5421547", "0.54194826", "0.5411877", "0.541067", "0.53972715", "0.5392389", "0.53862226", "0.53862065", "0.5379801", "0.53731996", "0.53715867", "0.5370562", "0.5369985", "0.53626156", "0.536113", "0.5360226" ]
0.6529111
1
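A minimal sketch of how a row with these fields could be turned into training triplets, following the "triplet": [["query", "document", "negatives"]] objective declared in the metadata below. The `rows` iterable, the `make_triplets` name, and the `hard_k` parameter are assumptions for illustration only, not part of the dataset.

    def make_triplets(rows, hard_k=1):
        # Each row is assumed to be a dict with the fields shown in these records:
        # "query", "document", "negatives", "negative_scores".
        triplets = []
        for row in rows:
            query = row["query"]
            positive = row["document"]
            # Pair negatives with their scores (stored as strings) and sort
            # highest-scored first, i.e. hardest negatives first.
            scored = sorted(
                zip(row["negatives"], map(float, row["negative_scores"])),
                key=lambda pair: pair[1],
                reverse=True,
            )
            for negative, _score in scored[:hard_k]:
                triplets.append((query, positive, negative))
        return triplets

Keeping only the top-scored negatives per row is a common choice for contrastive or triplet-loss training, since near-miss documents give a stronger learning signal than random ones.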
Give nodes only a certain number of class labels as data.
def non_iid_split(self, nr_agents, class_per_node, random): unique = list(set(self.labels.tolist())) len_unique = len(unique) # Create array that assigns a class to specific nodes # Use 'np.arange' to ensure every class is represented before repeating # A row represents nr_agents, a column represents classes per node agent_class_master = np.arange(start=0, stop=nr_agents * class_per_node) % len_unique np.random.shuffle(agent_class_master) agent_class_master = agent_class_master.reshape(nr_agents, class_per_node) # Split data by labels sample_list = [[] for _ in range(len_unique)] for i in range(len(self.labels)): sample_list[self.labels[i]].append(self.samples[i]) # By class creates uniform or random indices splits to partition data to agents evenly class_count = np.bincount(agent_class_master.ravel()) class_indices = {} for i in range(len(class_count)): if random: indices = sorted(np.random.randint(0, high=len(sample_list[i]), size=class_count[i] - 1).tolist()) indices = [0] + indices indices += [len(sample_list[i])] class_indices[i] = indices else: class_indices[i] = np.linspace(start=0, stop=len(sample_list[i]), num=class_count[i] + 1, dtype=int).tolist() # Main loop that partitions data by the assigned class and proper amount all_agents = [] all_class = [] for agent in agent_class_master: agent_data = [] agent_class = [] for cls in agent: # Proportioned indices for data and grab correctly indexed data temp_indices = class_indices[cls] data_for_agent = sample_list[cls][temp_indices[0]:temp_indices[1] - 1] # Add data and class to this agents list agent_data = agent_data + data_for_agent agent_class = agent_class + [cls for _ in range(len(data_for_agent))] # Drop first index since we used that data, forces next person to use next index class_indices[cls] = temp_indices[1:] # Append agents data and class labels in order all_agents.append(torch.stack(agent_data)) all_class.append(torch.tensor(agent_class)) self.samples = all_agents self.labels = all_class
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def multi_class5_classification_dataset_sparse_labels() -> tf.data.Dataset:\n\n # Create features\n X = tf.random.normal(shape=(100, 3))\n\n # Create one multi-class (one hot) labels\n y = tf.random.uniform(minval=0, maxval=5, dtype=tf.int32, shape=(100,))\n\n return tf.data.Dataset.from_tensor_slices((X, y))", "def _extend_label_set(self, data, trainer_cls, session):\n # Select which nodes to label next, and predict their labels.\n selected_samples, selected_labels = self._select_samples_to_label(\n data, trainer_cls, session)\n # Replace the labels of the new nodes with the predicted labels.\n if selected_samples.shape[0] > 0:\n data.label_samples(selected_samples, selected_labels)\n return selected_samples", "def labels_to_labels(class_labels, num_classes =4):\n levels = []\n for label in class_labels:\n levels_from_label = label_to_levels(int(label), num_classes=num_classes)\n levels.append(levels_from_label)\n return torch.stack(levels).cuda()", "def num_labels(self) -> int:\n raise NotImplementedError", "def assign_random_labels(dataset: pd.DataFrame, n_classes: int) -> pd.DataFrame:\n labels = np.zeros(shape=(len(dataset), n_classes))\n labels = pd.DataFrame(labels, columns=[\"l{}\".format(i) for i in range(n_classes)])\n for label in labels.values:\n for cla in range(n_classes):\n label[cla] = random.randint(0, 1)\n\n return labels", "def cut_classes(self, dataset, classes, max_size, label):\n\n # Cherry picked classes\n class_dfs = []\n for c in classes:\n picked_data = dataset.loc[(dataset.loc[:,label] == c),:].reset_index(drop=True)\n class_dfs.append(picked_data.loc[0:min(len(picked_data), max_size),:])\n #class_dfs.append(picked_data.sample(n=min(len(picked_data), max_size)))\n\n # Concat\n data = pd.concat(class_dfs)\n return data", "def label_nodes(sizes):\n labels = np.concatenate([tup[0] * np.ones(tup[1])\n for tup\n in enumerate(sizes)]).astype(int)\n return list(labels)", "def n_classes(self):\n raise NotImplementedError", "def n_classes(self):\n raise NotImplementedError", "def count_nodes(self, term=None, labels: istr = None):", "def random_labels(size, num_classes):\n return torch.randint(high=num_classes, size=(size,)).int().tolist()", "def label_n_elements(self, n_elements: int, **kwargs) -> int:\n # labels\n assert Exception(\"not implemented\")", "def multi_label5_classification_dataset() -> tf.data.Dataset:\n\n # Create features\n X = tf.random.normal(shape=(100, 3))\n\n # Create one multi-class (one hot) labels\n y = tf.random.normal(shape=(100, 5))\n y = tf.cast(y > 0.5, dtype=tf.int32)\n\n return tf.data.Dataset.from_tensor_slices((X, y))", "def multiclass_toy_data(): \n #dataset = np.zeros((10,5), np.int)\n dataset = np.array([[0,0,0,0,4],\n [0,0,0,0,5],\n [1,3,0,0,0],\n [3,1,0,0,1],\n [0,0,6,2,0],\n [0,0,0,0,0],\n [0,0,1,7,2], \n [0,0,5,1,5],\n [0,0,34,0,0],\n [0,0,3,0,0]])\n Y = np.array([3,3,2,2,1,0,1,1,0,0])\n #for i in range(10):\n #for j in range(5):\n #dataset[i][j] = np.random.randint(0,10) \n dataset = np.column_stack((dataset, Y))\n return (dataset)", "def n_classes(self):\n raise NotImplementedError()", "def encode_labels(labels, nclass=5):\n y = np.zeros((len(labels), nclass)).astype('float32')\n for j, yj in enumerate(labels):\n for i in range(nclass):\n if i+1 == np.floor(yj) + 1:\n y[j, i] = yj - np.floor(yj)\n if i+1 == np.floor(yj):\n y[j, i] = np.floor(yj) - yj + 1\n return y", "def __init__(self, num_nodes):\n self._num_nodes = num_nodes\n self._node_numbers = [node for node in range(num_nodes) for dummy_idx in range(num_nodes)]", "def 
__init__(self, num_nodes):\n self._num_nodes = num_nodes\n self._node_numbers = [node for node in range(num_nodes) for dummy_idx in range(num_nodes)]", "def __init__(self, num_nodes):\n self._num_nodes = num_nodes\n self._node_numbers = [node for node in range(num_nodes) for dummy_idx in range(num_nodes)]", "def __init__(self, num_nodes):\n self._num_nodes = num_nodes\n self._node_numbers = [node for node in range(num_nodes) for dummy_idx in range(num_nodes)]", "def __init__(self, num_nodes):\n self._num_nodes = num_nodes\n self._node_numbers = [node for node in range(num_nodes) for dummy_idx in range(num_nodes)]", "def __init__(self, num_nodes):\n self._num_nodes = num_nodes\n self._node_numbers = [node for node in range(num_nodes) for dummy_idx in range(num_nodes)]", "def count_nodes(self, term=None, labels: istr = None) -> int:", "def make_multilabel_target(num_classes, classes):\n target = np.zeros(num_classes, dtype=np.uint8)\n target[classes] = 1\n return target", "def mask_classes(outputs: torch.Tensor, dataset: ContinualDataset, k: int) -> None:\n outputs[:, 0:k * dataset.N_CLASSES_PER_TASK] = -float('inf')\n outputs[:, (k + 1) * dataset.N_CLASSES_PER_TASK:\n dataset.N_TASKS * dataset.N_CLASSES_PER_TASK] = -float('inf')", "def get_label_classes(scope, op, node_names=False):\n options = scope.get_options(op, dict(nocl=False))\n if options[\"nocl\"]:\n if len(op.classes_.shape) > 1 and op.classes_.shape[1] > 1:\n raise RuntimeError(\n \"Options 'nocl=True' is not implemented for multi-label \"\n \"classification (class: {}).\".format(op.__class__.__name__)\n )\n classes = np.arange(0, len(op.classes_))\n elif node_names:\n try:\n options = scope.get_options(op, dict(zipmap=False))\n zipcol = options[\"zipmap\"] == \"columns\"\n except NameError:\n zipcol = False\n if zipcol:\n clnames = op.classes_.ravel()\n if np.issubdtype(clnames.dtype, np.integer) or clnames.dtype == np.bool_:\n classes = np.array([\"i%d\" % c for c in clnames])\n else:\n classes = np.array([\"s%s\" % c for c in clnames])\n else:\n classes = op.classes_\n elif hasattr(op, \"classes_\"):\n classes = op.classes_\n elif hasattr(op, \"intercept_\"):\n classes = len(op.intercept_)\n elif hasattr(op, \"y_\"):\n # _ConstantPredictor\n classes = np.array(list(sorted(set(op.y_))))\n else:\n raise RuntimeError(\n \"No known ways to retrieve the number of classes for class %r.\"\n \"\" % type(op)\n )\n return classes", "def multi_class5_classification_dataset() -> tf.data.Dataset:\n\n # Create features\n X = tf.random.normal(shape=(100, 3))\n\n # Create one multi-class (one hot) labels\n y = tf.random.normal(shape=(100, 5))\n y = tf.one_hot(tf.argmax(y, axis=-1), depth=5)\n\n return tf.data.Dataset.from_tensor_slices((X, y))", "def encode_labels(labels, nclass=5):\n Y = np.zeros((len(labels), nclass)).astype('float32')\n for j, y in enumerate(labels):\n for i in range(nclass):\n if i+1 == np.floor(y) + 1:\n Y[j,i] = y - np.floor(y)\n if i+1 == np.floor(y):\n Y[j,i] = np.floor(y) - y + 1\n return Y", "def init_label_dict(num_classes):\n label_dict={}\n for i in range(num_classes):\n label_dict[i]=(0,0,0)\n return label_dict", "def get_num_classes(self):", "def classify_k_cluster(labels, datas):\n classify_k_cluster_to_redis(labels=labels, texts=datas)", "def filter_dataset_by_labels(labels, data, num_each_label):\n first = True\n all_ds = None\n for label in labels:\n ds_l = data.filter(lambda d: d.get(\"label\") == label).take(num_each_label)\n if first:\n all_ds = ds_l\n first = False\n else:\n all_ds = 
all_ds.concatenate(ds_l)\n y_ds = tf.cast(list(labels), tf.int64)\n table = tf.lookup.StaticHashTable(\n initializer=tf.lookup.KeyValueTensorInitializer(\n keys=tf.constant(y_ds),\n values=tf.constant(list(range(len(y_ds)))),\n ),\n default_value=tf.constant(-1),\n name=\"class\"\n )\n all_ds = all_ds.map(lambda d: (d.get('image'), table.lookup(d.get('label')))).cache()\n all_ds = all_ds.shuffle(DATA_SIZE, reshuffle_each_iteration=True).cache()\n return all_ds", "def number_of_nodes(self, number_of_nodes):\n\n self._number_of_nodes = number_of_nodes", "def prepare_labels(labels, class_mask):\n mask = [1 if elt else -1 for elt in class_mask]\n mask = np.array(mask)\n return labels.dot(mask)", "def num_classes():\n return NUM_CLASSES", "def num_classes(self):\n\t\treturn 10", "def class2onehot(class_labels, seq_len, batchsize, num_task):\n\n\n one_hot = torch.FloatTensor(batchsize,seq_len,num_task)\n one_hot.zero_()\n one_hot = one_hot.scatter_(1, seq_len,class_labels, 1)\n\n return one_hot", "def count_labels(labels, num_classes):\n return np.array([\n np.bincount(segment_labels, minlength=num_classes) for _, segment_labels in labels\n ])", "def label(d, X, ind_class0, ind_class1, N, V, binary):\n if binary == True:\n K = 1\n C = torch.zeros(N + V, K)\n C[ind_class0, :] = 0.0\n C[ind_class1, :] = 1.0\n else:\n K = 2\n C = torch.zeros(N + V, K)\n C[ind_class0, :] = torch.tensor([1.0, 0.0])\n C[ind_class1, :] = torch.tensor([0.0, 1.0])\n\n X_train = X[:N, :]\n X_val = X[N:, :]\n C_train = C[:N, :]\n C_val = C[N:, :]\n\n return [X_train, C_train, X_val, C_val, d, K]", "def remove_classes(data, labels, classes2keep):\n new_data = defaultdict(list)\n for i, label in enumerate(labels):\n if label in classes2keep:\n new_data[\"label\"].append(label)\n new_data[\"data\"].append(data[i])\n return np.array(new_data[\"data\"]), np.array(new_data[\"label\"])", "def load_datasets_nodelabel(names):\n\n # load datasets\n datasets = []\n if \"cs_eval\" in names:\n datasets.append(Parser('datasets/CiteSeer_Eval'))\n if \"cs_train\" in names:\n datasets.append(Parser('datasets/CiteSeer_Train'))\n if \"co_eval\" in names:\n datasets.append(Parser('datasets/Cora_Eval'))\n if \"co_train\" in names:\n datasets.append(Parser('datasets/Cora_Train'))\n\n # convert datasets into lists graphs, labels\n datasets = [dataset.parse_all_graphs() for dataset in datasets]\n attr_sets = [[get_node_attributes(graph) for graph in graphs] for graphs in datasets]\n labels = [[get_node_labels(graph) for graph in graphs] for graphs in datasets]\n # attr_sets is a list of length n, where n is the number of datasets. Then attr[0] contains a list of all node attributes\n # for dataset 0. 
Thus attr[0][0] contains the actual node attribute matrix (X^0) for the graph of fataset 0.\n return names, datasets, attr_sets, labels", "def __len__(self):\n return len(self.labels)", "def neural_net_label_input(self, n_classes):\n labels = tf.placeholder(tf.int32, [None, n_classes], 'y')\n return labels", "def load_data_labels(datasets):\n # Split by words\n x_text = datasets['data']\n x_text = [clean_str(sent) for sent in x_text]\n # Generate labels\n labels = [0, 1, 2, 3, 4]\n print(len(x_text))\n for i in range(len(x_text)):\n label = [0 for j in datasets['target_names']] \n label[datasets['target'][i]] = labels[i]\n labels.append(label)\n y = np.array(labels)\n return [x_text, y]", "def regroup_dataset(labels):\r\n batch_y = labels.copy()\r\n for i, label in enumerate(labels):\r\n if label in [0, 15, 19]:\r\n batch_y[i]=0\r\n if label in [1, 2, 3, 4, 5,]:\r\n batch_y[i]=1\r\n if label in [6]:\r\n batch_y[i]=2\r\n if label in [7,8,9,10]:\r\n batch_y[i]=3\r\n if label in [11,12,13,14]:\r\n batch_y[i]=4\r\n if label in [16,17,18]:\r\n batch_y[i]=5\r\n \r\n print('regrouped label', batch_y.shape)\r\n return batch_y", "def _multiclass_with_missing_class(*shape: Any, num_classes=NUM_CLASSES):\n x = torch.randint(0, num_classes, shape)\n x[x == 0] = 2\n return x", "def gen_labels(self, nidxs=None, condense_labels=False):\n\n if nidxs is None:\n nidxs = self.nidx_train\n\n y = []\n\n for r in nidxs:\n y.append(self.node_labels[r])\n\n if condense_labels:\n # This should be improved, since this will fail if there are labels with exactly the same number of samples\n # Current solution use a bit of noise to minimize conflicts/favors\n y = self.encode_labels(y)\n lab_weights = 1. - np.mean(y, axis=0)\n noise = np.random.normal(loc=0, scale=0.0001, size=np.shape(y))\n y_condensed = np.argmax(minmax_scale(y * lab_weights + noise, axis=1), axis=1)\n return y_condensed\n\n return self.encode_labels(y)", "def assign_labels(dataset: pd.DataFrame, hypershapes: Dict[int, Dict], n_classes: int) -> pd.DataFrame:\n\n labels = np.zeros(shape=(len(dataset), n_classes))\n labels = pd.DataFrame(labels, columns=[\"l{}\".format(i) for i in range(n_classes)])\n for point, label in zip(dataset.values, labels.values):\n for cla in hypershapes:\n for shape in hypershapes[cla].values():\n if shape[\"shape\"] == \"cubes\":\n if is_point_inside_hypercube(point, shape[\"center\"], shape[\"radius\"]):\n label[int(cla)] = 1\n elif shape[\"shape\"] == \"spheres\":\n if is_point_inside_hypersphere(point, shape[\"center\"], shape[\"radius\"]):\n label[int(cla)] = 1\n elif shape[\"shape\"] == \"moons\":\n if is_point_inside_hypermoon(point, (shape[\"center_big\"], shape[\"center_small\"]),\n (shape[\"radius_big\"], shape[\"radius_small\"])):\n label[int(cla)] = 1\n\n return labels", "def add_incident_count_class_label(data, count_col=\"incidents\", num_classes=6, one_hot=True):\n def add_plus(x, value=num_classes - 1):\n if int(x) == value:\n return str(x) + \"+\"\n return x\n\n data = data.copy()\n data[\"class\"] = np.minimum(data[count_col].values, num_classes - 1)\n data[\"class\"] = data[\"class\"].astype(int).astype(str)\n data[\"class\"] = data[\"class\"].map(add_plus)\n\n # to onehot\n if one_hot:\n classes = np.sort(data[\"class\"].unique())\n data = pd.concat([data, data[\"class\"].str.get_dummies()], axis=1, ignore_index=False)\n class_labels = [\"class_{}\".format(x) for x in classes]\n data = data.rename(columns={x: \"class_{}\".format(x) for x in classes})\n \n return data, class_labels\n\n else:\n return 
data", "def select_n_random(data, labels, n=100):\n assert len(data) == len(labels)\n\n # TODO: sort this out for 3D data\n # p1 = torch.randperm(len(data))\n # sample_labels = labels[p1][:n]\n # sample_data = data[p1][:n]\n return data[:n], labels[:n]", "def labels(self, labels):\n self._labels = labels", "def load_labels(self, labels):\n self.labels = pd.DataFrame(labels, index=[\"label\"]).T", "def assign_labels(self, data):\n data[self.label] = self.labeler(data.index.values)", "def need_labels(self) -> None:\n raise NotImplementedError()", "def get_num_classes(labels):\n num_classes = max(labels) + 1\n missing_classes = [i for i in range(num_classes) if i not in labels]\n if len(missing_classes):\n raise ValueError('Missing samples with label value(s) '\n '{missing_classes}. Please make sure you have '\n 'at least one sample for every label value '\n 'in the range(0, {max_class})'.format(\n missing_classes=missing_classes,\n max_class=num_classes - 1))\n\n if num_classes <= 1:\n raise ValueError('Invalid number of labels: {num_classes}.'\n 'Please make sure there are at least two classes '\n 'of samples'.format(num_classes=num_classes))\n return num_classes", "def get_num_classes(labels):\n num_classes = max(labels) + 1\n missing_classes = [i for i in range(num_classes) if i not in labels]\n if len(missing_classes):\n raise ValueError('Missing samples with label value(s) '\n '{missing_classes}. Please make sure you have '\n 'at least one sample for every label value '\n 'in the range(0, {max_class})'.format(\n missing_classes=missing_classes,\n max_class=num_classes - 1))\n\n if num_classes <= 1:\n raise ValueError('Invalid number of labels: {num_classes}.'\n 'Please make sure there are at least two classes '\n 'of samples'.format(num_classes=num_classes))\n return num_classes", "def get_num_labels(self):\n return self.num_labels", "def add_nodes(self, nodes_labels):\n nodes_labels = list(nodes_labels)\n nodes_indices = [i + self.count for i in range(len(nodes_labels))]\n self.node_labels.extend(nodes_labels)\n [add_to_sets_dict(self.nodes_indices, *lab_and_n) for lab_and_n in zip(nodes_labels, nodes_indices)]\n self.graph.add_nodes_from(nodes_indices)\n self.count += len(nodes_labels)\n return nodes_indices", "def nodules_connection(label_data, label_header):\n\n\n las_labels = measure.label(label_data,\n neighbors=8,\n background=0,\n return_num=True)\n\n las_labels_nzero = np.nonzero(las_labels[0])\n [xdif, ydif, zdif] = [np.amax(las_labels_nzero[0])-np.amin(las_labels_nzero[0]),\n np.amax(las_labels_nzero[1])-np.amin(las_labels_nzero[1]),\n np.amax(las_labels_nzero[2])-np.amin(las_labels_nzero[2])]\n\n # conversion pixels to mm\n dims = label_header['pixdim']\n if label_header['xyzt_units'] == 10:\n #dimensions in mm\n print('xyzt_units=10')\n xdif=dims[1]*xdif\n ydif=dims[2]*ydif\n zdif=dims[3]*zdif\n\n\n return las_labels,[xdif,ydif,zdif]", "def update_labels(self, nidxs, y):\n\n y = np.array(y, dtype=bool)\n for n, yi in zip(nidxs, y):\n self.node_labels[n] = [self.labels[i] for i, j in enumerate(yi) if j]\n\n return self", "def make_fixed_labels(self):\n fixed_labels = []\n for dim in range(self.opt.c_dim):\n t = [0] * self.opt.c_dim\n t[dim] = 1\n t = torch.FloatTensor(t).expand([self.opt.batch_size, self.opt.c_dim])\n fixed_labels.append(t)\n return fixed_labels", "def test_keep_labels_all(self):\n # Create some arbitrary data and labels\n data = array([[1], [2], [3], [4], [5], [6]])\n labels = array([1, 1, 2, 2, 1, 2])\n\n # Create a LabeledCData object\n lcdata = 
LabeledCData(data, labels)\n\n self.assertTrue(array_equal(lcdata.data, data))\n self.assertTrue(array_equal(lcdata.labels, labels))\n\n # Only keep the 1 and 2 labels\n lcdata.keep_data_with_labels([1, 2])\n\n # Make sure the new data is correct\n self.assertTrue(array_equal(lcdata.data, data))\n self.assertTrue(array_equal(lcdata.labels, labels))", "def generate_labels(path_to_classes: str, path_to_dataset: str):\n\n print('Generating the labels...')\n\n path_to_labels = os.path.join(path_to_dataset, 'labels')\n\n if not os.path.isdir(path_to_labels):\n print('Creating labels folder at {}...'.format(path_to_labels))\n os.makedirs(path_to_labels)\n\n path_to_csv = os.path.join(path_to_labels, 'class_name_to_number.csv')\n path_to_txt = os.path.join(path_to_labels, 'labels.txt')\n\n # Read the list of characters into a dataframe\n classes = pd.read_csv(path_to_classes)\n\n # Write the class-label mapping to csv file\n write_class_label_map(classes, path_to_csv)\n\n # Write the labels to txt file\n write_labels_txt(pd.DataFrame(classes['Unicode']), path_to_txt)", "def _get_bbox_regression_labels(bbox_target_data, num_classes):\r\n\r\n clss = bbox_target_data[:, 0]\r\n # print (\"===============class size: \" + str(clss))\r\n bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32) #clss.size = 128 ---> bbox_targets = 128 * 84, moi roi la 1*84 dimesion\r\n bbox_inside_weights = np.zeros(bbox_targets.shape, dtype=np.float32)\r\n inds = np.where(clss > 0)[0]\r\n for ind in inds:\r\n cls = clss[ind]\r\n start = 4 * cls\r\n end = start + 4\r\n start=int(start)\r\n\tend=int(end)\r\n\tbbox_targets[ind, start:end] = bbox_target_data[ind, 1:] #gan gia tri tai class tuong ung la bbox_target_data, con lai la so 0\r\n bbox_inside_weights[ind, start:end] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS\r\n return bbox_targets, bbox_inside_weights", "def num_classes(self):\n raise NotImplementedError", "def set_labels(self, new_labels=None):\n self.labels = new_labels", "def get_labels(self):\r\n raise NotImplementedError()", "def __init__(self, classification_path):\n # TODO: Rodar novamente o KNN com a particao crisp 'otima' para reavaliar os valores de K\n self.data = list()\n self.class_data = np.loadtxt(classification_path, dtype=int)\n self.mfeat_fac_classifier = self.build_classifier(15, 0)\n self.mfeat_fou_classifier = self.build_classifier(13, 1)\n self.mfeat_kar_classifier = self.build_classifier(13, 2)", "def test_number_of_classes(simple_unet_data, number_of_classes):\n unet = models.UNet(num_classes=number_of_classes)\n output = unet(simple_unet_data)\n assert output.shape[-1] == number_of_classes", "def plot_class_distribution(labels):\n num_classes = get_num_classes(labels)\n count_map = Counter(labels)\n counts = [count_map[i] for i in range(num_classes)]\n idx = np.arange(num_classes)\n plt.bar(idx, counts, width=0.8, color='b')\n plt.xlabel('Class')\n plt.ylabel('Number of samples')\n plt.title('Class distribution')\n plt.xticks(idx, idx)\n plt.show()", "def neural_net_label_input(n_classes):\n labelstensor = tf.placeholder(tf.float32, shape=[None,n_classes], name='y')\n return labelstensor", "def getLabels(self):\n return self.numToLabel", "def get_classLabel(self, dataset, class_label): \n\t\tnode = self.root\n\t\tbroken=0\n\t\t\n\t\t#print(\"BEBE:\" + str(node.get_bebe( dataset)))\n\t\t\n\t\tif (node.get_bebe( dataset) == class_label ):\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn 0\n\n\t\t\tdef junk(data, class_label, seed, ratio):", "def count_classes(labels):\n class_dict = {}\n for 
image in labels:\n for row in image:\n for label in row:\n if label not in class_dict:\n class_dict[label] = 1\n else:\n class_dict[label] += 1\n return class_dict", "def generate_labels(n_samples):\n return np.ones([n_samples, 1]), np.zeros([n_samples, 1])", "def create_nodes(self):", "def equalize_data(class_labels, class_data):\n a1 = class_data[np.where(class_labels == 1)[0]]\n a2 = class_data[np.where(class_labels == 2)[0]]\n max_len = min(len(a1), len(a2))\n a1 = a1[0:max_len]\n a2 = a2[0:max_len]\n arr = np.concatenate((a1, a2))\n b1 = np.ones((max_len, 1)) * 0\n b2 = np.ones((max_len, 1)) * 1\n brr = np.concatenate((b1, b2))\n combined_arr = np.append(brr, arr, axis=1)\n combined_arr = np.random.permutation(combined_arr)\n labels = np.transpose(combined_arr)[0]\n data = np.delete(combined_arr, 0, axis=1)\n return labels, data", "def __len__(self):\n\n return len(self.labels)", "def get_train_labels(self, window, scene):\n pass", "def class_labels(self):\n return self._class_labels", "def num_labels(self):\n return len(self.get_labels())", "def num_labels(self):\n return len(self.get_labels())", "def load_data_labels(datasets):\n # Split by words\n x_text = datasets['data']\n x_text = [clean_str(sent) for sent in x_text]\n # Generate labels\n labels = []\n for i in range(len(x_text)):\n label = [0 for j in datasets['target_names']]\n #print('target={}, i={}'.format(datasets['target'], i))\n label[datasets['target'][i]] = 1\n labels.append(label)\n y = np.array(labels)\n return [x_text, y]", "def verbose_data(which_set, data, label):\n text = ['{} size: {}'.format(which_set, data.shape[0])]\n for i in range(label.max() + 1):\n text.append('class{}-{}'.format(i, len(np.where(label == i)[0])))\n text.append('\\n')\n text = ' '.join(text)\n tf.logging.info(text)", "def test_labels():\n size = 5\n labels = {\"x\": \"distance\", \"y\": \"force\"}\n s = channel.Slice(channel.TimeSeries(np.random.rand(size), np.random.rand(size)), labels)\n assert s.labels == labels\n assert s[:].labels == labels\n assert s[:0].labels == labels\n assert s[:10].labels == labels\n\n s = channel.Slice(channel.TimeSeries([], []), labels)\n assert len(s) == 0\n assert s.labels == labels\n assert s[:].labels == labels", "def __init__(self, num_labels):\n super().__init__()\n self.a = torch.nn.Parameter(torch.randn(num_labels))", "def gen_random_labels(\n X: Union[np.ndarray, int], n_classes: int, pvec=None\n) -> np.ndarray:\n\n if isinstance(X, int):\n num = X\n else:\n num = X.shape[0]\n\n pvec = np.ones((n_classes,)) / n_classes\n\n return npr.multinomial(1, pvec, size=num)", "def generate_labels():\n label_set = set([])\n for data in load_data():\n label = data.split(' ', 1)[0]\n label_set.add(label)\n labels = list(label_set)\n labels.sort()\n return labels", "def nr_labels(self):\n return self.model.nr_labels", "def node_count(self, *n_labels):\n if not n_labels:\n return len(self._nodes)\n elif len(n_labels) == 1:\n return len(self._nodes_by_label.get(n_labels[0], ()))\n else:\n return sum(1 for _ in self.nodes(*n_labels))", "def fix_label_names():\n\n assert trace.cpu.trace_done\n binary_addr = memorymanager.BinaryAddr(0)\n while binary_addr < len(classifications):\n c = classifications[binary_addr]\n if c is not None:\n dummy = [str(x) for x in c.as_string_list(binary_addr, None)]\n binary_addr += c.length()\n else:\n binary_addr += 1", "def select_num_imgs_per_class(cls, examples, labels, num_imgs_in_val):\n num_classes = len(set(labels))\n\n # def partition_train_set(self, imgs, num_imgs_in_val):\n 
labeled = []\n labeled_y = []\n unlabeled = []\n unlabeled_y = []\n\n cts = {x: 0 for x in range(num_classes)}\n for img_name, class_idx in zip(examples, labels):\n # allow labeled\n if cts[class_idx] < num_imgs_in_val:\n labeled.append(img_name)\n labeled_y.append(class_idx)\n cts[class_idx] += 1\n else:\n unlabeled.append(img_name)\n unlabeled_y.append(class_idx)\n\n labeled = np.stack(labeled)\n\n return labeled, labeled_y", "def get_class_labels(self):\r\n \r\n y = self.get_data()['y']\r\n if type(y) == torch.Tensor:\r\n return y.unique().numpy()\r\n else:\r\n return sorted(list(set(y)))", "def __init__(self):\n\n # List of all the class labels\n self.labels = [0, 1, 2, 3]\n\n # Dictionary to store count of each label in predicted labels list\n self.total_prediction_count = {0: 0, 1: 0, 2: 0, 3: 0}\n\n # Dictionary to store count of each label in actual labels list\n self.total_actual_count = {0: 0, 1: 0, 2: 0, 3: 0}\n\n # Dictionary to store count of correctly predicted labels\n self.total_correct_prediction_count = {0: 0, 1: 0, 2: 0, 3: 0}", "def balance_classes(data, labels):\n\n index_dict = {}\n\n for idx, label in enumerate(labels):\n if label not in index_dict:\n index_dict[label] = [idx]\n else:\n index_dict[label] += [idx]\n\n index_list = list(index_dict.values())\n\n min_balanced_number = min([len(l) for l in index_list])\n\n index_to_take_list = np.concatenate([\n np.random.choice(l, min_balanced_number, replace=False)\n for l in index_list\n ])\n\n np.random.shuffle(index_to_take_list)\n\n return data[index_to_take_list], labels[index_to_take_list]", "def vis_class(X, labels, title, file_path=None):\n unique_labels = set(labels)\n colors = [plt.cm.Spectral(each)\n for each in np.linspace(0, 1, len(unique_labels))]\n\n plt.figure(figsize=(15, 12))\n for k, col in zip(unique_labels, colors):\n if k == -1:\n # Black used for noise.\n col = [0, 0, 0, 1]\n\n class_member_mask = (labels == k)\n\n xy = X[class_member_mask]\n plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),\n markeredgecolor='k', markersize=14, label=k)\n plt.text(xy[0, 0], xy[0, 1], str(k), fontsize=18)\n\n # xy = X[class_member_mask & ~core_samples_mask]\n # plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),\n # markeredgecolor='k', markersize=6, label=k)\n plt.title(title)\n plt.legend()\n plt.tight_layout()\n if file_path:\n plt.savefig(file_path, dpi=300)", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def infer_data_labels(X_labels, cluster_labels):\r\n #Empty array of len(X)\r\n predicted_labels = np.zeros(len(X_labels)).astype(np.uint8)\r\n \r\n for i, cluster in enumerate(X_labels):\r\n for key, value in cluster_labels.items():\r\n if cluster in value:\r\n predicted_labels[i] = key\r\n \r\n return predicted_labels" ]
[ "0.63320434", "0.6277881", "0.6271124", "0.6242542", "0.6160365", "0.6119636", "0.60193723", "0.59684384", "0.59684384", "0.5925846", "0.5914046", "0.58883923", "0.58800185", "0.5870202", "0.58377177", "0.58357537", "0.5825657", "0.5825657", "0.5825657", "0.5825657", "0.5825657", "0.5825657", "0.57372254", "0.5730624", "0.5721477", "0.57053685", "0.57038534", "0.56914985", "0.5682479", "0.56781983", "0.56765467", "0.56616896", "0.5650845", "0.56492573", "0.56341124", "0.56134886", "0.5603694", "0.5598561", "0.5581215", "0.5566185", "0.5552933", "0.5541606", "0.55396837", "0.5511545", "0.5509578", "0.54992956", "0.54927236", "0.54905266", "0.5485973", "0.54811305", "0.54476035", "0.5438641", "0.5432208", "0.54299676", "0.54242957", "0.5421486", "0.54198796", "0.5416474", "0.54094034", "0.5408423", "0.5401411", "0.54009193", "0.5399154", "0.5397777", "0.539273", "0.5386566", "0.5380182", "0.53783697", "0.5374085", "0.53660125", "0.53648305", "0.53646624", "0.53636867", "0.5359127", "0.5351448", "0.53495145", "0.53479975", "0.5342202", "0.53365296", "0.5333307", "0.53330654", "0.53330654", "0.5332779", "0.53235686", "0.53225476", "0.5317846", "0.53168476", "0.5316662", "0.53141433", "0.5311736", "0.5303518", "0.53003937", "0.52979004", "0.5280933", "0.527833", "0.52769613", "0.52685285", "0.52685285", "0.52685285", "0.52673405" ]
0.56607765
32
Uses LoadData class to partition and prepare data. Puts data into dataloader objects to make use of batching and shuffling.
def load_mnist_data(nr_nodes, nr_classes, allocation, subset, batch_size):
    train_loader_list = []
    test_loader_list = []
    train = LoadData('MNIST', True, subset)
    test = LoadData('MNIST', False, False)
    train_data, train_targets = train.split(allocation, nr_nodes, class_per_node=nr_classes)
    for data, targets in zip(train_data, train_targets):
        train_dataset = CustomDataset(data, targets)
        train_loader_list.append(DataLoader(train_dataset, batch_size=batch_size, shuffle=True))
    test_data, test_targets = test.split('uniform', nr_nodes)
    for data, targets in zip(test_data, test_targets):
        test_dataset = CustomDataset(data, targets)
        test_loader_list.append(DataLoader(test_dataset, batch_size=batch_size, shuffle=False))
    return train_loader_list, test_loader_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dataloader(self):\n\n # load / split data\n train_data = self.data.get_train_data()\n if self.args.use_dev:\n train_data, dev_data = self.data.split_data(train_data)\n test_data = self.data.get_test_data()\n\n #print(train_data[0])\n #print(dev_data[0])\n #print(test_data[0])\n\n # build dataset\n train_dataset = self.loader.build_dataset(\n train_data, \n self.args.train_max_seq_len)\n train_loader = self.loader.build_dataloader(\n train_dataset, 'train')\n\n test_dataset = self.loader.build_dataset(\n test_data,\n self.args.eval_max_seq_len)\n test_loader = self.loader.build_dataloader(\n test_dataset, 'test')\n\n if self.args.use_dev:\n dev_dataset = self.loader.build_dataset(\n dev_data,\n self.args.eval_max_seq_len)\n dev_loader = self.loader.build_dataloader(\n dev_dataset, 'dev')\n return train_loader, dev_loader, test_loader\n else:\n return train_loader, test_loader", "def _prepare_data(self):\n #TODO hardcoded values need to change\n print_info(\"Preprocessing the train data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"train\"),\n self.TRAIN_OUT_PATH)\n\n print_info(\"Preprocessing the test data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"test\"),\n self.TEST_OUT_PATH)\n\n print_info(\"Preprocessing the validation data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"val\"),\n self.VAL_OUT_PATH)", "def _load_data(self):\n if self._name in BALANCE_DATASET:\n _loader = dataset_loaders[self._name]\n xnp, y = _loader()\n\n # Train - Test split\n gen = ShuffleSplit(n_splits=1, random_state=42, test_size=self._test_size).split(xnp)\n train_idx, test_idx = next(gen)\n\n # Train data.\n self.xnp_tr = xnp[train_idx]\n self.y_tr = y[train_idx]\n # Test data.\n self.xnp_ts = xnp[test_idx]\n self.y_ts = y[test_idx]\n\n else:\n _loader = dataset_loaders[self._name]\n xnp, xp, y = _loader()\n # self.xnp, self.xp, self.y = _loader()\n\n # Train - Test split\n gen = ShuffleSplit(n_splits=1, random_state=42, test_size=self._test_size).split(xnp)\n train_idx, test_idx = next(gen)\n\n # Train data.\n self.xnp_tr = xnp[train_idx]\n self.xp_tr = xp[train_idx]\n self.y_tr = y[train_idx]\n # Test data.\n self.xnp_ts = xnp[test_idx]\n self.xp_ts = xp[test_idx]\n self.y_ts = y[test_idx]", "def initialise_dataset_loader(\n self, data_param=None, task_param=None, data_partitioner=None):\n raise NotImplementedError", "def prepare_data(self, *args, **kwargs):\n # get paths to train and test splits\n _split_paths = [os.path.join(self.path_to_data, split)\n for split in os.listdir(self.path_to_data)]\n\n # for each split [train, test]\n for _path in _split_paths:\n _img_classes = os.listdir(_path) # get subfolders representing each class\n self.splits[os.path.basename(_path)] = []\n\n # get the images in pairs with its corresponding class\n for _class in _img_classes:\n _data = self.get_img_text_pair(os.path.join(_path, _class))\n\n if os.path.basename(_path) == 'train':\n self.weights[self.encode_label(_class)] = len(_data)\n self.splits[os.path.basename(_path)].extend(_data)", "def _init_al_dataset(self):\n\n self._init_dataset()\n\n train_dataset = self.datasets['train']\n\n dataset_size = len(train_dataset)\n self.budget = math.ceil(self.budget_frac*dataset_size)\n Sampler.__init__(self, config, self.budget) # TODO: Weird place to initialise this\n\n all_indices = set(np.arange(dataset_size))\n k_initial = math.ceil(len(all_indices)*self.initial_budget_frac)\n initial_indices = random.sample(list(all_indices), 
k=k_initial)\n\n sampler_init = data.sampler.SubsetRandomSampler(initial_indices) # need to sample from training dataset\n\n self.labelled_dataloader = data.DataLoader(train_dataset, sampler=sampler_init, batch_size=self.batch_size, drop_last=True)\n self.val_dataloader = data.DataLoader(self.datasets['valid'], batch_size=self.batch_size, drop_last=False)\n self.test_dataloader = data.DataLoader(self.datasets['test'], batch_size=self.batch_size, drop_last=False)\n\n return all_indices, initial_indices", "def prepare_data(self,d):\n train_loaders, train_iters = {}, {}\n unlabeled_loaders, unlabeled_iters = {}, {}\n for domain in opt.domains:\n #CONVERT TO FLOAT32\n features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape((-1,1))\n train = data_utils.TensorDataset(features,target)\n train_loaders[domain] = DataLoader(train, opt.batch_size, shuffle = True)\n train_iters[domain] = iter(train_loaders[domain])\n for domain in opt.unlabeled_domains:\n features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape(-1,1))\n uset = data_utils.TensorDataset(features,target)\n unlabeled_loaders[domain] = DataLoader(uset,opt.batch_size, shuffle = True)\n unlabeled_iters[domain] = iter(unlabeled_loaders[domain])\n \n return train_loaders, train_iters, unlabeled_loaders, unlabeled_iters", "def prepare_dataloader(opt, dataobj):\n\n def load_data(name):\n with open(name, 'rb') as f:\n data = pickle.load(f)\n num_types = 1 # There is no event type prediction, hence using a dummy value, this will basically be a constant value field\n return data, num_types\n\n print('[Info] Loading train data...')\n train_data, num_types = load_data(opt.data + 'train_ny.pkl')\n print('[Info] Loading dev data...')\n val_data, _ = load_data(opt.data + 'val_ny.pkl')\n print('[Info] Loading test data...')\n test_data, _ = load_data(opt.data + 'test_ny.pkl')\n\n trainloader = get_dataloader(train_data, opt.batch_size, shuffle=True)\n validationloader = get_dataloader(val_data, opt.batch_size, shuffle=True)\n testloader = get_dataloader(test_data, opt.batch_size, shuffle=False)\n return trainloader, validationloader, testloader, num_types", "def get_precomp_loader(data_path, data_split, opt, batch_size=100,\n shuffle=True, num_workers=16):\n dset = PrecompDataset(data_path, data_split, opt)\n\n data_loader = torch.utils.data.DataLoader(dataset=dset,\n batch_size=batch_size,\n shuffle=shuffle,\n pin_memory=True,\n collate_fn=collate_fn,num_workers = num_workers)\n return data_loader", "def prepare_dataset(\n db, dataset: str, batch_size, chunk_size, shuffle=True,\n prefetch=True, dataset_slice=None,\n):\n dataset = db.get_dataset(dataset)\n\n if dataset_slice is not None:\n dataset = dataset[dataset_slice]\n\n segmenter = Segmenter(\n chunk_size, include_keys=('y', 's'), axis=-1,\n anchor='random' if shuffle else 'left',\n )\n\n def _set_num_samples(example):\n example['num_samples'] = example['y'].shape[-1]\n return example\n\n if shuffle:\n dataset = dataset.shuffle(reshuffle=True)\n\n dataset = dataset.map(pre_batch_transform)\n dataset = dataset.map(segmenter)\n\n # FilterExceptions are only raised inside the chunking code if the\n # example is too short. 
If chunk_size == -1, no filter exception is raised.\n catch_exception = segmenter.length > 0\n if prefetch:\n dataset = dataset.prefetch(\n 8, 16, catch_filter_exception=catch_exception)\n elif catch_exception:\n dataset = dataset.catch()\n\n if chunk_size != -1:\n dataset = dataset.unbatch()\n else:\n def unbatch(example):\n assert len(example) == 1, example\n return example[0]\n dataset = dataset.map(unbatch)\n dataset = dataset.map(_set_num_samples)\n\n if shuffle:\n dataset = dataset.shuffle(reshuffle=True, buffer_size=128)\n\n dataset = dataset.batch(batch_size)\n dataset = dataset.map(pt.data.batch.Sorter('num_samples'))\n dataset = dataset.map(pt.data.utils.collate_fn)\n\n return dataset", "def get_loader(dataset='train.txt', crop_size=128, image_size=28, batch_size=2, mode='train', num_workers=1): \n transform = [] \n if mode == 'train': \n transform.append(transforms.RandomHorizontalFlip()) \n transform.append(transforms.CenterCrop(crop_size)) \n transform.append(transforms.Resize(image_size)) \n transform.append(transforms.ToTensor()) \n transform.append(transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))) \n transform = transforms.Compose(transform) \n train_data=MyDataset(txt=dataset, transform=transform) \n data_loader = DataLoader(dataset=train_data, \n batch_size=batch_size, \n shuffle=(mode=='train'), \n num_workers=num_workers) \n return data_loader", "def prepare(self):\n if self.opts['verbose']:\n print(\"Preparing dataset (one-time operation)...\")\n # Create paths files and load them back in\n self._build_ID_sets()\n self._create_ID_files()\n self._load_ID_files()\n if self.opts['verbose']:\n print(\"... done with preparing the dataset.\")", "def _split_data(self):\n\n # Set training data\n self.train_data = torchvision.datasets.ImageFolder(\n os.path.join(self.path, 'train'),\n transform=self._transform()\n )\n self.classes = self.train_data.classes\n\n # Set validation data\n self.val_data = torchvision.datasets.ImageFolder(\n os.path.join(self.path, 'test'),\n transform=self._transform(train=False)\n )", "def make_loader(dataset, train_batch_size, validation_split=0.2):\n # number of samples in train and test set\n train_len = int(len(dataset) * (1 - validation_split))\n test_len = len(dataset) - train_len\n train_set, test_set = torch.utils.data.random_split(dataset, [train_len, test_len])\n # create train_loader\n train_loader = torch.utils.data.DataLoader(\n train_set, batch_size=train_batch_size, shuffle=True,\n )\n # create test_loader\n test_loader = torch.utils.data.DataLoader(test_set, batch_size=1, shuffle=False,)\n return train_loader, test_loader", "def load_dataset(self):\n # Get all the files in the directory\n file_list = self.get_file_list()\n\n # Concatenate the data corresponding to a list of files\n data = self.concatenate_file_data(file_list)\n\n # Shuffle the data and create the training and the validation datasets\n data = self.shuffle_data_dictionary(data)\n self.training_dataset, self.validation_dataset = self.split_data_into_training_and_validation(data)", "def train_dataloader(self) -> DataLoader:\n return DataLoader(\n self.train_dataset,\n batch_size=self.hparams.batch_size,\n shuffle=True,\n num_workers=multiprocessing.cpu_count(),\n )", "def load_data(self,split='train'):\n raise NotImplementedError", "def init_loaders(self, *args, **kwargs):\n\n # Convert the data to Dataset\n dataset_dict = self.init_datasets(*args, **kwargs)\n\n # If the Dataset implements collate_fn, that is used. 
Otherwise, default_collate is used\n if hasattr(dataset_dict[\"train\"], \"collate_fn\") and callable(\n getattr(dataset_dict[\"train\"], \"collate_fn\")\n ):\n collate_fn = dataset_dict[\"train\"].collate_fn\n else:\n collate_fn = default_collate\n\n # If 'iters_per_epoch' is defined, then a fixed number of random sample batches from the training set\n # are drawn per epoch.\n # Otherwise, an epoch is defined by a full run through all of the data in the dataloader.\n #\n if self.config_dict.get(\"iters_per_epoch\") is not None:\n num_samples = (\n self.config_dict[\"iters_per_epoch\"] * self.config_dict[\"batch_size\"]\n )\n loaders_dict = {}\n for key in dataset_dict.keys():\n if key == \"train\":\n loaders_dict[key] = DataLoader(\n dataset_dict[key],\n batch_sampler=BatchSampler(\n RandomSampler(\n dataset_dict[key],\n replacement=True,\n num_samples=num_samples,\n ),\n batch_size=self.config_dict[\"batch_size\"],\n drop_last=False,\n ),\n collate_fn=collate_fn,\n )\n else:\n loaders_dict[key] = DataLoader(\n dataset_dict[key],\n batch_size=self.config_dict[\"batch_size\"],\n collate_fn=collate_fn,\n )\n else:\n loaders_dict = {\n key: DataLoader(\n dataset_dict[key],\n batch_size=self.config_dict[\"batch_size\"],\n collate_fn=collate_fn,\n )\n for key in data_dict.keys()\n }\n\n return loaders_dict", "def get_precomp_loader(data_path, data_split, vocab, opt, batch_size=100,\r\n shuffle=True, num_workers=2):\r\n\r\n dset = PrecompDataset(data_path, data_split, vocab)\r\n # train_sampler = torch.utils.data.distributed.DistributedSampler(dset)\r\n # if data_split == 'train':\r\n # data_loader = DataLoader(dataset=dset, batch_size=batch_size, shuffle=False, num_workers=num_workers, pin_memory=True, collate_fn=collate_fn, sampler=train_sampler)\r\n # else:\r\n data_loader = DataLoaderX(dataset=dset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, pin_memory=True, collate_fn=collate_fn)\r\n\r\n return data_loader", "def _init_train_loader(self):\n # Choose the right dataset type\n if self.config_args[\"num_members\"] > 1:\n class_dataset_wrapper = dataset_wrapper.MixMoDataset\n else:\n class_dataset_wrapper = dataset_wrapper.MSDADataset\n\n # Load augmentations\n self.traindatasetwrapper = class_dataset_wrapper(\n dataset=self.train_dataset,\n num_classes=int(self.config_args[\"data\"][\"num_classes\"]),\n num_members=self.config_args[\"num_members\"],\n dict_config=self.config_args[\"training\"][\"dataset_wrapper\"],\n properties=self.properties\n )\n\n # Build standard sampler\n _train_sampler = torch.utils.data.sampler.RandomSampler(\n data_source=self.traindatasetwrapper, ## only needed for its length\n num_samples=None,\n replacement=False,\n )\n\n # Wrap it with the repeating sampler used for multi-input models\n batch_sampler = batch_repetition_sampler.BatchRepetitionSampler(\n sampler=_train_sampler,\n batch_size=self.batch_size,\n num_members=self.config_args[\"num_members\"],\n drop_last=True,\n config_batch_sampler=self.config_args[\"training\"][\"batch_sampler\"]\n )\n\n self.train_loader = torch.utils.data.DataLoader(\n self.traindatasetwrapper,\n batch_sampler=batch_sampler,\n num_workers=self.num_workers,\n batch_size=1,\n shuffle=False,\n sampler=None,\n drop_last=False,\n pin_memory=True,\n )", "def _custom_data_loader(self) -> DataLoader:\n dataloaders = DataLoader(self.dataset, batch_size=1)\n return dataloaders", "def get_data_loaders(data, batch_size, ratio=0.8, num_workers=1):\n train_size = int(len(data) * ratio)\n val_size = len(data) - train_size\n 
train_set, val_set = random_split(data, [train_size, val_size])\n data_train_loader = DataLoader(train_set, batch_size=batch_size, num_workers=num_workers, shuffle=True)\n data_val_loader = DataLoader(val_set, batch_size=batch_size, num_workers=num_workers, shuffle=True)\n return data_train_loader, data_val_loader", "def dataio_prepare(hparams):\n data_folder = hparams[\"data_folder\"]\n\n train_data = dataset.DynamicItemDataset.from_csv(\n csv_path=hparams[\"train_data\"],\n replacements={\"data_root\": data_folder}, )\n\n if hparams[\"sorting\"] == \"ascending\":\n # we sort training data to speed up training and get better results.\n train_data = train_data.filtered_sorted(sort_key=\"duration\")\n # when sorting do not shuffle in dataloader ! otherwise is pointless\n hparams[\"train_dataloader_opts\"][\"shuffle\"] = False\n\n elif hparams[\"sorting\"] == \"descending\":\n train_data = train_data.filtered_sorted(\n sort_key=\"duration\", reverse=True)\n # when sorting do not shuffle in dataloader ! otherwise is pointless\n hparams[\"train_dataloader_opts\"][\"shuffle\"] = False\n\n elif hparams[\"sorting\"] == \"random\":\n pass\n\n else:\n raise NotImplementedError(\n \"sorting must be random, ascending or descending\")\n\n valid_data = dataset.DynamicItemDataset.from_csv(\n csv_path=hparams[\"valid_data\"],\n replacements={\"data_root\": data_folder}, )\n valid_data = valid_data.filtered_sorted(sort_key=\"duration\")\n\n test_data = dataset.DynamicItemDataset.from_csv(\n csv_path=hparams[\"test_data\"],\n replacements={\"data_root\": data_folder}, )\n test_data = test_data.filtered_sorted(sort_key=\"duration\")\n\n datasets = [train_data, valid_data, test_data]\n\n # Defining tokenizer and loading it\n tokenizer = transformers.BertTokenizer.from_pretrained('bert-base-chinese')\n\n # 2. Define audio pipeline:\n @data_pipeline.takes(\"wav\")\n @data_pipeline.provides(\"sig\")\n def audio_pipeline(wav):\n sig = dataio.read_audio(wav)\n return sig\n\n dataset.add_dynamic_item(datasets, audio_pipeline)\n\n # 3. Define text pipeline:\n @data_pipeline.takes(\"transcript\")\n @data_pipeline.provides(\"wrd\", \"tokens_list\", \"tokens\")\n def text_pipeline(wrd):\n wrd = \"\".join(wrd.split(\" \"))\n yield wrd\n tokens_list = tokenizer(wrd)[\"input_ids\"]\n yield tokens_list\n tokens = numpy.array(tokens_list, dtype=\"int64\")\n yield tokens\n\n dataset.add_dynamic_item(datasets, text_pipeline)\n\n # 4. Set output:\n dataset.set_output_keys(\n datasets,\n [\"id\", \"sig\", \"wrd\", \"tokens\"], )\n\n # 5. 
If Dynamic Batching is used, we instantiate the needed samplers.\n train_batch_sampler = None\n valid_batch_sampler = None\n if hparams[\"dynamic_batching\"]:\n from sampler import DynamicBatchSampler # noqa\n\n dynamic_hparams = hparams[\"dynamic_batch_sampler\"]\n num_buckets = dynamic_hparams[\"num_buckets\"]\n\n train_batch_sampler = DynamicBatchSampler(\n train_data,\n dynamic_hparams[\"max_batch_len\"],\n num_buckets=num_buckets,\n length_func=lambda x: x[\"duration\"],\n shuffle=dynamic_hparams[\"shuffle_ex\"],\n batch_ordering=dynamic_hparams[\"batch_ordering\"], )\n\n valid_batch_sampler = DynamicBatchSampler(\n valid_data,\n dynamic_hparams[\"max_batch_len\"],\n num_buckets=num_buckets,\n length_func=lambda x: x[\"duration\"],\n shuffle=dynamic_hparams[\"shuffle_ex\"],\n batch_ordering=dynamic_hparams[\"batch_ordering\"], )\n\n return (train_data, valid_data, test_data, tokenizer, train_batch_sampler,\n valid_batch_sampler, )", "def prepare_data(self):\n try:\n self.train_dataset = self.datasets['train']\n self.val_dataset = self.datasets['val']\n try:\n self.test_dataset = self.datasets['test']\n except:\n pass\n except Exception as e:\n print('Data was not succesfully prepared:', e)", "def prepare_dataloaders(data,\n seq_len,\n batch_size=64,\n validation_set=False,\n validation_size=0.1,\n random_seed=42):\n vocab = set(data)\n token2id = {k: v for v, k in enumerate(vocab)}\n id2token = {k: v for v, k in token2id.items()}\n data_range = range(0, len(data) - seq_len, seq_len)\n\n data = [token2id[t] for t in data]\n data = np.array([data[i:i + seq_len] for i in data_range])\n tensor_data = torch.from_numpy(data)\n\n if validation_set:\n np.random.seed(random_seed)\n idx = np.random.choice(\n range(len(tensor_data)), size=len(tensor_data), replace=False)\n split = int(len(idx) * (1 - validation_size))\n train_idx = idx[:split]\n valid_idx = idx[split:]\n\n train_data = TensorDataset(torch.LongTensor(tensor_data[train_idx]))\n valid_data = TensorDataset(torch.LongTensor(tensor_data[valid_idx]))\n\n train_loader = DataLoader(\n train_data, shuffle=True, batch_size=batch_size)\n valid_loader = DataLoader(\n valid_data, shuffle=True, batch_size=batch_size)\n\n return train_loader, valid_loader, vocab, token2id, id2token\n else:\n train_data = TensorDataset(torch.LongTensor(tensor_data))\n train_loader = DataLoader(\n train_data, shuffle=True, batch_size=batch_size)\n return train_loader, vocab, token2id, id2token", "def prepare(self):\n\n # step 0: load only when not loaded yet\n if TRAINING in self.data \\\n and VALIDATION in self.data:\n return\n\n # step 1: load the file names\n file_list = sorted(glob.glob(self.location+\"*.mhd\"))\n # count the number of data points\n\n # make a stratified validation set\n # note, the seed decides the validation set, but it is deterministic in the names\n random.seed(317070)\n patient_names = [self.patient_name_from_file_name(f) for f in file_list]\n validation_patients = random.sample(patient_names, int(VALIDATION_SET_SIZE*len(patient_names)))\n\n # make the static data empty\n for s in self.datasets:\n self.data[s] = []\n self.labels[s] = []\n self.names[s] = []\n\n # load the filenames and put into the right dataset\n labels_as_dict = defaultdict(list)\n\n with open(paths.LUNA_LABELS_PATH, 'rb') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n next(reader) # skip the header\n for row in reader:\n label = (float(row[1]), float(row[2]), float(row[3]), float(row[4]))\n labels_as_dict[str(row[0])].append(label)\n\n for 
patient_file in file_list:\n patient_name = self.patient_name_from_file_name(patient_file)\n\n if patient_name in validation_patients:\n s = VALIDATION\n else:\n s = TRAINING\n label = labels_as_dict[str(patient_name)]\n if self.only_positive and not label:\n continue\n self.data[s].append(patient_file)\n \n if self.pick_nodule:\n self.labels[s].append([random.choice(label)]) \n else:\n self.labels[s].append(label)\n \n \n self.names[s].append(patient_name)\n\n # give every patient a unique number\n last_index = -1\n for s in self.datasets:\n self.indices[s] = range(last_index+1,last_index+1+len(self.data[s]))\n if len(self.indices[s]) > 0:\n last_index = self.indices[s][-1]\n print s, len(self.indices[s]), \"samples\"", "def get_dataloader(data_folder, model_name, data_name, size=\"default\"):\n training_set = None\n validation_set = None\n\n if model_name == \"Howe_Patterson\":\n if data_name == \"combined\":\n partition = []\n for data_fold in data_folder:\n partition.append(load_obj(os.path.join(data_fold, 'data_partition.pkl')))\n\n elif size == \"small\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_small.pkl'))\n elif size == \"tiny\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_tiny.pkl'))\n else:\n partition = load_obj(os.path.join(data_folder, 'data_partition.pkl'))\n\n\n if data_name == \"SHHS\":\n training_set = Dataset_full_SHHS(partition['train'], data_folder)\n validation_set = Dataset_full_SHHS(partition['validation'], data_folder)\n elif data_name == \"snooze\":\n training_set = Dataset_full(partition['train'], data_folder)\n validation_set = Dataset_full(partition['validation'], data_folder)\n elif data_name == \"philips\":\n training_set = Dataset_Philips_full(partition['train'], data_folder)\n validation_set = Dataset_Philips_full(partition['validation'], data_folder)\n elif data_name == \"HMC\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"combined\":\n training_set = ConcatDataset(\n Dataset_full(partition[0]['train'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['train'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n validation_set = ConcatDataset(\n Dataset_full(partition[0]['validation'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['validation'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n else:\n print(\"{} wrong data for dataloader\".format(data_name))\n exit()\n elif model_name == \"Deep_Sleep\":\n if data_name == \"combined\":\n partition = []\n for data_fold in data_folder:\n partition.append(load_obj(os.path.join(data_fold, 'data_partition.pkl')))\n\n elif size == \"small\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_small.pkl'))\n elif size == \"tiny\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_tiny.pkl'))\n else:\n partition = load_obj(os.path.join(data_folder, 'data_partition.pkl'))\n\n if data_name == \"SHHS\":\n training_set = Dataset_full_SHHS(partition['train'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n validation_set = Dataset_full_SHHS(partition['validation'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n elif data_name == \"snooze\":\n training_set = 
Dataset_full(partition['train'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n validation_set = Dataset_full(partition['validation'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n elif data_name == \"philips\":\n training_set = Dataset_Philips_full(partition['train'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n validation_set = Dataset_Philips_full(partition['validation'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n elif data_name == \"HMC\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"combined\":\n # TODO combined dataset https://discuss.pytorch.org/t/train-simultaneously-on-two-datasets/649/17\n training_set = ConcatDataset(\n Dataset_full(partition[0]['train'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['train'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n validation_set = ConcatDataset(\n Dataset_full(partition[0]['validation'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['validation'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n else:\n print(\"{} wrong data for dataloader\".format(data_name))\n exit()\n elif model_name == \"ConvNet_IID\":\n if data_name == \"combined\":\n partition = []\n for data_fold in data_folder:\n partition.append(load_obj(os.path.join(data_fold, 'data_partition_IID_windows.pkl')))\n else:\n partition = load_obj(os.path.join(data_folder, 'data_partition_IID_windows.pkl'))\n if data_name == \"SHHS\":\n training_set = Dataset_IID_window_SHHS(partition['train'], data_folder)\n validation_set = Dataset_IID_window_SHHS(partition['validation'], data_folder)\n elif data_name == \"snooze\":\n training_set = Dataset_IID_window(partition['train'], data_folder)\n validation_set = Dataset_IID_window(partition['validation'], data_folder)\n elif data_name == \"philips\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"HMC\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"combined\":\n training_set = ConcatDataset(\n Dataset_IID_window(partition[0]['train'], data_folder[0]),\n Dataset_IID_window_SHHS(partition[1]['train'], data_folder[1]))\n validation_set = ConcatDataset(\n Dataset_IID_window(partition[0]['validation'], data_folder[0]),\n Dataset_IID_window_SHHS(partition[1]['validation'], data_folder[1]))\n else:\n print(\"{} wrong data for dataloader\".format(data_name))\n exit()\n\n else:\n print(\"{} wrong model for dataloader\".format(model_name))\n exit()\n\n return training_set, validation_set", "def __init__(self, dataset, batch_size, n_threads=4,\n\t ten_crop=False, data_path='/home/dataset/', logger=None):\n\t\tself.dataset = dataset\n\t\tself.batch_size = batch_size\n\t\tself.n_threads = n_threads\n\t\tself.ten_crop = ten_crop\n\t\tself.data_path = data_path\n\t\tself.logger = logger\n\t\tself.dataset_root = data_path\n\t\t\n\t\tself.logger.info(\"|===>Creating data loader for \" + self.dataset)\n\t\t\n\t\tif self.dataset in [\"cifar100\"]:\n\t\t\tself.train_loader, self.test_loader = self.cifar(\n\t\t\t\tdataset=self.dataset)\n\n\t\telif self.dataset in [\"cifar10\"]:\n\t\t\tself.train_loader, 
self.test_loader = self.cifar(\n dataset=self.dataset)\n\t\t\n\t\telif self.dataset in [\"imagenet\"]:\n\t\t\tself.train_loader, self.test_loader = self.imagenet(\n\t\t\t\tdataset=self.dataset)\n\t\telse:\n\t\t\tassert False, \"invalid data set\"", "def data_creator(config):\n train_dataset, val_dataset = LinearDataset(2, 5), LinearDataset(2, 5)\n train_loader = DataLoader(train_dataset, batch_size=config[\"batch_size\"])\n val_loader = DataLoader(val_dataset, batch_size=config[\"batch_size\"])\n return train_loader, val_loader", "def init_data(dataset_config: dict):\n # train and dev will be in random order, test may be ordered according to labels\n if dataset_config[\"name\"] == \"CoLA\":\n train, dev, test, num_classes = load_cola(dataset_config)\n elif dataset_config[\"name\"] == \"AGNews\":\n train, dev, test, num_classes = load_ag_news(dataset_config)\n elif dataset_config[\"name\"] == \"DBPedia\":\n train, dev, test, num_classes = load_dbpedia(dataset_config)\n elif dataset_config[\"name\"] == \"YRF\":\n train, dev, test, num_classes = load_yrf(dataset_config)\n else:\n raise NameError(f\"Dataset {dataset_config['name']} not implemented.\")\n # etc.\n\n # shrink size if debugging\n if dataset_config[\"debug\"]:\n # choose a random subset using huggingface select function\n train = train.select(random.sample(range(len(train)), k=200))\n dev = dev.select(random.sample(range(len(dev)), k=40))\n test = test.select(random.sample(range(len(test)), k=200))\n\n # create class imbalance\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"pool_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"pool_balance\"] == \"imbalanced\":\n train = train.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"pool_balance = {dataset_config['pool_balance']} not allowed\")\n\n if dataset_config[\"dev_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"dev_balance\"] == \"imbalanced\":\n dev = dev.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"dev_balance = {dataset_config['dev_balance']} not allowed\")\n\n # get seed labelled pool indices (using the same seed data every time)\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"seed_balance\"] == \"balanced\":\n # this is random (will have some variance vs pool)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"]\n )\n elif dataset_config[\"seed_balance\"] == \"stratified\":\n # this is the same as the underlying train set (which may be unbalanced)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"],\n stratify=train['label']\n )\n elif dataset_config[\"seed_balance\"] == \"imbalanced\":\n # artificially sample an imbalanced seed set from the pool\n unlabelled_pool_idx, labelled_pool_idx = create_imbalanced_seed(\n train,\n num_classes,\n dataset_config[\"seed_size\"],\n dataset_config['imbalance_prop'],\n dataset_config['imbalance_cls']\n )\n else:\n raise NameError(f\"seed_balance = {dataset_config['seed_balance']} not allowed\")\n\n return train, dev, test, num_classes, labelled_pool_idx, unlabelled_pool_idx", "def prepare_data_loaders(num_split, batch_size=32, hier=False, 
elmo=False, elmo_pre=None, use_elmo_pre=False, deepmoji=False, dev_with_label=False, include_test=False):\n train_data_loaders = []\n val_data_loaders = []\n test_data_loaders = []\n\n vocab = generate_vocab(deepmoji)\n for i in range(num_split):\n train, val, test, _ = prepare_data(batch_size=batch_size, hier=hier, elmo=elmo, elmo_pre=elmo_pre, use_elmo_pre=use_elmo_pre, deepmoji=deepmoji, is_shuffle=True, random_state=i, vocab=vocab, dev_with_label=dev_with_label, include_test=include_test)\n train_data_loaders.append(train)\n val_data_loaders.append(val)\n test_data_loaders.append(test)\n\n return train_data_loaders, val_data_loaders, test_data_loaders, vocab", "def pre_process_data(self, all_labels, all_data):\n\n # [1] Normalizes data\n all_data = self.pre_precess_manager.normalization(all_data)\n\n data_train, data_test, label_train, label_test = train_test_split(all_data, all_labels, test_size=0.1,\n shuffle=True)\n\n return data_train, data_test, label_train, label_test", "def build_training_data_loader(self) -> DataLoader:\n pass", "def creates_data_loader():\n dataset_faces = FaceDataset(\n IMG_DIR, transform=transform_train, face=True)\n\n dataset_no_faces = FaceDataset(\n IMG_DIR, transform=transform_train, face=False)\n\n datasets_faces_split = train_val_test(dataset_faces, 0.2, 0.0)\n datasets_no_faces_split = train_val_test(dataset_no_faces, 0.2, 0.0)\n\n datasets = {}\n datasets[\"train\"] = datasets_faces_split[\"train\"] + \\\n datasets_no_faces_split[\"train\"]\n datasets[\"test\"] = datasets_no_faces_split[\"test\"]\n datasets[\"val\"] = datasets_faces_split[\"val\"] + \\\n datasets_no_faces_split[\"val\"]\n\n train_loader = DataLoader(dataset=datasets[\"train\"], batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY, shuffle=True, drop_last=False)\n\n val_loader = DataLoader(dataset=datasets[\"val\"], batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY, shuffle=True, drop_last=False)\n return train_loader, val_loader", "def prepare_data_for_training(args):\n # Form the train/test splits and write them to disk\n dataset = data.Dataset(args)\n # get image classes and image counts in each class\n label_map = dataset.get_class_info()\n class_count = len(list(label_map.values()))\n # split the data and store it in log dir\n df_train, df_test = dataset.split_dataset()\n\n # perform dataset augmentations\n image_data = augment.Augmentation(args)\n # get the data gens for training and test images\n train_data_gen, _ = image_data.map_fn_train(df_train)\n test_data_gen, _ = image_data.map_fn_test(df_test)\n\n return train_data_gen, test_data_gen, df_train, df_test, class_count", "def create_loader(self):\n # load data to memory.\n if self.is_cifar100:\n (x_train, y_train), (x_test,\n y_test) = tf.keras.datasets.cifar100.load_data()\n else:\n (x_train, y_train), (x_test,\n y_test) = tf.keras.datasets.cifar10.load_data()\n\n y_train = y_train.astype(np.int32)\n y_test = y_test.astype(np.int32)\n\n x_train, y_train = shuffle_dataset(x_train, y_train)\n n_probe = int(math.floor(x_train.shape[0] * FLAGS.probe_dataset_hold_ratio))\n\n # TODO(zizhaoz): add other noise types.\n if 'asymmetric' in self.dataset_name:\n assert 'cifar100' not in self.dataset_name, 'Asymmetric only has CIFAR10'\n (x_train, y_train, y_gold), (x_probe, y_probe) = load_asymmetric(\n x_train,\n y_train,\n noise_ratio=self.noise_ratio,\n n_val=n_probe,\n random_seed=FLAGS.seed)\n elif 'uniform' in self.dataset_name:\n (x_train, y_train, y_gold), (x_probe,\n y_probe) = 
load_train_val_uniform_noise(\n x_train,\n y_train,\n n_classes=self.num_classes,\n noise_ratio=self.noise_ratio,\n n_val=n_probe)\n else:\n assert self.dataset_name in ['cifar10', 'cifar100']\n\n if not self.split_probe and x_probe is not None:\n # Usually used for supervised comparison.\n tf.logging.info('Merge train and probe')\n x_train = np.concatenate([x_train, x_probe], axis=0)\n y_train = np.concatenate([y_train, y_probe], axis=0)\n y_gold = np.concatenate([y_gold, y_probe], axis=0)\n\n conf_mat = sklearn_metrics.confusion_matrix(y_gold, y_train)\n conf_mat = conf_mat / np.sum(conf_mat, axis=1, keepdims=True)\n tf.logging.info('Corrupted confusion matirx\\n {}'.format(conf_mat))\n x_test, y_test = shuffle_dataset(x_test, y_test)\n self.train_dataset_size = x_train.shape[0]\n self.val_dataset_size = x_test.shape[0]\n if self.split_probe:\n self.probe_size = x_probe.shape[0]\n\n input_tuple = (x_train, y_train.squeeze())\n self.train_dataflow = self.create_ds(input_tuple, is_train=True)\n self.val_dataflow = self.create_ds((x_test, y_test.squeeze()),\n is_train=False)\n if self.split_probe:\n self.probe_dataflow = self.create_ds((x_probe, y_probe.squeeze()),\n is_train=True)\n\n tf.logging.info('Init [{}] dataset loader'.format(self.dataset_name))\n verbose_data('train', x_train, y_train)\n verbose_data('test', x_test, y_test)\n if self.split_probe:\n verbose_data('probe', x_probe, y_probe)\n\n return self", "def prepare_data(path_to_data=PATH_TO_DATA, batch_size=BATCH_SIZE,\n img_size=IMG_SIZE, subsample=None, shuffle=True):\n if subsample is not None:\n idx = np.arange(5000)\n np.random.shuffle(idx)\n sampler = SubsetRandomSampler(idx[:subsample])\n shuffle = False\n else:\n sampler = None\n transform = transforms.Compose([\n # transforms.RandomHorizontalFlip(p=0.5),\n # transforms.RandomRotation((-20, 20)),\n # transforms.Resize(img_size),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])\n training_data = torchvision.datasets.ImageFolder(\n path_to_data, transform=transform)\n print(\"Length of data: \", len(training_data))\n\n training_loader = torch.utils.data.DataLoader(\n training_data, batch_size=batch_size,\n sampler=sampler, shuffle=shuffle)\n return training_loader", "def get_each_loader(data_path, batch_size, trn_negnum, shuffle=True, num_workers=0):\n \n dataset = ML_Dataset(data_path, trn_negnum)\n \n if data_path.endswith('trn') == True:\n collate = dataset.train_collate\n else:\n collate = test_collate\n\n data_loader = data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=collate)\n\n return data_loader", "def data_loader(root, batch_size=64):\n input_transform = get_transform()\n dataset = CustomDataset(root, input_transform)\n return data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=False)", "def prepare(self):\n bcolz.set_nthreads(2)\n\n # step 0: load only when not loaded yet\n if TRAINING in self.data and VALIDATION in self.data: return\n\n # step 1: load the file names\n patients = sorted(glob.glob(self.location+'/*.*/'))\n print len(patients), \"patients\"\n\n # step 1: load the file names\n # make a stratified validation set\n # note, the seed decides the validation set, but it is deterministic in the names\n random.seed(317070)\n patient_names = [self.patient_name_from_file_name(f) for f in patients]\n validation_patients = random.sample(patient_names, int(VALIDATION_SET_SIZE*len(patient_names)))\n\n labels_as_dict = defaultdict(list)\n\n 
with open(paths.LUNA_LABELS_PATH, 'rb') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n next(reader) # skip the header\n for row in reader:\n label = (float(row[1]), float(row[2]), float(row[3]), float(row[4]))\n labels_as_dict[str(row[0])].append(label)\n\n # make the static data empty\n for s in self.datasets:\n self.data[s] = []\n self.labels[s] = []\n self.names[s] = []\n self.spacings[s] = []\n self.origins[s] = []\n\n with gzip.open(paths.INTERMEDIATE_DATA_PATH + 'spacings.pkl.gz') as f:\n spacings = cPickle.load(f)\n\n with gzip.open(paths.INTERMEDIATE_DATA_PATH + 'origins.pkl.gz') as f:\n origins = cPickle.load(f)\n\n # load the filenames and put into the right dataset\n for i, patient_folder in enumerate(patients):\n patient_id = str(patient_folder.split(path.sep)[-2])\n if patient_id in validation_patients:\n dataset = VALIDATION\n else:\n dataset = TRAIN\n\n\n label = labels_as_dict[patient_id]\n if self.only_positive and not label:\n continue\n\n self.data[dataset].append(patient_folder)\n self.labels[dataset].append(label)\n self.names[dataset].append(patient_id)\n self.spacings[dataset].append(spacings[patient_id])\n self.origins[dataset].append(origins[patient_id])\n\n # give every patient a unique number\n last_index = -1\n for set in self.datasets:\n self.indices[set] = range(last_index+1,last_index+1+len(self.data[set]))\n if len(self.indices[set]) > 0:\n last_index = self.indices[set][-1]\n print set, len(self.indices[set]), \"samples\"", "def get_precomp_loader(data_path, data_split, vocab, opt, batch_size=100,\n shuffle=True, num_workers=2):\n dset = PrecompDataset(data_path, data_split, vocab)\n\n data_loader = torch.utils.data.DataLoader(dataset=dset,\n batch_size=batch_size,\n shuffle=shuffle,\n pin_memory=True,\n collate_fn=collate_fn)\n return data_loader", "def __split_dataset(self):\n self.train, self.valid, _, _ = train_test_split(self.data, self.data, test_size=0.2)\n self.valid, self.test, _, _ = train_test_split(self.valid, self.valid, test_size=0.5)", "def prepareDataBatches(self, traindata, trainlabel):\n index = np.random.permutation(len(traindata))\n traindata = traindata[index]\n trainlabel = trainlabel[index]\n split_no = int(len(traindata) / self.batchSize)\n return zip(np.split(traindata[:split_no*self.batchSize], split_no), np.split(trainlabel[:split_no*self.batchSize], split_no))", "def batch_loader(data_set: Union[IterableDataset, Dataset],\n batch_size: bool,\n shuffle=False) -> DataLoader:\n return DataLoader(\n data_set,\n batch_size=batch_size,\n collate_fn=lambda x: x,\n shuffle=shuffle\n )", "def data_loader(data, train=True):\n\n loader_config = {\n 'batch_size':64,\n 'shuffle':train\n }\n \n return torch.utils.data.DataLoader(data, **loader_config)", "def data_loaders(dataset_path):\n dataset_path = dataset_path\n news_stock_dataset = NewsStockDataLoader(dataset_path)\n \n dataset_size = len(news_stock_dataset)\n indices = list(range(dataset_size))\n training_split = int(0.8 * dataset_size)\n validation_split = int(0.9 * dataset_size)\n\n np.random.seed(96)\n np.random.shuffle(indices)\n\n train_indices = indices[:training_split]\n valid_indices = indices[training_split:validation_split]\n test_indices = indices[validation_split:]\n\n train_sampler = SubsetRandomSampler(train_indices)\n valid_sampler = SubsetRandomSampler(valid_indices)\n test_sampler = SubsetRandomSampler(test_indices)\n \n collate = PadSequence()\n\n training_loader = DataLoader(news_stock_dataset,\n num_workers = 1,\n batch_size = 
Config.get(\"training_batch_size\"),\n sampler = train_sampler,\n collate_fn = collate)\n\n validation_loader = DataLoader(news_stock_dataset,\n num_workers = 1,\n batch_size = Config.get(\"validation_batch_size\"),\n sampler = valid_sampler,\n collate_fn = collate)\n\n testing_loader = DataLoader(news_stock_dataset,\n num_workers = 1,\n batch_size = Config.get(\"testing_batch_size\"),\n sampler= test_sampler,\n collate_fn = collate)\n \n return training_loader, validation_loader, testing_loader", "def load_data(self,split='train'):\n raise ValueError('Please implement me!')", "def split_data(self):\n if not self.load_data:\n raise AttributeError('Preprocessor has not loaded any data.')\n \n # 3 - Find example counts for each set\n self.n_examples = self.data[0].shape[0]\n self.n_train = int(self.n_examples * self.train_ratio)\n self.n_val = int(self.n_examples * self.val_ratio)\n self.n_test = self.n_examples - self.n_train - self.n_val\n \n logger.info(f'Set sizes:')\n logger.info(f'train: {self.n_train}')\n logger.info(f'val: {self.n_val}')\n logger.info(f'test: {self.n_test}')\n if self.n_test < 0:\n raise ValueError('Train + validation ratios must bef < 1')\n\n # 4 - Separate data into train, test, val\n if isinstance(self.data[0], pd.DataFrame):\n logger.info('Dataset is in a dataframe.')\n self.isdataframe = True\n\n self.train_data = [self.data[0].iloc[:self.n_train],\n self.data[1].iloc[:self.n_train]]\n \n self.val_data = [self.data[0].iloc[self.n_train:self.n_val + self.n_train],\n self.data[1].iloc[self.n_train:self.n_val + self.n_train]]\n \n self.test_data = [self.data[0].iloc[self.n_val + self.n_train:],\n self.data[1].iloc[self.n_val + self.n_train:]]\n logger.info('Data was split into train, val, test.')\n else:\n self.isdataframe = False\n logger.info('Dataset is in a numpy array.')\n \n # If datasets are numpy array or sparse\n self.train_data = [self.data[0][:self.n_train],\n self.data[1][:self.n_train]]\n \n self.val_data = [self.data[0][self.n_train:self.n_val + self.n_train],\n self.data[1][self.n_train:self.n_val + self.n_train]]\n \n self.test_data = [self.data[0][self.n_val + self.n_train:],\n self.data[1][self.n_val + self.n_train:]]\n logger.info('Data was split into train, val, test.')\n \n assert(self.n_train == self.train_data[0].shape[0])\n assert(self.n_val == self.val_data[0].shape[0])\n assert(self.n_test == self.test_data[0].shape[0])\n \n # Free memory\n del self.data\n \n if self.save_sets:\n self.save_datasets()", "def partition_dataset_train():\n dataset = datasets.MNIST(\n './data',\n train=True,\n download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307, ), (0.3081, ))\n ]))\n size = dist.get_world_size()\n bsz = int(128 / float(size))\n partition_sizes = [1.0 / size for _ in range(size)]\n partition = DataPartitioner(dataset, partition_sizes)\n partition = partition.use(dist.get_rank())\n train_set = torch.utils.data.DataLoader(\n partition, batch_size=bsz, shuffle=True)\n return train_set, bsz", "def prepare_data(self):\n data = self._get_dataset(self.hparams.dataset_path)\n label_encoder = data[\"label_encoder\"]\n del data[\"label_encoder\"]\n\n click.secho(\"Building inputs and labels.\", fg=\"yellow\")\n datasets = {\n \"train\": defaultdict(list),\n \"valid\": defaultdict(list),\n \"test\": defaultdict(list),\n }\n for dataset_name, dataset in data.items():\n for sample in dataset:\n instance = self.build_input(\n self.tokenizer, sample[\"text\"], label_encoder, sample[\"label\"]\n )\n for 
input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n\n click.secho(\"Padding inputs and building tensors.\", fg=\"yellow\")\n tensor_datasets = {\"train\": [], \"valid\": [], \"test\": []}\n for dataset_name, dataset in datasets.items():\n dataset = self.pad_dataset(dataset, padding=self.tokenizer.pad_index)\n for input_name in MODEL_INPUTS:\n if input_name == \"labels\":\n tensor = torch.tensor(dataset[input_name], dtype=torch.float32)\n else:\n tensor = torch.tensor(dataset[input_name])\n tensor_datasets[dataset_name].append(tensor)\n\n self.train_dataset = TensorDataset(*tensor_datasets[\"train\"])\n self.valid_dataset = TensorDataset(*tensor_datasets[\"valid\"])\n self.test_dataset = TensorDataset(*tensor_datasets[\"test\"])\n click.secho(\n \"Train dataset (Batch, Candidates, Seq length): {}\".format(\n self.train_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Valid dataset (Batch, Candidates, Seq length): {}\".format(\n self.valid_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Test dataset (Batch, Candidates, Seq length): {}\".format(\n self.test_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )", "def loadData(self):\n batch_size = 256\n \n #if self.conv_sg == True:\n # batch_size = 1 \n \n download = True\n root = self.root + self.dataset\n if self.dataset == \"MNIST\": \n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n trainset = torchvision.datasets.MNIST(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.MNIST(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR10\":\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465,), (0.2023, 0.1994, 0.2010,))])\n trainset = torchvision.datasets.CIFAR10(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR10(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR100\":\n transform = transforms.Compose([transforms.ToTensor()])\n trainset = torchvision.datasets.CIFAR100(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR100(root, train=False, download=download, transform=transform)\n \n \n trainloader = torch.utils.data.DataLoader(trainset, batch_size = batch_size,\n shuffle=False, num_workers=0, pin_memory = False)\n \n testloader = torch.utils.data.DataLoader(testset, batch_size= batch_size,\n shuffle=False, num_workers=2, pin_memory = False)\n \n return trainloader, testloader", "def train_dataloader(self) -> torch.utils.data.DataLoader: \n return torch.utils.data.DataLoader(self.dataset_train, **self.dl_kwargs)", "def test_dataloader(self) -> DataLoader:\n return DataLoader(\n self.test_dataset,\n batch_size=self.hparams.batch_size,\n shuffle=False,\n num_workers=multiprocessing.cpu_count(),\n )", "def create_dataloader(data):\r\n input_ids = torch.LongTensor([sent['input_ids'] for sent in data])\r\n input_mask = torch.LongTensor([sent['input_mask'] for sent in data])\r\n segment_ids = torch.LongTensor([sent['segment_ids'] for sent in data])\r\n label_ids = torch.LongTensor([sent['label_ids'] for sent in data])\r\n\r\n dataset = TensorDataset(input_ids, input_mask, segment_ids, label_ids)\r\n\r\n train_sampler = RandomSampler(dataset)\r\n dataloader = DataLoader(dataset, sampler=train_sampler, batch_size=BATCH_SIZE)\r\n\r\n return dataloader", 
"def data_process(self):\n logging.info('Processing the data and split files')\n lines = Utility.file_len(self.fname)\n self.lines_to_be, self.split_files = Utility.split_files(self.fname, lines,\n cpu_count().real)", "def prepare_data(self):\n # Set up the path\n self.path_target_train = os.path.join(self.data_dir, self.train_path_file_target + \".pkl\")\n self.path_target_test = os.path.join(self.data_dir, self.test_path_file_target + \".pkl\")\n\n if not os.path.exists(self.path_target_train) or not os.path.exists(self.path_target_test):\n # Create vocabularies of the appropriate sizes.\n self.create_vocabulary(self.train_path_file)\n\n # Create token ids for the training data.\n input_train_path = self.train_path_file\n target_train_path = self.train_path_file_target\n train_input, train_input_length, train_labels = self.data_to_token_ids(input_train_path, target_train_path)\n\n # Create token ids for the validation data.\n input_test_path = self.test_path_file\n target_test_path = self.test_path_file_target\n test_input, test_input_length, _ = self.data_to_token_ids(input_test_path, target_test_path, train=False)\n\n # Collect data into a list\n training_data = [train_input, train_input_length, train_labels]\n test_data = [test_input, test_input_length]\n\n # Save all the data\n with open(self.path_target_train, 'wb') as f:\n pickle.dump(training_data,f)\n with open(self.path_target_test, 'wb') as f:\n pickle.dump(test_data, f)\n else:\n # Load data\n with open(self.path_target_train, 'rb') as f:\n training_data = pickle.load(f)\n with open(self.path_target_test, 'rb') as f:\n test_data = pickle.load(f)\n\n # Initialize vocabulary\n self.initialize_vocabulary()\n\n # Convert list into a numpy array - train data\n train_input = pd.DataFrame(training_data[0]).fillna(value=0).astype(int).values\n train_length_input = np.array(training_data[1], dtype=int)\n train_labels = np.array(training_data[2], dtype=int)\n\n # Convert list into a numpy array - test data\n test_input = pd.DataFrame(test_data[0]).fillna(value=0).astype(int).values\n test_length_input = pd.DataFrame(test_data[1]).fillna(value=0).astype(int).values\n\n # Printing maximum length\n print(\"Shape of the input training matrix {}\".format(str(train_input.shape)))\n print(\"Shape of the input test matrix {}\".format(str(test_input.shape)))\n\n # Copy the files\n self.copy_files()\n\n # Return output\n return train_input, train_length_input, train_labels, test_input, test_length_input", "def load_data(batch_size=batch_size):\n trainset = LibriSpeechDataset(training_set, int(LIBRISPEECH_SAMPLING_RATE * n_seconds))\n testset = LibriSpeechDataset(validation_set, int(LIBRISPEECH_SAMPLING_RATE * n_seconds), stochastic=False)\n\n train_loader = DataLoader(trainset, batch_size=batch_size, num_workers=1, shuffle=True, drop_last=True)\n test_loader = DataLoader(testset, batch_size=1, num_workers=1, drop_last=True)\n\n return train_loader, test_loader", "def prep_data(dataset_config=None):\n \n\n data_struct = DataStruct(dataset_config)\n\n data_name = data_struct.name\n datasets_dir = data_struct.data_path\n out_dir = data_struct.save_path\n\n # If dataset already downloaded an unpacked, do nothing\n if os.path.isdir(out_dir):\n print('{} already downloaded, unpacked and processed.'.format(data_name))\n return\n\n # Check if download is required\n data_url = data_struct.url\n compressed_file_name = downloader(datasets_dir, data_url)\n\n # Unpack compressed dataset file\n unpacker(compressed_file_name, out_dir)\n\n # Custom preprocessing 
steps for data sets\n custom_preprocessor(out_dir)", "def build_data_loader(dataset, micro_batch_size, num_workers, drop_last,\n task_collate_fn=None):\n\n # Sampler.\n world_size = mpu.get_data_parallel_world_size()\n rank = mpu.get_data_parallel_rank()\n sampler = torch.utils.data.distributed.DistributedSampler(\n dataset, num_replicas=world_size, rank=rank)\n\n # Data loader. Note that batch size is the per GPU batch size.\n data_loader = torch.utils.data.DataLoader(dataset,\n batch_size=micro_batch_size,\n sampler=sampler,\n shuffle=False,\n num_workers=num_workers,\n drop_last=drop_last,\n pin_memory=True,\n collate_fn=task_collate_fn)\n\n return data_loader", "def create_split_loaders(root_dir, batch_size, seed=0, transform=transforms.ToTensor(),\n p_val=0.1, p_test=0.2, shuffle=True, \n show_sample=False, extras={}):\n \n\n # once all single json datasets are created you can concat them into a single one:\n quickdraw_dataset = CharacterDataset(root_dir=root_dir, transform=transform)\n \n # Dimensions and indices of training set\n dataset_size = len(quickdraw_dataset)\n all_indices = list(range(dataset_size))\n\n # Shuffle dataset before dividing into training & test sets\n if shuffle:\n np.random.seed(seed)\n np.random.shuffle(all_indices)\n \n # Create the validation split from the full dataset\n val_split = int(np.floor(p_val * dataset_size))\n train_ind, val_ind = all_indices[val_split :], all_indices[: val_split]\n \n # Separate a test split from the training dataset\n test_split = int(np.floor(p_test * len(train_ind)))\n train_ind, test_ind = train_ind[test_split :], train_ind[: test_split]\n print(len(train_ind), len(val_ind), len(test_ind))\n # Use the SubsetRandomSampler as the iterator for each subset\n sample_train = SubsetRandomSampler(train_ind)\n sample_test = SubsetRandomSampler(test_ind)\n sample_val = SubsetRandomSampler(val_ind)\n\n num_workers = 0\n pin_memory = False\n # If CUDA is available\n if extras:\n num_workers = extras[\"num_workers\"]\n pin_memory = extras[\"pin_memory\"]\n \n # Define the training, test, & validation DataLoaders\n train_loader = DataLoader(quickdraw_dataset, batch_size=batch_size, \n sampler=sample_train, num_workers=num_workers, \n pin_memory=pin_memory)\n\n test_loader = DataLoader(quickdraw_dataset, batch_size=batch_size, \n sampler=sample_test, num_workers=num_workers, \n pin_memory=pin_memory)\n\n val_loader = DataLoader(quickdraw_dataset, batch_size=batch_size,\n sampler=sample_val, num_workers=num_workers, \n pin_memory=pin_memory)\n\n \n # Return the training, validation, test DataLoader objects\n return (train_loader, val_loader, test_loader)", "def load_data(config, vocab, proportion: float=0.7, max_len: int=256, partition: dict=None, labels: dict=None):\n\n # columns if meta: [0] unique ID, [1] text, [2] metadata, [3] label\n # columns if no meta: [0] unique ID, [1] text, [2] label\n\n if config[\"metadata\"]:\n unique_id_col = 0\n text_col = 1\n metadata_col = 2\n label_col = 3\n else: \n unique_id_col = 0\n text_col = 1\n label_col = 3\n\n dataset = pd.read_csv(config['train_file'], header=None, sep='\\t')\n print(dataset)\n # below fix null values wrecking encode_plus\n\n # convert labels to integer and drop nas\n\n dataset.iloc[:, label_col] = pd.to_numeric(dataset.iloc[:, label_col], errors = 'coerce' )\n dataset = dataset[~ dataset[text_col].isnull()]\n\n # recreate the first column with the reset index.\n dataset = dataset[(dataset.iloc[:, label_col] == 1) | (dataset.iloc[:, label_col] == 0)] \\\n 
.reset_index().reset_index().drop(columns = ['index', 0]).rename(columns = {'level_0': 0})\n\n print(dataset)\n\n # create list of train/valid IDs if not provided\n if not partition and not labels:\n ids = list(dataset.iloc[:,unique_id_col])\n\n total_len = len(ids)\n np.random.shuffle(ids)\n\n labels = {}\n # metadata = {}\n \n partition = {'train': ids[ :int(total_len * 0.7)],\n 'valid': ids[int(total_len * 0.7): ]\n }\n for i in dataset.iloc[:, unique_id_col]:\n labels[i] = dataset.iloc[i][label_col]\n\n # set parameters for DataLoader -- num_workers = cores\n params = {'batch_size': 32,\n 'shuffle': True,\n 'num_workers': 0\n }\n\n tokenizer = AutoTokenizer.from_pretrained(vocab)\n dataset[text_col] = dataset[text_col].apply(lambda x: tokenizer.encode_plus(str(x), \\\n max_length=max_len, \\\n add_special_tokens=True, \\\n pad_to_max_length=True, \\\n truncation=True))\n\n if config['metadata']: # glove for metadata preprocessing \n glove = torchtext.vocab.GloVe(name=\"6B\", dim=50)\n dataset[metadata_col] = dataset[metadata_col].apply(lambda y: __pad__(str(y).split(\" \"), 30))\n dataset[metadata_col] = dataset[metadata_col].apply(lambda z: __glove_embed__(z, glove))\n\n train_data = dataset[dataset[unique_id_col].isin(partition['train'])]\n valid_data = dataset[dataset[unique_id_col].isin(partition['valid'])]\n\n # create train/valid generators\n training_set = AbstractDataset(data=train_data, labels=labels, metadata=config['metadata'], list_IDs=partition['train'], max_len = max_len)\n training_generator = DataLoader(training_set, **params)\n\n validation_set = AbstractDataset(data=valid_data, labels=labels, metadata=config['metadata'], list_IDs=partition['valid'],max_len = max_len)\n \n validation_generator = DataLoader(validation_set, **params)\n\n return partition, training_generator, validation_generator", "def train_dataloader(self):\r\n\r\n # transformation\r\n train_transform = Compose(\r\n [\r\n ApplyTransformToKey(\r\n key='video',\r\n transform=Compose(\r\n [\r\n UniformTemporalSubsample(8),\r\n Lambda(lambda x: x / 255.0),\r\n Normalize((0.45, 0.45, 0.45), (0.225, 0.225, 0.225)),\r\n RandomShortSideScale(min_size=256, max_size=320),\r\n RandomCrop(244),\r\n RandomHorizontalFlip(p=0.5),\r\n ]\r\n )\r\n )\r\n ]\r\n )\r\n\r\n train_dataset = pv.data.Kinetics(\r\n data_path=os.path.join(self._DATA_PATH, \"train\"),\r\n clip_sampler=pv.data.make_clip_sampler(\"random\", self._CLIP_DURATION),\r\n decode_audio=False,\r\n transform=train_transform\r\n )\r\n return torch.utils.data.DataLoader(\r\n train_dataset,\r\n batch_size=self._BATCH_SIZE,\r\n num_workers=self._NUM_WORKERS,\r\n )", "def get_driving_data_loaders(batch_size, train_dataset, valid_dataset, test_dataset, num_workers=0): \n\n valid_loader = DataLoader(dataset=valid_dataset,\n batch_size=batch_size,\n num_workers=num_workers,\n shuffle=True)\n\n train_loader = DataLoader(dataset=train_dataset,\n batch_size=batch_size,\n num_workers=num_workers,\n drop_last=True, \n shuffle=True)\n\n test_loader = DataLoader(dataset=test_dataset,\n batch_size=batch_size,\n num_workers=num_workers,\n shuffle=False)\n\n return train_loader, valid_loader, test_loader", "def load_data(data_feeder):\n return data_feeder(BATCH_SIZE,\n SEQ_LEN,\n OVERLAP,\n Q_LEVELS,\n Q_ZERO,\n Q_TYPE)", "def load_data_preprocess(self):\n\n print(\"Loading the dataset ...\")\n # load the data\n c_util = CarUtils()\n train_x, train_y, test_x, test_y, classes = c_util.load_data()\n\n # set the image ordering\n K.set_image_dim_ordering(\"th\")\n\n 
print(\"Pre-processing the dataset ...\")\n # pre-process the data\n train_x = train_x.astype('float32')\n test_x = test_x.astype('float32')\n\n train_x = train_x / 255\n test_x = test_x / 255\n\n print(train_x.shape[0], ' train samples')\n print(test_x.shape[0], ' test samples')\n\n train_y = np_utils.to_categorical(train_y, CarsClassifierModel._nb_classes)\n test_y = np_utils.to_categorical(test_y, CarsClassifierModel._nb_classes)\n\n return train_x, train_y, test_x, test_y", "def __init__(self, data_loader,\n data_train,\n data_test,\n dataset_name,\n model_kind,\n transaction_cost=0.0,\n BATCH_SIZE=30,\n GAMMA=0.7,\n ReplayMemorySize=50,\n TARGET_UPDATE=5,\n n_step=10,\n window_size=20):\n self.data_train = data_train\n self.data_test = data_test\n self.DATASET_NAME = dataset_name\n self.BATCH_SIZE = BATCH_SIZE\n self.GAMMA = GAMMA\n self.ReplayMemorySize = ReplayMemorySize\n self.window_size = window_size\n self.model_kind = model_kind\n\n self.split_point = data_loader.split_point\n self.begin_date = data_loader.begin_date\n self.end_date = data_loader.end_date\n\n self.TARGET_UPDATE = TARGET_UPDATE\n self.n_step = n_step\n self.transaction_cost = transaction_cost\n\n self.memory = ReplayMemory(ReplayMemorySize)\n\n self.train_test_split = True if data_test is not None else False\n\n self.EPS_START = 0.9\n self.EPS_END = 0.05\n self.EPS_DECAY = 500\n\n self.steps_done = 0\n\n self.PATH = os.path.join(Path(os.path.abspath(os.path.dirname(__file__))).parent,\n f'Results/{self.DATASET_NAME}/'\n f'{self.model_kind}; '\n f'DATA_KIND({self.data_train.data_kind}); '\n f'BEGIN_DATE({self.begin_date}); '\n f'END_DATE({self.end_date}); '\n f'SPLIT_POINT({self.split_point}); '\n f'WindowSize({self.window_size}); '\n f'BATCH_SIZE{self.BATCH_SIZE}; '\n f'GAMMA{self.GAMMA}; '\n f'REPLAY_MEMORY_SIZE{self.ReplayMemorySize}; '\n f'TARGET_UPDATE{self.TARGET_UPDATE}; '\n f'N_STEP{self.n_step}')\n\n if not os.path.exists(self.PATH):\n os.makedirs(self.PATH)\n\n self.model_dir = os.path.join(self.PATH, f'model.pkl')", "def make_dataloaders(params):\r\n transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])])\r\n\r\n transform_validation = transforms.Compose([transforms.ToTensor(),\r\n transforms.Normalize([0.4914, 0.4822, 0.4465],\r\n [0.2023, 0.1994, 0.2010])])\r\n\r\n transform_validation = transforms.Compose([transforms.ToTensor()])\r\n\r\n trainset = torchvision.datasets.CIFAR10(root=params['path'], train=True, transform=transform_train)\r\n testset = torchvision.datasets.CIFAR10(root=params['path'], train=False, transform=transform_validation)\r\n\r\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, num_workers=4)\r\n testloader = torch.utils.data.DataLoader(testset, batch_size=params['batch_size'], shuffle=False, num_workers=4)\r\n return trainloader, testloader", "def train_test_loaders(dataset, validation_ratio=0.2, **kwargs):\n dataset_size = len(dataset)\n test_size = int(np.floor(validation_ratio * dataset_size))\n train_size = dataset_size - test_size\n print('TRAIN SIZE {}'.format(train_size))\n print('TEST SIZE {}'.format(test_size))\n train_dataset, test_dataset = random_split(dataset, (train_size, test_size),\n generator=torch.Generator().manual_seed(RANDOM_SEED))\n train_loader = torch.utils.data.DataLoader(train_dataset, **kwargs)\n test_loader = 
torch.utils.data.DataLoader(test_dataset, **kwargs)\n return train_loader, test_loader", "def _create_data_loader(self, data, **kwargs):\n if data is None:\n return None\n\n # Set DataLoader config\n # NOTE: Not applicable if data is already a DataLoader\n config = {\n **self.config[\"train_config\"][\"data_loader_config\"],\n **kwargs,\n \"pin_memory\": self.config[\"device\"] != \"cpu\",\n }\n # Return data as DataLoader\n if isinstance(data, DataLoader):\n return data\n elif isinstance(data, Dataset):\n return DataLoader(data, **config)\n elif isinstance(data, (tuple, list)):\n return DataLoader(self._create_dataset(*data), **config)\n else:\n raise ValueError(\"Input data type not recognized.\")", "def get_data_loader(batch_size=10, num_workers=2):\n \n data_loader = torch.utils.data.DataLoader(dataset=TempuckeyDataSet(),\n batch_size=batch_size,\n shuffle=True,\n pin_memory=True,\n num_workers=num_workers,\n collate_fn=collate)\n return data_loader", "def __init__(self, data_dir, data_set, subset, batch_size, rng=None, shuffle=False, **kwargs):\n\n self.data_dir = data_dir\n self.batch_size = batch_size\n self.shuffle = shuffle\n\n self.data = load(os.path.join(data_dir, data_set+'.npz'), subset=subset)\n \n self.p = 0 # pointer to where we are in iteration\n self.rng = np.random.RandomState(1) if rng is None else rng", "def load_data(self,split='train'):\n return load_arrow_data(self.config,split)", "def data_loader(origin_data, batch_size, num_epochs=1):\n data = {}\n for key, value in origin_data.items():\n data[key] = np.copy(value)\n\n data_size = len(data['text_len'])\n num_batches_per_epoch = int((data_size-1)/batch_size) + 1\n\n for epoch in range(num_epochs):\n # shuffle the dataset at the begging of each epoch\n shuffle_indices = np.random.permutation(np.arange(data_size))\n for key, value in data.items():\n data[key] = value[shuffle_indices]\n\n for batch_num in range(num_batches_per_epoch):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n\n max_text_len = max(data['text_len'][start_index:end_index])\n\n yield (data['text'][start_index:end_index, :max_text_len],\n data['text_len'][start_index:end_index],\n data['label'][start_index:end_index],\n data['raw'][start_index:end_index])", "def data_loader(self, dataset=None, shuffle=True, size=None):\n self.log.info(\"Loading data\")\n assert dataset is not None, \"Please provide a dataset (folder name)\"\n data_path = os.path.join(config.DATA_DIR, dataset)\n # Load dataset using pickle\n with open(data_path, 'rb') as file:\n image_paths, labels = pickle.load(file)\n one_hot_labels = []\n images = []\n for i, (path, label) in enumerate(zip(image_paths, labels)):\n try:\n # One-hot encode the vectors\n one_hot = [0, 0, 0, 0]\n one_hot[label] = 1.0\n # Clean up image, normalize (mean to 0, std to 1)\n image = Image.open(path)\n image = np.asarray(image, dtype=np.float32) / 255\n except: # If there is some issue reading in data, skip datapoint\n continue\n one_hot_labels.append(np.asarray(one_hot, dtype=np.float32))\n images.append(image)\n # Shuffle data before cutting it into test and src\n if shuffle:\n x = list(zip(images, one_hot_labels))\n random.shuffle(x)\n images, one_hot_labels = zip(*x)\n self.log.info(\"Separating data into test and src.\")\n split_idx = int(config.TRAIN_TEST_SPLIT * len(one_hot_labels))\n train_input = images[:split_idx]\n train_target = one_hot_labels[:split_idx]\n test_input = images[split_idx:]\n test_target = one_hot_labels[split_idx:]\n if size:\n 
assert size < len(train_input), \"Final dataset size too big, not enough data\"\n train_input = train_input[:size]\n train_target = train_target[:size]\n self.log.info(\" -- test : {}\".format(len(test_target)))\n self.log.info(\" -- src: {}\".format(len(train_target)))\n # Convert to nparray before sending over\n return np.array(train_input), \\\n np.array(train_target), \\\n np.array(test_input), \\\n np.array(test_target)", "def dataio_prepare(hparams, tokenizer):\n\n # 1. Define datasets\n data_folder = hparams[\"data_folder\"]\n\n train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(\n csv_path=hparams[\"train_csv\"], replacements={\"data_root\": data_folder},\n )\n\n if hparams[\"sorting\"] == \"ascending\":\n # we sort training data to speed up training and get better results.\n train_data = train_data.filtered_sorted(\n sort_key=\"duration\",\n key_max_value={\"duration\": hparams[\"avoid_if_longer_than\"]},\n )\n # when sorting do not shuffle in dataloader ! otherwise is pointless\n hparams[\"dataloader_options\"][\"shuffle\"] = False\n\n elif hparams[\"sorting\"] == \"descending\":\n train_data = train_data.filtered_sorted(\n sort_key=\"duration\",\n reverse=True,\n key_max_value={\"duration\": hparams[\"avoid_if_longer_than\"]},\n )\n # when sorting do not shuffle in dataloader ! otherwise is pointless\n hparams[\"dataloader_options\"][\"shuffle\"] = False\n\n elif hparams[\"sorting\"] == \"random\":\n pass\n\n else:\n raise NotImplementedError(\n \"sorting must be random, ascending or descending\"\n )\n\n valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(\n csv_path=hparams[\"valid_csv\"], replacements={\"data_root\": data_folder},\n )\n # We also sort the validation data so it is faster to validate\n valid_data = valid_data.filtered_sorted(sort_key=\"duration\")\n\n test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(\n csv_path=hparams[\"test_csv\"], replacements={\"data_root\": data_folder},\n )\n\n # We also sort the validation data so it is faster to validate\n test_data = test_data.filtered_sorted(sort_key=\"duration\")\n\n datasets = [train_data, valid_data, test_data]\n\n # 2. Define audio pipeline:\n @sb.utils.data_pipeline.takes(\"wav\")\n @sb.utils.data_pipeline.provides(\"sig\")\n def audio_pipeline(wav):\n info = torchaudio.info(wav)\n sig = sb.dataio.dataio.read_audio(wav)\n resampled = torchaudio.transforms.Resample(\n info.sample_rate, hparams[\"sample_rate\"],\n )(sig)\n return resampled\n\n sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)\n\n # 3. Define text pipeline:\n @sb.utils.data_pipeline.takes(\"wrd\")\n @sb.utils.data_pipeline.provides(\n \"tokens_list\", \"tokens_bos\", \"tokens_eos\", \"tokens\"\n )\n def text_pipeline(wrd):\n tokens_list = tokenizer.sp.encode_as_ids(wrd)\n yield tokens_list\n tokens_bos = torch.LongTensor([hparams[\"bos_index\"]] + (tokens_list))\n yield tokens_bos\n tokens_eos = torch.LongTensor(tokens_list + [hparams[\"eos_index\"]])\n yield tokens_eos\n tokens = torch.LongTensor(tokens_list)\n yield tokens\n\n sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)\n\n # 4. 
Set output:\n sb.dataio.dataset.set_output_keys(\n datasets, [\"id\", \"sig\", \"tokens_bos\", \"tokens_eos\", \"tokens\"],\n )\n return train_data, valid_data, test_data", "def preprocess_train_data(self):\r\n print(\"* Preprocessing training data.\", flush=True)\r\n prep.create_HDF_file(self.C.training_set, is_training_set=True)\r\n\r\n self.print_time_elapsed()", "def get_dataloaders(args):\n if args.dataset == 'heat':\n dataset_class = heat.HeatDiffusionDataset\n else:\n raise ValueError(f'Unknown dataset {args.dataset}')\n train_dataset = dataset_class(\n dataset_class.get_train_path(args.data_path), args, train=True)\n if args.dist:\n train_sampler = torch.utils.data.distributed.DistributedSampler(\n train_dataset)\n else:\n train_sampler = torch.utils.data.RandomSampler(train_dataset)\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, num_workers=args.workers,\n sampler=train_sampler, pin_memory=True, drop_last=args.drop_last)\n if not args.no_eval:\n validation_dataset = dataset_class(\n dataset_class.get_validation_path(args.data_path), args, train=False)\n if args.dist:\n validation_sampler = torch.utils.data.distributed.DistributedSampler(\n validation_dataset, shuffle=False)\n else:\n validation_sampler = torch.utils.data.SequentialSampler(\n validation_dataset)\n validation_loader = torch.utils.data.DataLoader(\n validation_dataset, batch_size=args.batch_size,\n num_workers=args.workers, sampler=validation_sampler,\n pin_memory=True, drop_last=args.drop_last)\n\n test_dataset = dataset_class(\n dataset_class.get_test_path(args.data_path), args, train=False)\n if args.dist:\n test_sampler = torch.utils.data.distributed.DistributedSampler(\n test_dataset, shuffle=False)\n else:\n test_sampler = torch.utils.data.SequentialSampler(\n test_dataset)\n test_loader = torch.utils.data.DataLoader(\n test_dataset, batch_size=args.batch_size,\n num_workers=args.workers, sampler=test_sampler,\n pin_memory=True, drop_last=args.drop_last)\n else:\n validation_loader = None\n test_loader = None\n\n # Update the data shape if needed.\n if args.data_shape is None:\n args.data_shape = train_dataset.get_shape()\n if args.data_target_shape is None:\n args.data_target_shape = train_dataset.get_target_shape()\n\n return train_loader, validation_loader, test_loader", "def fit(self, data_loader):\n train_data, valid_data = data_loader.load()\n\n self.compile(self.optimizer, self.loss)\n super().fit(\n x=train_data,\n validation_data=valid_data,\n validation_steps=32, # validate 32 batches at a time\n validation_freq=1, # validate every 1 epoch\n epochs=self.hparams.num_epochs,\n shuffle=False, # dataset instances already handle shuffling\n )\n self.save()", "def load_dataloaders(args):\n logger.info(\"Loading dataloaders...\")\n p_path = os.path.join(\"./data/\", \"df_unencoded.pkl\")\n train_path = os.path.join(\"./data/\", \"df_encoded.pkl\")\n if (not os.path.isfile(p_path)) and (not os.path.isfile(train_path)):\n df = get_data(args, load_extracted=False)\n elif os.path.isfile(p_path) and (not os.path.isfile(train_path)):\n df = get_data(args, load_extracted=True)\n elif os.path.isfile(train_path):\n df = load_pickle(\"df_encoded.pkl\")\n \n # Train-Test split\n msk = np.random.rand(len(df)) < args.train_test_ratio\n trainset = df[msk]\n testset = df[~msk]\n \n trainset = text_dataset(trainset, args)\n max_features_length = trainset.max_x_len\n max_seq_len = trainset.max_y_len\n train_length = len(trainset)\n train_loader = DataLoader(trainset, 
batch_size=args.batch_size, shuffle=True,\\\n num_workers=0, collate_fn=Pad_Sequence(), pin_memory=False)\n \n testset = text_dataset(testset, args)\n test_length = len(testset)\n test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=True,\\\n num_workers=0, collate_fn=Pad_Sequence(), pin_memory=False)\n return train_loader, train_length, max_features_length, max_seq_len, test_loader, test_length", "def build_dataloaders(dataset, batch_size, train_test_split=0.1, train_shuffle=True, eval_shuffle=True):\n # 데이터셋 길이\n dataset_len = len(dataset)\n\n # 학습, 평가 데이터 나누기\n eval_len = int(dataset_len * train_test_split)\n train_len = dataset_len - eval_len\n\n train_dataset, eval_dataset = random_split(dataset, (train_len, eval_len))\n\n train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=train_shuffle)\n eval_loader = DataLoader(eval_dataset, batch_size=batch_size, shuffle=eval_shuffle)\n\n\n logging.info(f'''train_dataloader size: {len(train_loader.dataset)} | shuffle: {train_shuffle}\n eval_dataloader size: {len(eval_loader.dataset)} | shuffle: {eval_shuffle}''')\n\n return train_loader, eval_loader", "def load_data():\n\n # Load data from categories\n comp = fetch_20newsgroups(subset='all', categories=['comp.graphics', 'comp.sys.mac.hardware', 'comp.windows.x'], \\\n shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))\n science = fetch_20newsgroups(subset='all', categories=['sci.crypt', 'sci.electronics', 'sci.space'], \\\n shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))\n politics = fetch_20newsgroups(subset='all', categories=['talk.politics.guns', 'talk.politics.mideast'], \\\n shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))\n religion = fetch_20newsgroups(subset='all', categories=['alt.atheism', 'soc.religion.christian'], \\\n shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))\n recreation = fetch_20newsgroups(subset='all', categories=['rec.autos', 'rec.sport.baseball', 'rec.sport.hockey'], \\\n shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))\n\n # Print total number of documents\n data_len = [len(comp.data), len(science.data), len(politics.data), len(recreation.data), len(religion.data)]\n\n # Subsample classes to create a balanced dataset\n sub_k = min(data_len)\n comp.data, comp.target = [list(t) for t in zip(*random.sample(list(zip(comp.data, comp.target)), sub_k))]\n science.data, science.target = [list(t) for t in zip(*random.sample(list(zip(science.data, science.target)), sub_k))]\n politics.data, politics.target = [list(t) for t in zip(*random.sample(list(zip(politics.data, politics.target)), sub_k))]\n religion.data, religion.target = [list(t) for t in zip(*random.sample(list(zip(religion.data, religion.target)), sub_k))]\n recreation.data, recreation.target = [list(t) for t in zip(*random.sample(list(zip(recreation.data, recreation.target)), sub_k))]\n\n # Subcategories labels\n subcat_comp = np.array(comp.target)\n subcat_scien = np.array(science.target) + len(comp.target_names)\n subcat_polit = np.array(politics.target) + len(comp.target_names) + len(science.target_names)\n subcat_rel = np.array(religion.target) + len(comp.target_names) + len(science.target_names) + len(politics.target_names)\n subcat_rec = np.array(recreation.target) + len(comp.target_names) + len(science.target_names) + len(politics.target_names) + len(religion.target_names)\n\n # Assign labels to train data based on categories\n y_comp = np.ones(len(comp.data))\n 
y_scien = 2*np.ones(len(science.data))\n y_polit = 3*np.ones(len(politics.data))\n y_rel = 4*np.ones(len(religion.data))\n y_rec = 5*np.ones(len(recreation.data))\n labels = np.concatenate((y_comp,y_scien,y_polit,y_rel,y_rec), axis=None)\n\n # Computers\n train_comp, test_comp, y_train_comp, y_test_comp, subcat_comp_train, subcat_comp_test = train_test_split(comp.data, y_comp, subcat_comp, test_size=0.2, random_state=42)\n train_comp, val_comp, y_train_comp, y_val_comp, subcat_comp_train, subcat_comp_val = train_test_split(train_comp, y_train_comp, subcat_comp_train, test_size=0.25, random_state=42)\n\n # Sciences\n train_scien, test_scien, y_train_scien, y_test_scien, subcat_scien_train, subcat_scien_test = train_test_split(science.data, y_scien, subcat_scien, test_size=0.2, random_state=42)\n train_scien, val_scien, y_train_scien, y_val_scien, subcat_scien_train, subcat_scien_val = train_test_split(train_scien, y_train_scien, subcat_scien_train, test_size=0.25, random_state=42)\n\n # Politics\n train_polit, test_polit, y_train_polit, y_test_polit, subcat_polit_train, subcat_polit_test = train_test_split(politics.data, y_polit, subcat_polit, test_size=0.2, random_state=42)\n train_polit, val_polit, y_train_polit, y_val_polit, subcat_polit_train, subcat_polit_val = train_test_split(train_polit, y_train_polit, subcat_polit_train, test_size=0.25, random_state=42)\n\n # Religion\n train_rel, test_rel, y_train_rel, y_test_rel, subcat_rel_train, subcat_rel_test = train_test_split(religion.data, y_rel, subcat_rel, test_size=0.2, random_state=42)\n train_rel, val_rel, y_train_rel, y_val_rel, subcat_rel_train, subcat_rel_val = train_test_split(train_rel, y_train_rel, subcat_rel_train, test_size=0.25, random_state=42)\n\n # Recreation\n train_rec, test_rec, y_train_rec, y_test_rec, subcat_rec_train, subcat_rec_test = train_test_split(recreation.data, y_rec, subcat_rec, test_size=0.2, random_state=42)\n train_rec, val_rec, y_train_rec, y_val_rec, subcat_rec_train, subcat_rec_val = train_test_split(train_rec, y_train_rec, subcat_rec_train, test_size=0.25, random_state=42)\n\n # Corpus from all categories in train set\n newsgroups_train = train_comp + train_scien + train_polit + train_rel + train_rec\n #print(f\"Total number of documents in all categories in the train set is {len(newsgroups_train)}.\")\n train_labels = np.concatenate((y_train_comp,y_train_scien,y_train_polit,y_train_rel,y_train_rec), axis=None)\n #print(train_labels.shape)\n train_subcat = np.concatenate((subcat_comp_train,subcat_scien_train,subcat_polit_train,subcat_rel_train,subcat_rec_train), axis=None)\n #print(train_subcat.shape)\n\n # Corpus from all categories in test set\n newsgroups_test = test_comp + test_scien + test_polit + test_rel + test_rec\n test_labels = np.concatenate((y_test_comp,y_test_scien,y_test_polit,y_test_rel,y_test_rec), axis=None)\n test_subcat = np.concatenate((subcat_comp_test,subcat_scien_test,subcat_polit_test,subcat_rel_test,subcat_rec_test), axis=None)\n\n # Corpus from all categories in validation set\n newsgroups_val = val_comp + val_scien + val_polit + val_rel + val_rec\n val_labels = np.concatenate((y_val_comp,y_val_scien,y_val_polit,y_val_rel,y_val_rec), axis=None)\n val_subcat = np.concatenate((subcat_comp_val,subcat_scien_val,subcat_polit_val,subcat_rel_val,subcat_rec_val), axis=None)\n\n # Data Split\n total = len(test_labels) + len(val_labels) + len(train_labels)\n\n return newsgroups_train, train_labels, newsgroups_test, test_labels, newsgroups_val, val_labels, train_subcat, test_subcat, 
val_subcat", "def train_dataloader(self) -> data.DataLoader:\n # Random weighted sampler to approach the imbalanced dataset\n self.weights = [1.0 / i for i in self.weights]\n\n _sample_weights = [0] * len(self.datasets['train'])\n\n for idx, (_, label) in enumerate(self.datasets['train']):\n _weight = self.weights[label]\n _sample_weights[idx] = _weight\n\n random_sampler = data.WeightedRandomSampler(_sample_weights,\n len(self.datasets['train']), replacement=False)\n\n return data.DataLoader(dataset=self.datasets['train'], batch_size=self.batch_size,\n num_workers=self.num_workers, pin_memory=False,\n sampler=random_sampler)", "def preprocess_data(self):\n\n self._preprocess_train_data()\n self._preprocess_test_data()", "def load_data(self):\n\n self._load_train_data()\n self._load_test_data()", "def test_dataloader(self) -> data.DataLoader:\n return data.DataLoader(dataset=self.datasets['test'], batch_size=self.batch_size,\n num_workers=self.num_workers, shuffle=False, pin_memory=False)", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)\n \n self._results_ = None", "def get_data_loader(target_classes, batch_size):\n classes = ('plane', 'car', 'bird', 'cat',\n 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n ########################################################################\n # The output of torchvision datasets are PILImage images of range [0, 1].\n # We transform them to Tensors of normalized range [-1, 1].\n transform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n trainset = torchvision.datasets.CIFAR10(root='./data', train=True,\n download=True, transform=transform)\n # Get the list of indices to sample from\n relevant_train_indices = get_relevant_indices(\n trainset,\n classes,\n target_classes)\n # Split into train and validation\n np.random.seed(1000) # Fixed numpy random seed for reproducible shuffling\n np.random.shuffle(relevant_train_indices)\n split = int(len(relevant_train_indices) * 0.8)\n relevant_train_indices, relevant_val_indices = relevant_train_indices[:split], relevant_train_indices[split:]\n train_sampler = SubsetRandomSampler(relevant_train_indices)\n train_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,\n num_workers=0, sampler=train_sampler)\n val_sampler = SubsetRandomSampler(relevant_val_indices)\n val_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,\n num_workers=0, sampler=val_sampler)\n testset = torchvision.datasets.CIFAR10(root='./data', train=False,\n download=True, transform=transform)\n relevant_test_indices = get_relevant_indices(testset, classes, target_classes)\n test_sampler = SubsetRandomSampler(relevant_test_indices)\n test_loader = torch.utils.data.DataLoader(testset, batch_size=batch_size,\n num_workers=0, sampler=test_sampler)\n return train_loader, val_loader, test_loader, classes", "def load_torchvision_data(data, label, test_size = 0.2, valid_size=0.1, splits=None, shuffle=True,\n stratified=False, random_seed=None, batch_size = 64,\n maxsize = None, maxsize_test=None, num_workers = 0):\n\n x_train, x_test, y_train, y_test = train_test_split(data, label, test_size = test_size, stratify=label)\n train, test = CERDataset(x_train, y_train), CERDataset(x_test, y_test)\n\n if type(train.targets) is list or type(train.targets) is np.ndarray:\n train.targets = torch.LongTensor(train.targets)\n test.targets = torch.LongTensor(test.targets)\n\n if not hasattr(train, 'classes') or not train.classes:\n train.classes = 
sorted(torch.unique(train.targets).tolist())\n test.classes = sorted(torch.unique(train.targets).tolist())\n\n ### Data splitting\n fold_idxs = {}\n if splits is None and valid_size == 0:\n ## Only train\n fold_idxs['train'] = np.arange(len(train))\n elif splits is None and valid_size > 0:\n ## Train/Valid\n train_idx, valid_idx = random_index_split(len(train), 1-valid_size, (maxsize, None)) # No maxsize for validation\n fold_idxs['train'] = train_idx\n fold_idxs['valid'] = valid_idx\n\n for k, idxs in fold_idxs.items():\n if maxsize and maxsize < len(idxs):\n fold_idxs[k] = np.sort(np.random.choice(idxs, maxsize, replace = False))\n\n sampler_class = SubsetRandomSampler if shuffle else SubsetSampler\n fold_samplers = {k: sampler_class(idxs) for k,idxs in fold_idxs.items()}\n\n ### Create DataLoaders\n dataloader_args = dict(batch_size=batch_size,num_workers=num_workers)\n\n fold_loaders = {k:dataloader.DataLoader(train, sampler=sampler,**dataloader_args)\n for k,sampler in fold_samplers.items()}\n\n if maxsize_test and maxsize_test < len(test):\n test_idxs = np.sort(np.random.choice(len(test), maxsize_test, replace = False))\n sampler_test = SubsetSampler(test_idxs) # For test don't want Random\n dataloader_args['sampler'] = sampler_test\n else:\n dataloader_args['shuffle'] = False\n test_loader = dataloader.DataLoader(test, **dataloader_args)\n fold_loaders['test'] = test_loader\n\n fnames, flens = zip(*[[k,len(v)] for k,v in fold_idxs.items()])\n fnames = '/'.join(list(fnames) + ['test'])\n flens = '/'.join(map(str, list(flens) + [len(test)]))\n\n if hasattr(train, 'data'):\n logger.info('Input Dim: {}'.format(train.data.shape[1:]))\n logger.info('Classes: {} (effective: {})'.format(len(train.classes), len(torch.unique(train.targets))))\n print(f'Fold Sizes: {flens} ({fnames})')\n\n return fold_loaders, {'train': train, 'test':test}", "def make_standard_loader(self, dataset):\n return torch.utils.data.DataLoader(\n dataset,\n batch_size=self.batch_size,\n shuffle=False,\n drop_last=False,\n pin_memory=not (cfg.DEBUG > 0),\n num_workers=self.num_workers,\n )", "def DataLoader(data_place):\n # Nd = []\n # Np = []\n # Nz = []\n # channel_num = []\n # images = []\n # id_labels = []\n # pose_labels = []\n\n # mycase\n # Nz = 50\n # channel_num = 3\n # images = np.load('{}/images.npy'.format(data_place))\n # id_labels = np.load('{}/ids.npy'.format(data_place))\n # pose_labels = np.load('{}/yaws.npy'.format(data_place))\n #\n # Np = int(pose_labels.max() + 1)\n # Nd = int(id_labels.max() + 1)\n #\n # return [images, id_labels, pose_labels, Nd, Np, Nz, channel_num]\n\n # mycase MultiPIE\n Nz = 50\n channel_num = 3\n image_attributes_df = pd.read_csv(data_place)\n\n Nd = int(np.max(image_attributes_df['Id'])+1)\n Np = int(np.max(image_attributes_df['pose'])+1)\n Ni = int(np.max(image_attributes_df['illum'])+1)\n\n return [image_attributes_df, Nd, Np, Ni, Nz, channel_num]", "def split_data(self):\r\n print('split data')\r\n np.random.shuffle(self.dataList)\r\n l = len(self.dataList)/self.fold\r\n self.dataList = [self.dataList[i*l: (i+1)*l] for i in range(self.fold-1)] + [self.dataList[(self.fold-1)*l:]] # each element in the list is splitted data list\r", "def load_data(root, num_seen, batch_size, num_workers):\n CIFAR10.init(root, num_seen)\n query_dataset = CIFAR10('query', transform=query_transform())\n seen_dataset = CIFAR10('seen', transform=train_transform())\n unseen_dataset = CIFAR10('unseen', transform=train_transform())\n retrieval_dataset = CIFAR10('retrieval', 
transform=train_transform())\n\n query_dataloader = DataLoader(\n query_dataset,\n batch_size=batch_size,\n pin_memory=True,\n num_workers=num_workers,\n )\n\n seen_dataloader = DataLoader(\n seen_dataset,\n shuffle=True,\n batch_size=batch_size,\n pin_memory=True,\n num_workers=num_workers,\n )\n\n unseen_dataloader = DataLoader(\n unseen_dataset,\n shuffle=True,\n batch_size=batch_size,\n pin_memory=True,\n num_workers=num_workers,\n )\n\n retrieval_dataloader = DataLoader(\n retrieval_dataset,\n shuffle=True,\n batch_size=batch_size,\n pin_memory=True,\n num_workers=num_workers,\n )\n\n return query_dataloader, seen_dataloader, unseen_dataloader, retrieval_dataloader", "def data_loader(\n self, batch_size: int = 1, iter_steps: int = 0, batch_as_list: bool = True\n ) -> DataLoader:\n data = self.data\n datasets = []\n\n for _, dat in data.items():\n datasets.append(dat.dataset())\n\n if len(datasets) < 1:\n raise FileNotFoundError(\n \"no datasets available for this model to create a loader from\"\n )\n\n return DataLoader(\n *datasets,\n batch_size=batch_size,\n iter_steps=iter_steps,\n batch_as_list=batch_as_list,\n )", "def create_loader(dataset: Dataset, cfg: trainer_configs.BaseDatasetConfig, batch_size: int, *,\r\n collate_fn: Optional[Callable[[List[Any]], Any]] = None) -> DataLoader:\r\n # return DataLoader(\r\n # dataset, batch_size=batch_size, num_workers=cfg.num_workers,\r\n # drop_last=cfg.drop_last, collate_fn=collate_fn) # type: ignore\r\n return DataLoader(\r\n dataset, batch_size=batch_size, shuffle=cfg.shuffle, num_workers=cfg.num_workers,\r\n drop_last=cfg.drop_last, collate_fn=collate_fn) # type: ignore\r", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)", "def preprocessing_pipeline(self):\n self.__multilabel_processing()\n self.__split_dataset()\n self.__save_datasets()", "def train_dataloader(self) -> DataLoader:\n return self._custom_data_loader()", "def load_dataset(data_dir, dataset_split, logger=None):\n current_dir = Path()\n dir_path = current_dir / \"data\" / \"break_data\" / \"preprocessed\"\n file_name = \"dataset_preprocessed_\" + dataset_split + \".pkl\"\n if not (dir_path / file_name).is_file():\n # Download and preprocess the BREAK dataset (logical form and lexicon), and save the preprocessed data.\n if logger:\n logger.info('Downloading and preparing datasets...')\n dataset_logical = load_dataset('break_data', dataset_split, cache_dir=data_dir)\n save_obj(dir_path, dataset_logical, file_name)\n\n # Load the saved preprocessed data.\n dataset = load_obj(dir_path, file_name)\n return dataset", "def data_partition(num_workers, data_set, separate=True):\n\n size = data_set.data.shape[0]\n ind = list(range(size))\n\n if separate:\n shuffle(ind)\n # worker_size is the number of samples per worker. 
The last worker however receives the additional samples\n worker_size = size // num_workers\n data = dict.fromkeys(list(range(num_workers)))\n\n for w in range(num_workers):\n if w is not num_workers - 1:\n data[w] = ind[w * worker_size: (w+1) * worker_size]\n # data[w][\"X\"] = X_train[ind[w * worker_size: (w + 1) * worker_size], :]\n # data[w][\"Y\"] = Y_train[ind[w * worker_size: (w + 1) * worker_size], :]\n else:\n data[w] = ind[w * worker_size:]\n # data[w][\"X\"] = X_train[ind[w * worker_size:], :]\n # data[w][\"Y\"] = Y_train[ind[w * worker_size:], :]\n\n else:\n data = dict.fromkeys(list(range(num_workers)))\n for w in range(num_workers):\n shuffle(ind)\n data[w] = ind\n # data[w][\"X\"] = X_train[ind, :]\n # data[w][\"Y\"] = Y_train[ind, :]\n\n return data", "def setup_datasets(self):\r\n\r\n train_transform = transforms.Compose(\r\n [\r\n transforms.Resize(self.crop_size),\r\n transforms.RandomRotation(degrees=self.random_angle, resample=Image.BILINEAR),\r\n transforms.RandomResizedCrop(\r\n size=self.crop_size, scale=(1-self.random_scale, 1+self.random_scale), ratio=(1, 1)),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize(\r\n mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225]\r\n )\r\n ]\r\n )\r\n val_transform = transforms.Compose(\r\n [\r\n transforms.Resize(self.crop_size),\r\n transforms.CenterCrop(self.crop_size),\r\n transforms.ToTensor(),\r\n transforms.Normalize(\r\n mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225]\r\n )\r\n ]\r\n )\r\n\r\n train_dataset = CocoDatasetPairs(\r\n root_dir=self.coco_path,\r\n set_name='train2014',\r\n transform=train_transform,\r\n dataset_size_ratio=self.dataset_size_ratio\r\n )\r\n train_subset_dataset = Subset(train_dataset, range(0, len(train_dataset), 5*self.dataset_size_ratio))\r\n val_dataset = CocoDatasetPairs(\r\n root_dir=self.coco_path,\r\n set_name='val2014',\r\n transform=val_transform,\r\n )\r\n\r\n train_loader = DataLoader(\r\n train_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=True,\r\n num_workers=self.num_workers\r\n )\r\n train_subset_loader = DataLoader(\r\n train_subset_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=False,\r\n num_workers=self.num_workers\r\n )\r\n val_loader = DataLoader(\r\n val_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=False,\r\n num_workers=self.num_workers\r\n )\r\n return train_loader, train_subset_loader, val_loader", "def batch_fit(self, data_loader: torch.utils.data.DataLoader, epochs: int):\n pass" ]
[ "0.7089528", "0.69590586", "0.6943236", "0.6820497", "0.67818403", "0.665066", "0.662082", "0.65793914", "0.65624624", "0.65569514", "0.6556639", "0.6548388", "0.6527811", "0.65153986", "0.6513853", "0.6507452", "0.65053266", "0.6497079", "0.64861447", "0.64734644", "0.64694095", "0.64604187", "0.64575887", "0.64521027", "0.64437073", "0.6428617", "0.64278525", "0.64177746", "0.6410698", "0.6404702", "0.6395263", "0.6383078", "0.6373902", "0.6369722", "0.63569915", "0.6354748", "0.63518393", "0.63460046", "0.63434017", "0.6338916", "0.6333969", "0.63264334", "0.63251513", "0.6321624", "0.6319531", "0.6318116", "0.63173205", "0.6308114", "0.6300641", "0.62945074", "0.6287585", "0.62846863", "0.6279365", "0.6274355", "0.62505513", "0.6246035", "0.6245711", "0.6236322", "0.6236113", "0.6229379", "0.62085766", "0.62047535", "0.6202123", "0.6201594", "0.619997", "0.61979556", "0.6190934", "0.6188987", "0.61814106", "0.61798495", "0.61717063", "0.61654145", "0.6159679", "0.6159022", "0.61512446", "0.61480296", "0.6144366", "0.61394805", "0.6139193", "0.6137181", "0.61229986", "0.61140454", "0.61138356", "0.6112638", "0.6101763", "0.60935855", "0.6089087", "0.608729", "0.60830957", "0.60795885", "0.60709816", "0.60702926", "0.6067522", "0.6065391", "0.605872", "0.6057294", "0.6057273", "0.6052119", "0.60518205", "0.6038688", "0.60385853" ]
0.0
-1
This function computes the distribution internal parameters from its first two moments.
def _compute_internals(self, moments):

    [mean, stdv] = moments
    internals = {}
    internals['a'] = mean - np.sqrt(3) * stdv
    internals['b'] = mean + np.sqrt(3) * stdv

    return internals
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['mu'] = mean\n internals['sigma'] = stdv\n\n return internals", "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['k'] = mean ** 2. / stdv ** 2.\n internals['LAMBDA'] = mean / stdv ** 2.\n\n return internals", "def calc_moments(distribution):\n x = torch.linspace(2, 22, 31)\n d_mean = torch.sum(x * distribution)\n d_var = torch.sum(distribution * (x - d_mean) ** 2) \n \n return d_mean, torch.sqrt(d_var)", "def posteriorDistribution(x,z,beta,sigma2):\r\n ### TODO: Write your code here\r\n mu = 0\r\n Cov = 0\r\n\r\n x_s = []\r\n for i in np.linspace(-1 , 1 , 150):\r\n for j in np.linspace(-1 , 1 , 150):\r\n x_s.append([i,j])\r\n x_s = np.array(x_s)\r\n\r\n X = []\r\n for i in x:\r\n j = [1,i[0]]\r\n X.append(j)\r\n X = np.array(X)\r\n\r\n common = np.matmul( X.T , X) + np.identity(2) * sigma2/beta\r\n common = np.linalg.inv(common)\r\n Cov = common * sigma2\r\n mu = np.matmul(common , np.matmul (X.T , z) )\r\n mu = mu.flatten()\r\n print(\"X.shape: \" , X.shape)\r\n print(\"z.shape: \",z.shape)\r\n print(\"Cov.shape\" , Cov.shape)\r\n print(\"mu.shape: \",mu.shape)\r\n density = util.density_Gaussian(mu , Cov , x_s).reshape(150 , 150 ).T\r\n print(\"density.shape\",density.shape)\r\n X,Y = np.meshgrid( np.linspace(-1,1,150) , np.linspace(-1,1,150) )\r\n\r\n \r\n\r\n plt.contour( X , Y , np.reshape(density , (150, 150 )))\r\n plt.plot(-0.1 , -0.5 , marker = 'o' , MarkerSize = 10 , label = 'point a')\r\n plt.xlabel('a0 ')\r\n plt.ylabel(' a1 ')\r\n plt.legend()\r\n plt.xlim = (-1,1)\r\n plt.ylim = (-1,1)\r\n plt.title('p(a|x1,z1....xn,zn) for '+ str(len(x)) +' samples')\r\n plt.show() \r\n print('-x-x-x-x-x-x-x-x-x')\r\n\r\n return (mu,Cov)", "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n cov = stdv / mean\n zeta = np.sqrt(np.log(1. 
+ cov ** 2.))\n LAMBDA = np.log(mean) - 0.5 * zeta ** 2.\n internals = {}\n internals['LAMBDA'] = LAMBDA\n internals['zeta'] = zeta\n\n return internals", "def moments(self):", "def parameters(self):\n\n m = self.__m\n s = linalg.cholesky(self.__prod).transpose()\n w = self.__weight\n\n # Compute the parameters of the posterior distribution.\n return linalg.solve(s[:m, :m], s[:m, m:]), \\\n np.dot(s[:m, :m].transpose(), s[:m, :m]), \\\n np.dot(s[m:, m:].transpose(), s[m:, m:]) / w, \\\n w", "def parameters_to_marginal_moments(prob, distmu, distsigma):\n good = np.isfinite(prob) & np.isfinite(distmu) & np.isfinite(distsigma)\n prob = prob[good]\n distmu = distmu[good]\n distsigma = distsigma[good]\n distmean, diststd, _ = parameters_to_moments(distmu, distsigma)\n rbar = (prob * distmean).sum()\n r2bar = (prob * (np.square(diststd) + np.square(distmean))).sum()\n return rbar, np.sqrt(r2bar - np.square(rbar))", "def _get_distribution_variables(self, R):\n domain, Domain = self.domain_Domain\n phase_name = self.phase_name\n\n R_typ = self.phase_param.R_typ # [m]\n # Particle-size distribution (area-weighted)\n f_a_dist = self.phase_param.f_a_dist(R) # [m-1]\n\n # Ensure the distribution is normalised, irrespective of discretisation\n # or user input\n f_a_dist = f_a_dist / pybamm.Integral(f_a_dist, R) # [m-1]\n\n # Volume-weighted particle-size distribution\n f_v_dist = R * f_a_dist / pybamm.Integral(R * f_a_dist, R) # [m-1]\n\n # Number-based particle-size distribution\n f_num_dist = (f_a_dist / R**2) / pybamm.Integral(\n f_a_dist / R**2, R\n ) # [m-1]\n\n # True mean radii and standard deviations, calculated from the f_a_dist that\n # was given, all have units [m]\n R_num_mean = pybamm.Integral(R * f_num_dist, R)\n R_a_mean = pybamm.Integral(R * f_a_dist, R)\n R_v_mean = pybamm.Integral(R * f_v_dist, R)\n sd_num = pybamm.sqrt(pybamm.Integral((R - R_num_mean) ** 2 * f_num_dist, R))\n sd_a = pybamm.sqrt(pybamm.Integral((R - R_a_mean) ** 2 * f_a_dist, R))\n sd_v = pybamm.sqrt(pybamm.Integral((R - R_v_mean) ** 2 * f_v_dist, R))\n\n # X-average the means and standard deviations to give scalars\n # (to remove the \"electrode\" domain, if present)\n R_num_mean = pybamm.x_average(R_num_mean)\n R_a_mean = pybamm.x_average(R_a_mean)\n R_v_mean = pybamm.x_average(R_v_mean)\n sd_num = pybamm.x_average(sd_num)\n sd_a = pybamm.x_average(sd_a)\n sd_v = pybamm.x_average(sd_v)\n\n # X-averaged distributions, or broadcast\n if R.domains[\"secondary\"] == [f\"{domain} electrode\"]:\n f_a_dist_xav = pybamm.x_average(f_a_dist)\n f_v_dist_xav = pybamm.x_average(f_v_dist)\n f_num_dist_xav = pybamm.x_average(f_num_dist)\n else:\n f_a_dist_xav = f_a_dist\n f_v_dist_xav = f_v_dist\n f_num_dist_xav = f_num_dist\n\n # broadcast\n f_a_dist = pybamm.SecondaryBroadcast(f_a_dist_xav, [f\"{domain} electrode\"])\n f_v_dist = pybamm.SecondaryBroadcast(f_v_dist_xav, [f\"{domain} electrode\"])\n f_num_dist = pybamm.SecondaryBroadcast(\n f_num_dist_xav, [f\"{domain} electrode\"]\n )\n\n variables = {\n f\"{Domain} {phase_name}particle sizes\": R / R_typ,\n f\"{Domain} {phase_name}particle sizes [m]\": R,\n f\"{Domain} area-weighted {phase_name}particle-size\"\n \" distribution [m-1]\": f_a_dist,\n f\"{Domain} volume-weighted {phase_name}particle-size\"\n \" distribution [m-1]\": f_v_dist,\n f\"{Domain} number-based {phase_name}particle-size\"\n \" distribution [m-1]\": f_num_dist,\n f\"{Domain} area-weighted mean particle radius [m]\": R_a_mean,\n f\"{Domain} volume-weighted mean particle radius [m]\": R_v_mean,\n f\"{Domain} 
number-based mean particle radius [m]\": R_num_mean,\n f\"{Domain} area-weighted {phase_name}particle-size\"\n \" standard deviation [m]\": sd_a,\n f\"{Domain} volume-weighted {phase_name}particle-size\"\n \" standard deviation [m]\": sd_v,\n f\"{Domain} number-based {phase_name}particle-size\"\n \" standard deviation [m]\": sd_num,\n # X-averaged sizes and distributions\n f\"X-averaged {domain} {phase_name}particle sizes [m]\": pybamm.x_average(R),\n f\"X-averaged {domain} area-weighted {phase_name}particle-size \"\n \"distribution [m-1]\": f_a_dist_xav,\n f\"X-averaged {domain} volume-weighted {phase_name}particle-size \"\n \"distribution [m-1]\": f_v_dist_xav,\n f\"X-averaged {domain} number-based {phase_name}particle-size \"\n \"distribution [m-1]\": f_num_dist_xav,\n }\n\n return variables", "def grd_posterior_gaussian(self, ) -> Tuple[np.ndarray, np.ndarray]:\n xmin, xmax = self.x_range\n ymin, ymax = self.y_range\n\n mu = np.array([0, 0])\n sigma = np.zeros((2, 2))\n\n _sample = self._sample\n _prior = self.prior\n\n def mean_x(x: float, y: float):\n return x * _sample(x, y) * _prior.eval(x, y)\n\n def mean_y(x: float, y: float):\n return y * _sample(x, y) * _prior.eval(x, y)\n\n def var_x(x: float, y: float):\n return x * mean_x(x, y)\n\n def var_y(x: float, y: float):\n return y * mean_y(x, y)\n\n # def var_xy(x: float, y: float):\n # return x * mean_y(x, y)\n\n # First moment\n (mu[0], mu[1]) = (integrate.dblquad(mean_x, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],\n integrate.dblquad(mean_y, xmin, xmax, lambda x: ymin, lambda x: ymax)[0])\n (sigma[0, 0], sigma[1, 1]) = \\\n (integrate.dblquad(var_x, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],\n integrate.dblquad(var_y, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],)\n # integrate.dblquad(var_xy, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],)\n return mu, sigma", "def calc_parameters(T, N, sigma, r, div):\n dt = T / N\n u = np.exp(sigma * np.sqrt(dt))\n d = 1 / u\n b = r - div\n q = 1 / 2 + 1 / 2 * (b - 1 / 2 * sigma ** 2) * np.sqrt(dt) / sigma # P(up movement)\n return dt, u, d, q, b", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['mean' ] = self.mean\n paramDict['sigma'] = self.sigma\n return paramDict", "def calc_parameters(T, N, sigma, r, div):\n dt = T/N\n u = np.exp(sigma*np.sqrt(dt))\n d = 1/u\n b = r-div\n q = 1/2 + 1/2 * (b - 1/2 * sigma**2)*np.sqrt(dt)/sigma # P(up movement)\n return(dt, u, d, q, b)", "def random():\n # only care about the value of second_moment:\n # curve = scale * e**(-second_moment^2 q^2)/q^2\n # scale = 6 pi/100 (contrast/density*absorbed_amount)^2 * Vf/radius\n # the remaining parameters can be randomly generated from zero to\n # twice the default value as done by default in compare.py\n pars = dict(\n scale=1,\n second_moment=10**np.random.uniform(1, 3),\n )\n return pars", "def N2_f(d1,d2,rho):\n import statsmodels.sandbox.distributions.extras as extras\n muStandardNormal=0.0 # mean of a standard normal distribution \n varStandardNormal=1.0 # variance of standard normal distribution \n upper=([d1,d2]) # upper bound for two values\n v=varStandardNormal # simplify our notations\n mu=muStandardNormal # simplify our notations\n covM=([v,rho],[rho,v])\n return extras.mvnormcdf(upper,mu,covM)", "def get_means_and_scales(self):\n return self.optim.parameters[::2], np.exp(self.optim.parameters[1::2])", "def initializeDistribution(self):\n self.convertToDistrDict['Laguerre'] = self.convertLaguerreToGamma\n self.convertToQuadDict ['Laguerre'] = 
self.convertGammaToLaguerre\n self.measureNormDict ['Laguerre'] = self.stdProbabilityNorm\n if (not self.upperBoundUsed):\n # and (not self.lowerBoundUsed):\n self._distribution = distribution1D.BasicGammaDistribution(self.alpha,1.0/self.beta,self.low)\n #self.lowerBoundUsed = 0.0\n self.upperBound = sys.float_info.max\n self.preferredQuadrature = 'Laguerre'\n self.preferredPolynomials = 'Laguerre'\n else:\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'Legendre'\n if self.lowerBoundUsed == False:\n a = 0.0\n self.lowerBound = a\n else:\n a = self.lowerBound\n if self.upperBoundUsed == False:\n b = sys.float_info.max\n self.upperBound = b\n else:\n b = self.upperBound\n self._distribution = distribution1D.BasicGammaDistribution(self.alpha,1.0/self.beta,self.low,a,b)", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['mean' ] = self.mean\n paramDict['sigma'] = self.sigma\n paramDict['low'] = self.low\n return paramDict", "def parameters(conv, orthogonal):\n nrm = operator_one_norm(conv.weight).detach().cpu().numpy()\n\n if nrm > 15:\n print('Overflow likely, norm={}'.format(nrm))\n\n m = np.arange(1, len(THETA) + 1)\n vals = m * np.ceil(nrm / THETA)\n mstar = min(1 + np.argmin(vals), 56)\n s = int(np.ceil(nrm / THETA[mstar - 1]))\n\n return mstar, s", "def get_initial_parameters(token_segs):\r\n estems = {} # tracks the average probability of each root\r\n esuffix = {} # tracks the average probability of each suffix\r\n etrans = {} # tracks the average probability of each (transition, feature) pair\r\n eftrans = {} # tracks the average probability of each feature (interface between stem and suffix)\r\n\r\n # collect the probabilities of each object, to be normalized (divided by their totals) later\r\n for ts_list in token_segs:\r\n avg_prob = 1.0 / len(ts_list)\r\n for ts in ts_list:\r\n root = ts.root\r\n rand_val = 1.0\r\n if root in estems:\r\n estems[root] += rand_val * avg_prob\r\n else: estems[root] = rand_val * avg_prob\r\n\r\n suffix = ts.suffix\r\n if suffix in esuffix:\r\n esuffix[suffix] += rand_val * avg_prob\r\n else: esuffix[suffix] = rand_val * avg_prob\r\n\r\n trans = ts.trans\r\n ftrans = feature(root, suffix)\r\n if (trans, ftrans) in etrans:\r\n etrans[(trans, ftrans)] += rand_val * avg_prob\r\n else: etrans[(trans, ftrans)] = rand_val * avg_prob\r\n\r\n if ftrans in eftrans:\r\n eftrans[ftrans] += rand_val * avg_prob\r\n else: eftrans[ftrans] = rand_val * avg_prob\r\n\r\n # divide by the totals\r\n probstems = estems\r\n probsum = sum(probstems.values())\r\n for stem in probstems:\r\n probstems[stem] /= probsum\r\n\r\n probsuffix = esuffix\r\n probsum = sum(probsuffix.values())\r\n for suffix in probsuffix:\r\n probsuffix[suffix] /= probsum\r\n\r\n probtrans = etrans\r\n for trans, ftrans in probtrans:\r\n probtrans[(trans, ftrans)] /= eftrans[ftrans]\r\n\r\n return probstems, probsuffix, probtrans", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale,self.period))", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale,self.period))", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale,self.period))", "def get_distribution(self):\n\n # If the distributions have been updated before.\n if self.update_number > 0:\n for m in range(0, self.document_number, 1):\n for k in range(0, self.topic_number,1):\n probability = self.document_distribution_over_topic[m][k] / self.update_number\n self.document_distribution_over_topic[m][k] = probability\n for 
k in range(0, self.topic_number,1):\n for v in range(0, self.term_number,1):\n probability = self.topic_distribution_over_term[k][v] / self.update_number\n self.topic_distribution_over_term[k][v] = probability\n # The distributions have not been updated once.\n else:\n for m in range(0, self.document_number, 1):\n for k in range(0, self.topic_number, 1):\n self.document_distribution_over_topic[m][k] = (\n (self.document_topic_count_matrix[m][k] + self.alpha[k]) / (\n self.sum_document_by_topic_count[m] + self.sum_alpha))\n for k in range(0, self.topic_number, 1):\n for v in range(0, self.term_number, 1):\n self.topic_distribution_over_term[k][v] = (\n (self.topic_term_count_matrix[k][v] + self.beta[v]) / (\n self.sum_topic_by_term_count[k] + self.sum_beta))", "def generate_moments(hyper, params):\n\n k, d = hyper['k'], hyper['d']\n\n p = params # Shorthand, don't judge\n m = {} # Moments\n for x1 in xrange(1,d+1):\n m[(x1,)] = sum( p[(h,x1)] * p[(h,)] for h in xrange(1,k+1) )\n for x2 in xrange(1,d+1):\n m[(x1,x2)] = sum( p[(h,x1)] * p[(h,x2)] * p[(h,)] for h in xrange(1,k+1) )\n for x3 in xrange(1,d+1):\n m[(x1,x2,x3)] = sum( p[(h,x1)] * p[(h,x2)] * p[(h,x3)] * p[(h,)] for h in xrange(1,k+1) )\n return m", "def update_moments_r(self):\n denominator = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2 )\n nominator1 = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2 ) * self.constellation\n \n nominator2 = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2) * np.power(self.constellation, 2)\n try:\n \n moment1 = nominator1.sum(axis=1) / denominator.sum(axis=1)\n moment2 = nominator2.sum(axis=1) / denominator.sum(axis=1)\n assert np.all(np.logical_not(np.isnan(moment1))) and np.all(np.logical_not(np.isnan(moment2)))\n except:\n print(\"Oops! That was no valid number. Try again...\")\n\n \n self.mu = moment1\n return moment1, moment2", "def var_parameters(jd,mag,err):\n\n mean = np.mean(mag)\n nepochs = float(len(jd))\n\n chi = np.sum( (mag - mean)**2. / err**2. 
)\n p_chi = chi2.cdf(chi,(nepochs-1))\n\n\n a = (mag-mean)**2\n ex_var = (np.sum(a-err**2)/((nepochs*(mean**2))))\n sd = np.sqrt((1./(nepochs-1))*np.sum(((a-err**2)-ex_var*(mean**2))**2))\n ex_verr = sd/((mean**2)*np.sqrt(nepochs))\n\n\n return p_chi, ex_var, ex_verr", "def priorDistribution(beta):\r\n ### TODO: Write your code here\r\n m_v = np.zeros(2)\r\n print(\"m_v shape: \" ,m_v.shape)\r\n c_v = np.array( [ [ beta , 0 ] , [ 0 , beta ] ] )\r\n print(\"c_v shape: \",c_v.shape)\r\n x_s = []\r\n for i in np.linspace(-1 , 1 , 150):\r\n for j in np.linspace(-1 , 1 , 150):\r\n x_s.append([i,j])\r\n x_s = np.array(x_s)\r\n print(\"x_s shape: \",x_s.shape)\r\n density = util.density_Gaussian(m_v , c_v , x_s)\r\n #print(density)\r\n print(\"length density \",len(density))\r\n X,Y = np.meshgrid( np.linspace(-1,1,150) , np.linspace(-1,1,150) )\r\n plt.contour( X , Y , np.reshape(density , (150, 150 )) )\r\n plt.plot(-0.1 , -0.5 , marker = 'o' , MarkerSize = 10 , label = 'point a')\r\n plt.xlabel('a0 ')\r\n plt.ylabel(' a1 ')\r\n plt.legend()\r\n plt.title('p(a)')\r\n plt.show() \r\n print('-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x')\r\n return", "def getInitParams(self):\n paramDict = Distribution.getInitParams(self)\n paramDict['strategy'] = self.strategy\n paramDict['nPoints'] = self.nPoints\n return paramDict", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['lambda'] = self.lambdaVar # rate parameter\n paramDict['low' ] = self.low # lower domain boundary\n return paramDict", "def PH2From3Moments (moms, prec=1e-14):\n\n m1, m2, m3 = moms\n\n # check moment boounds\n m2l = APH2ndMomentLowerBound(m1, 2) \n m3l = APH3rdMomentLowerBound(m1, m2, 2) \n m3u = APH3rdMomentUpperBound(m1, m2, 2) \n \n if m2<m2l:\n raise Exception(\"The given second moment is not feasible!\") \n if m3<m3l:\n raise Exception(\"The given third moment is not feasible (too small)!\")\n if m3>m3u:\n raise Exception(\"The given third moment is not feasible (too large)!\")\n \n # check if we have an exponential distribution\n if abs(m2/m1/m1-2.0) < prec:\n return (np.matrix([1]), np.matrix([[-1/m1]]))\n \n # calculate parameters\n b = 3.0*m1*m2-m3\n c = 3.0*m2*m2-2.0*m1*m3\n e = -2.0*m1*m1+m2\n a = b*b+6.0*c*e\n if a<0:\n a = 0\n a = math.sqrt(a)\n if c>0:\n lambda1 = (b - a) / c\n lambda2 = (b + a) / c\n p = (-b-6.0*m1*e+a) / (b+a)\n elif c<0:\n lambda1 = (b + a) / c\n lambda2 = (b - a) / c\n p = (b+6.0*m1*e+a) / (-b+a)\n elif c==0:\n lambda1 = 0\n lambda2 = 1.0 / m1\n p = 0\n \n # return the result\n return (np.matrix([p,1.0-p]), np.matrix([[-lambda1, lambda1], [0,-lambda2]]))", "def prop_func_form_params(param1,param2,*arg):\n return np.log(MH.simple_2D_Gauss(param1-param2,arg[0],arg[1]))", "def fdist(param1, param2):\n return(prng.gamma(param1, param2))", "def initializeDistribution(self):\n self.convertToDistrDict['Jacobi'] = self.convertJacobiToBeta\n self.convertToQuadDict ['Jacobi'] = self.convertBetaToJacobi\n self.measureNormDict ['Jacobi'] = self.stdProbabilityNorm\n #this \"if\" section can only be called if distribution not generated using readMoreXML\n if (not self.upperBoundUsed) and (not self.lowerBoundUsed):\n self._distribution = distribution1D.BasicBetaDistribution(self.alpha,self.beta,self.high-self.low,self.low)\n else:\n if self.lowerBoundUsed == False:\n a = 0.0\n else:\n a = self.lowerBound\n if self.upperBoundUsed == False:\n b = sys.float_info.max\n else:\n b = self.upperBound\n self._distribution = 
distribution1D.BasicBetaDistribution(self.alpha,self.beta,self.high-self.low,a,b,self.low)\n self.preferredPolynomials = 'Jacobi'\n self.compatibleQuadrature.append('Jacobi')\n self.compatibleQuadrature.append('ClenshawCurtis')", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['apex' ] = self.apex\n paramDict['min' ] = self.min\n paramDict['max' ] = self.max\n return paramDict", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['mu' ] = self.mu\n return paramDict", "def distribution(self):\n \n #external_distribution serves both the purpose of external setting of distribution and the caching of distribution()\n if self.external_distribution:\n return self.external_distribution_array;\n else:\n energy_vector = []\n superset = self.generate_superset(0) \n \n for i in superset:\n state = self.ket(i)\n \n norm_squared = np.dot(state.T, state)\n \n if norm_squared > 0: #zero is appended at the end\n energy = np.dot(state.T, np.dot( self.epsilon, state))\n interaction = np.dot(state.T, np.dot( self.u, state))/2.0 #divide by two. Otherwise, <l r| U |l r > = U_LR + U_RL = 2U\n #print state, np.dot(self.u, state) \n #print interaction\n energy_vector.append( energy + interaction )\n \n energy_vector.insert(0, 0.0) \n probability = np.exp( np.multiply(-self.beta, energy_vector)) \n probability /= probability.sum() \n return probability", "def initializeDistribution(self):\n self.convertToDistrDict['Hermite'] = self.convertHermiteToNormal\n self.convertToQuadDict ['Hermite'] = self.convertNormalToHermite\n self.measureNormDict ['Hermite'] = self.stdProbabilityNorm\n if (not self.upperBoundUsed) and (not self.lowerBoundUsed):\n self._distribution = distribution1D.BasicNormalDistribution(self.mean,\n self.sigma)\n self.lowerBound = -sys.float_info.max\n self.upperBound = sys.float_info.max\n self.preferredQuadrature = 'Hermite'\n self.preferredPolynomials = 'Hermite'\n else:\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'Legendre'\n if self.lowerBoundUsed == False:\n a = -sys.float_info.max\n self.lowerBound = a\n else:\n a = self.lowerBound\n if self.upperBoundUsed == False:\n b = sys.float_info.max\n self.upperBound = b\n else:\n b = self.upperBound\n self._distribution = distribution1D.BasicNormalDistribution(self.mean,\n self.sigma,\n a,b)", "def prop_dist_form_params(*arg):\n return np.random.multivariate_normal(*arg)", "def _get_prior_params(self):\n # relation transformation matrix\n M_mu = np.eye(self.n_polarities, dtype=\"float32\")\n M_mu[1, :] = [0., 0.3, 0.]\n M_mu = np.tile(M_mu, (self.n_rels, 1)).reshape(\n self.n_rels, self.n_polarities, self.n_polarities\n )\n # for rel, rel_idx in iteritems(self.rel2idx):\n # # swap axes for contrastive relations\n # if check_rel(rel, CONTRASTIVE_RELS):\n # mu_i = M_mu[rel_idx]\n # mu_i[[0, 2]] = mu_i[[2, 0]]\n M_mu = torch.tensor(M_mu)\n M_sigma = torch.tensor(\n np.ones((self.n_rels, self.n_polarities, self.n_polarities),\n dtype=\"float32\")\n )\n # beta\n beta_p = 5. * torch.tensor(np.ones((self.n_rels, self.n_polarities),\n dtype=\"float32\"))\n beta_q = 5. 
* torch.tensor(np.ones((self.n_rels, self.n_polarities),\n dtype=\"float32\"))\n # z_epsilon\n z_epsilon_p = torch.tensor(1.)\n z_epsilon_q = torch.tensor(15.)\n # scale factor\n scale_factor = torch.tensor(34.)\n return {\"M_mu\": M_mu, \"M_sigma\": M_sigma, \"beta_p\": beta_p,\n \"beta_q\": beta_q, \"z_epsilon_p\": z_epsilon_p,\n \"z_epsilon_q\": z_epsilon_q, \"scale_factor\": scale_factor}", "def demo_indef():\n n_dim = 2\n A = np.eye(n_dim)\n A[1, 1] = -1.\n covar = np.eye(n_dim)\n mean = np.zeros(n_dim)\n approx = approx_quad_form(mean, covar, A)\n\n # Sample from true dist\n n_sample = 10000\n x = np.random.multivariate_normal(mean, covar, n_sample)\n q_samples = np.zeros(n_sample)\n for i in range(n_sample):\n q_samples[i] = x[i] @ A @ x[i]\n\n q = np.linspace(-10, 10)\n\n plt.plot(\n q, approx(q), label='Approx.',\n color='tab:blue', linestyle='--')\n bins = np.linspace(-8, 8, 81)\n bins[0] = -np.inf\n bins[-1] = np.inf\n plt.hist(\n q_samples, density=True, histtype='stepfilled',\n bins=bins,\n alpha=0.5, color='black', label='Samples')\n plt.xlabel('q')\n plt.ylabel('pdf(q) [-]')\n plt.legend()\n\n central_moments_sample = scipy.stats.moment(\n q_samples, moment=[0, 1, 2, 3, 4])\n print(central_moments_sample)", "def getCrowDistDict(self):\n retDict = Distribution.getCrowDistDict(self)\n retDict['mu'] = self.mean\n retDict['sigma'] = self.sigma\n return retDict", "def initializeDistribution(self):\n self.raiseAMessage('initialize distribution')\n mu = distribution1D.vectord_cxx(len(self.mu))\n for i in range(len(self.mu)):\n mu[i] = self.mu[i]\n covariance = distribution1D.vectord_cxx(len(self.covariance))\n for i in range(len(self.covariance)):\n covariance[i] = self.covariance[i]\n if self.method == 'spline':\n if self.covarianceType != 'abs':\n self.raiseAnError(IOError,'covariance with type ' + self.covariance + ' is not implemented for ' + self.method + ' method')\n self._distribution = distribution1D.BasicMultivariateNormal(covariance, mu)\n elif self.method == 'pca':\n self._distribution = distribution1D.BasicMultivariateNormal(covariance, mu, str(self.covarianceType), self.rank)\n if self.transformation:\n self.lowerBound = [-sys.float_info.max]*self.rank\n self.upperBound = [sys.float_info.max]*self.rank\n else:\n self.lowerBound = [self.returnLowerBound(dim) for dim in range(self.dimension)]\n self.upperBound = [self.returnUpperBound(dim) for dim in range(self.dimension)]", "def _init_params(self):\n self.W_ems = []\n self.b_ems = []\n if self.rank_n_approx:\n W_em1 = self.init_fn[0](self.n_in,\n self.rank_n_approx,\n self.sparsity[0],\n self.scale[0],\n self.rng)\n W_em2 = self.init_fn[0](self.rank_n_approx,\n self.n_hids[0],\n self.sparsity[0],\n self.scale[0],\n self.rng)\n self.W_em1 = theano.shared(W_em1,\n name='W1_0_%s'%self.name)\n self.W_em2 = theano.shared(W_em2,\n name='W2_0_%s'%self.name)\n self.W_ems = [self.W_em1, self.W_em2]\n\n else:\n W_em = self.init_fn[0](self.n_in,\n self.n_hids[0],\n self.sparsity[0],\n self.scale[0],\n self.rng)\n self.W_em = theano.shared(W_em,\n name='W_0_%s'%self.name)\n self.W_ems = [self.W_em]\n\n self.b_em = theano.shared(\n self.bias_fn[0](self.n_hids[0], self.bias_scale[0],self.rng),\n name='b_0_%s'%self.name)\n self.b_ems = [self.b_em]\n\n for dx in range(1, self.n_layers):\n W_em = self.init_fn[dx](self.n_hids[dx-1] / self.pieces[dx],\n self.n_hids[dx],\n self.sparsity[dx],\n self.scale[dx],\n self.rng)\n W_em = theano.shared(W_em, name='W_%d_%s'%(dx,self.name))\n self.W_ems += [W_em]\n\n b_em = theano.shared(\n 
self.bias_fn[dx](self.n_hids[dx], self.bias_scale[dx],self.rng),\n name='b_%d_%s'%(dx,self.name))\n self.b_ems += [b_em]\n\n self.params = [x for x in self.W_ems]\n\n if self.learn_bias and self.learn_bias!='last':\n self.params = [x for x in self.W_ems] + [x for x in self.b_ems]\n elif self.learn_bias == 'last':\n self.params = [x for x in self.W_ems] + [x for x in\n self.b_ems][:-1]\n self.params_grad_scale = [self._grad_scale for x in self.params]\n if self.weight_noise:\n self.nW_ems = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_ems]\n self.nb_ems = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_ems]\n\n self.noise_params = [x for x in self.nW_ems] + [x for x in self.nb_ems]\n self.noise_params_shape_fn = [constant_shape(x.get_value().shape)\n for x in self.noise_params]", "def Rosenblatt_Transform(dist, x_i): \n if dist.stats(moments = 's') > 1 or dist.stats(moments = 's') < -1:\n \n x_N_mean = dist.median()\n x_N_std = (x_i - x_N_mean)/sst.norm.ppf(dist.cdf(x_i))\n \n return(x_N_mean, x_N_std)\n \n else:\n x_N_std = sst.norm.pdf(sst.norm.ppf(dist.cdf(x_i)))/dist.pdf(x_i)\n x_N_mean = x_i - sst.norm.ppf(dist.cdf(x_i))*x_N_std\n return(x_N_mean, x_N_std)", "def _get_marginal_pdfs( res, nbins=51, verbose=True ):\n\tvparam_names = res.vparam_names\n\tweights = res.weights\n\tsamples = res.samples\n\n\tpdfdict = {}\n\n\tfor param in vparam_names :\n\t\tipar = vparam_names.index( param )\n\t\tparamvals = samples[:,ipar]\n\n\t\tif nbins>1:\n\t\t\tif param in res.bounds :\n\t\t\t\tparvalmin, parvalmax = res.bounds[param]\n\t\t\telse :\n\t\t\t\tparvalmin, parvalmax = 0.99*paramvals.min(), 1.01*paramvals.max()\n\t\t\tparambins = np.linspace( parvalmin, parvalmax, nbins, endpoint=True ).flatten()\n\t\t\tbinindices = np.digitize( paramvals, parambins )\n\n\t\t\t# we estimate the marginalized pdf by summing the weights of all points in the bin,\n\t\t\t# where the weight of each point is the prior volume at that point times the\n\t\t\t# likelihood, divided by the total evidence\n\t\t\tpdf = np.array( [ weights[np.where( binindices==ibin )].sum() for ibin in range(len(parambins)) ] )\n\t\telse :\n\t\t\tparambins = None\n\t\t\tpdf = None\n\n\n\t\tmean = (weights * samples[:,ipar]).sum()\n\t\t#print(samples[:,ipar]-mean)\n\t\t#print(weights)\n\t\tstd = np.sqrt( (weights * (samples[:,ipar]-mean)**2 ).sum() )\n\n\n\t\tpdfdict[param] = (parambins,pdf,mean,std,res.logz)\n\n\t\tif verbose :\n\t\t\tif np.abs(std)>=0.1:\n\t\t\t\tprint( ' <%s> = %.2f +- %.2f'%( param, np.round(mean,2), np.round(std,2)) )\n\t\t\telif np.abs(std)>=0.01:\n\t\t\t\tprint( ' <%s> = %.3f +- %.3f'%( param, np.round(mean,3), np.round(std,3)) )\n\t\t\telif np.abs(std)>=0.001:\n\t\t\t\tprint( ' <%s> = %.4f +- %.4f'%( param, np.round(mean,4), np.round(std,4)) )\n\t\t\telse :\n\t\t\t\tprint( ' <%s> = %.3e +- %.3e'%( param, mean, std) )\n\n\n\t\tif param == 'x0' :\n\t\t\tsalt2 = sncosmo.Model( source='salt2')\n\t\t\tsalt2.source.set_peakmag( 0., 'bessellb', 'ab' )\n\t\t\tx0_AB0 = salt2.get('x0')\n\t\t\tmBmean = -2.5*np.log10( mean / x0_AB0 )\n\t\t\tmBstd = 2.5*np.log10( np.e ) * std / mean\n\t\t\tmBbins = -2.5*np.log10( parambins / x0_AB0 )\n\n\t\t\tpdfdict['mB'] = ( mBbins, pdf, mBmean, mBstd )\n\t\t\tif verbose:\n\t\t\t\tprint( ' <%s> = %.3f +- %.3f'%( 'mB', np.round(mBmean,3), np.round(mBstd,3)) )\n\n\treturn( pdfdict )", "def getInitParams(self):\n paramDict = Distribution.getInitParams(self)\n paramDict['mapping'] = self.mapping\n paramDict['values'] = self.values\n return paramDict", 
"def posteriorDistribution(x,z,beta,sigma2):\r\n ### TODO: Write your code here\r\n\r\n # mean of posterior distribution is the MAP estimate of the weights a\r\n # tau^2(from notes) is beta\r\n\r\n extra_col = np.ones((x.shape[0], 1))\r\n x = np.append(extra_col, x, axis = 1)\r\n\r\n alpha_map = np.linalg.inv((np.transpose(x)@x + (sigma2/beta)*np.eye(2)))@(np.transpose(x)@z)\r\n mu = alpha_map\r\n\r\n Cov = np.linalg.inv((np.transpose(x)@x + (sigma2/beta)*np.eye(2)))*sigma2\r\n\r\n num_x = 100\r\n num_y = 100\r\n\r\n xvalues = np.linspace(-1, 1, num = num_x)\r\n yvalues = np.linspace(-1, 1, num = num_y)\r\n X_grid, Y_grid = np.meshgrid(xvalues, yvalues)\r\n\r\n samples = np.column_stack((X_grid.flatten(), Y_grid.flatten()))\r\n\r\n density = util.density_Gaussian(mu.squeeze(), Cov, samples)\r\n density_grid = np.reshape(density, (num_x, num_y))\r\n\r\n plt.figure(1)\r\n plt.title(\"Posterior Distribution of α Given 5 Data Points\")\r\n plt.xlabel('$α_0$')\r\n plt.ylabel('$α_1$')\r\n plt.scatter(-0.1, -0.5, c='r')\r\n plt.contour(X_grid, Y_grid, density_grid, cmap=plt.cm.winter)\r\n plt.show()\r\n\r\n return (mu,Cov)", "def get_prob_for_distributions(p):\n w1 = p[0]\n mu1 = p[1]\n sigma1 = p[2]\n w2 = p[3]\n mu2 = p[4]\n sigma2 = p[5]\n w3 = p[6]\n mu3 = p[7]\n sigma3 = p[8]\n dist_range = (0, 4.330310991999920844e+01)\n x = np.linspace(dist_range[0], dist_range[1], 1000)\n A1 = np.array(w1 * mlab.normpdf(x, mu1, sigma1)).sum()\n A2 = np.array(w2 * mlab.normpdf(x, mu2, sigma2)).sum()\n A3 = np.array(w3 * mlab.normpdf(x, mu3, sigma3)).sum()\n p1 = A1 / (A1 + A2 + A3)\n p2 = A2 / (A1 + A2 + A3)\n p3 = A3 / (A1 + A2 + A3)\n return p1, p2, p3", "def Get_params(numparams, dt, D):\n # bounds from table 1 Kowalek et al 2020\n Nmin, Nmax = 30, 600\n Bmin, Bmax = 1, 6\n Rmin, Rmax = 1, 17\n alphamin, alphamax = 0.3, 0.7\n Qmin, Qmax = 1, 9\n\n # Gen parameters\n Q = np.random.uniform(Qmin, Qmax, size=numparams)\n Q1, Q2 = Q, Q\n\n NsND = np.random.randint(Nmin, Nmax + 1, size=numparams)\n NsAD = np.random.randint(Nmin, Nmax + 1, size=numparams)\n NsCD = np.random.randint(Nmin, Nmax + 1, size=numparams)\n NsDM = np.random.randint(Nmin, Nmax + 1, size=numparams)\n TDM = NsDM * dt\n\n B = np.random.uniform(Bmin, Bmax, size=numparams)\n r_c = np.sqrt(D * NsCD * dt / B) # solving for r_c in eq. 8 Kowalek\n\n R = np.random.uniform(Rmin, Rmax, size=numparams)\n v = np.sqrt(R * 4 * D / TDM) # solving for v in eq. 7 Kowalek\n\n alpha = np.random.uniform(alphamin, alphamax, size=numparams)\n\n # Compute sigma for ND, AD, CD from eq. 12 Kowalek\n sigmaND = np.sqrt(D * dt) / Q1\n sigmaAD = np.sqrt(D * dt) / Q1\n sigmaCD = np.sqrt(D * dt) / Q1\n\n # Compute sigma for DM from eq. 
12 Kowalek\n sigmaDM = np.sqrt(D * dt + v ** 2 * dt ** 2) / Q2\n\n return np.array(\n [\n NsND,\n NsAD,\n NsCD,\n NsDM,\n D * np.ones(numparams),\n dt * np.ones(numparams),\n r_c,\n v,\n alpha,\n sigmaND,\n sigmaAD,\n sigmaCD,\n sigmaDM,\n ]\n ).T", "def _calpara(self):\n self.up = math.exp(self.sigma*math.sqrt(self.deltatime))\n self.down = math.exp(-self.sigma*math.sqrt(self.deltatime))\n self.upprob = (math.exp((self.r-self.d)*self.deltatime)-self.down)/(self.up-self.down)", "def APH2ndMomentLowerBound (m1, n):\n\n return float(m1)*m1*(n+1) / n", "def _construct_mom_stuff(self):\n a = self.mom_mix_rate\n dist_mean = self.GN.dist_mean\n dist_cov = self.GN.dist_cov\n # Get the generated sample observations for this batch, transformed\n # linearly into the desired space for moment matching...\n X_b = T.dot(self.GN.output, self.mom_match_proj)\n # Get their mean\n batch_mean = T.mean(X_b, axis=0)\n # Get the updated generator distribution mean\n new_mean = ((1.0 - a[0]) * self.GN.dist_mean) + (a[0] * batch_mean)\n # Use the mean to get the updated generator distribution covariance\n X_b_minus_mean = X_b - new_mean\n # Whelp, I guess this line needs the cast... for some reason...\n batch_cov = T.dot(X_b_minus_mean.T, X_b_minus_mean) / T.cast(X_b.shape[0], 'floatX')\n new_cov = ((1.0 - a[0]) * self.GN.dist_cov) + (a[0] * batch_cov)\n # Get the cost for deviation from the target distribution's moments\n mean_err = new_mean - self.target_mean\n cov_err = (new_cov - self.target_cov)\n mm_cost = self.mom_match_weight[0] * \\\n (T.sum(mean_err**2.0) + T.sum(cov_err**2.0))\n # Construct the updates for the running estimates of the generator\n # distribution's first and second-order moments.\n mom_updates = OrderedDict()\n mom_updates[self.GN.dist_mean] = new_mean\n mom_updates[self.GN.dist_cov] = new_cov\n return [mm_cost, mom_updates]", "def aicpdf(xvals, distribution, params):\n if distribution == 'pareto':\n pvals = (params['xmin'] * params['mu'] ** params['xmin']) / (xvals ** (params['xmin'] + 1))\n return pvals\n \n elif distribution == 'lognormal':\n #import pdb; pdb.set_trace()\n pvals = np.exp(-(np.log(xvals) - params['mu'])**2 / (2 * params['sigma']**2)) / (xvals * params['sigma'] * np.sqrt(2*np.pi))\n return pvals\n \n elif distribution == 'normal':\n pvals = np.exp(-(xvals - params['mu'])**2 / (2 * params['sigma']**2)) / (params['sigma'] * np.sqrt(2*np.pi))\n return pvals\n \n elif distribution == 'exponential':\n pvals = params['lambda'] * np.exp(-params['lambda'] * xvals)\n return pvals \n \n elif distribution == 'boundedpl':\n #pvals = (params['mu'] * (params['mu'] ** params['xmax'] - params['xmin'] ** params['xmax'])) / (xvals ** (params['mu'] + 1))\n #mu * (xmax ^ mu - xmin ^ mu) / x ^ (mu+1)\n pvals = (params['mu'] * (params['xmax'] ** params['mu'] - params['xmin'] ** params['mu'])) / (xvals ** (params['mu'] + 1))\n return pvals", "def moment(self, n, mu, sigma):\n return scipy_norm.moment(n, mu, sigma)", "def _get_params(self):\r\n return np.hstack((self.variance, self.lengthscale))", "def _get_params(self):\r\n return np.hstack((self.variance, self.lengthscale))", "def computeMoments(x):\n return (abs(stats.skew(x)),abs(stats.kurtosis(x,None,True)))", "def initializeDistribution(self):\n self.minVal = min(math.exp(self.upperBound),math.exp(self.lowerBound))\n self.maxVal = max(math.exp(self.upperBound),math.exp(self.lowerBound))", "def initializeDistribution(self):\n self.convertToDistrDict['Legendre'] = self.convertLegendreToUniform\n self.convertToQuadDict ['Legendre'] = 
self.convertUniformToLegendre\n self.measureNormDict ['Legendre'] = self.stdProbabilityNorm\n self.convertToDistrDict['ClenshawCurtis'] = self.convertLegendreToUniform\n self.convertToQuadDict ['ClenshawCurtis'] = self.convertUniformToLegendre\n self.measureNormDict ['ClenshawCurtis'] = self.stdProbabilityNorm\n self._distribution = distribution1D.BasicUniformDistribution(self.lowerBound,self.lowerBound+self.range)", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale))", "def __bivariate_gaussian(self, sig1, sig2, mu1, mu2, x1, x2, rho):\n Z1 = ((x1 - mu1) / sig1) ** 2\n Z2 = ((x2 - mu2) / sig2) ** 2\n Z3 = 2 * rho * (x1 - mu1) * (x2 - mu2) / (sig1 * sig2)\n Z = Z1 + Z2 - Z3\n\n pi_const = torch.tensor([np.pi]).to(self.__device)\n\n return torch.exp(-Z / (2 * (1 - rho ** 2))).to(self.__device) / (\n 2 * pi_const * sig1 * sig2 * torch.sqrt(1 - rho ** 2)\n )", "def getCrowDistDict(self):\n retDict = Distribution.getCrowDistDict(self)\n retDict['mu'] = self.mean\n retDict['sigma'] = self.sigma\n retDict['low'] = self.low\n return retDict", "def __bivariate_gaussian(self, sig1, sig2, mu1, mu2, x1, x2, rho):\n Z1 = ((x1 - mu1) / sig1) ** 2\n Z2 = ((x2 - mu2) / sig2) ** 2\n Z3 = 2 * rho * (x1 - mu1) * (x2 - mu2) / (sig1 * sig2)\n\n Z = Z1 + Z2 - Z3\n\n pi_const = torch.tensor([np.pi]).to(self.__device)\n\n return torch.exp(-Z / (2 * (1 - rho ** 2))).to(self.__device) / (\n 2 * pi_const * sig1 * sig2 * torch.sqrt(1 - rho ** 2)\n )", "def estimate_gaussian_params(X):\n mu = X.mean(axis=0)\n var = X.std(axis=0)**2.0\n return mu,var", "def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu\")\n self.modelBuilder.factory_('expr::vbfH_s_func(\"@0-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"1-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::vbfH_sbi_func(\"sqrt(@0)\", mu)')", "def like_one(theta,dt,dmag,sigma):\n\n gamma, A = theta\n aux=(1/np.sqrt(2*np.pi*Veff2(dt,sigma,A,gamma)))*np.exp(-1.0*(dmag**2)/(2.0*Veff2(dt,sigma,A,gamma)))\n\n return aux", "def update_params(x, prior, posterior):\r\n mu0, kappa0, alpha0, beta0 = prior\r\n mu_t, kappa_t, alpha_t, beta_t = posterior\r\n return np.r_[mu0, (kappa_t*mu_t + x)/(kappa_t + 1)], \\\r\n np.r_[kappa0, kappa_t + 1], \\\r\n np.r_[alpha0, alpha_t + 0.5], \\\r\n np.r_[beta0, beta_t + 0.5*kappa_t*(x - mu_t)**2/(kappa_t + 1)]", "def prior_params_tree(self):\n id = {name:i for i, name in enumerate(list(self.tree.keys()))}\n n_nodes = len(id)\n dist_mx = np.zeros((n_nodes, n_nodes))\n\n for node1, edges in self.tree.items():\n for node2, dist in edges.dist:\n dist_mx[id[node1], id[node2]] = dist\n dist_mx[id[node2], id[node1]] = dist\n\n # while np.count_nonzero(dist_mx) < (n_nodes ** 2 - n_nodes):\n for _ in range(20):\n for i, j in combinations(range(n_nodes), 2):\n if dist_mx[i,j] > 0:\n continue\n row_i = dist_mx[i]\n row_j = dist_mx[j]\n value = (row_i + row_j) * (row_i > 0) * (row_j > 0)\n dist_mx[i, j] = dist_mx[j, i] = - max(np.unique(value))\n dist_mx = np.abs(dist_mx)\n\n evolve_rate = []\n for node1, node2 in combinations(self.m_cov.keys(), 2):\n mx_cov_dist = np.abs(self.m_cov[node1] - self.m_cov[node2])\n elements = mx_cov_dist[np.triu_indices(len(mx_cov_dist))]\n norm_elements = elements / dist_mx[id[node2], id[node1]]\n evolve_rate += list(norm_elements)\n\n\n\n df = np.mean([p.shape[0] for _, p in self.m_profiles.items()])\n p_theta_alpha = df/2\n # 
p_theta_alpha = 4\n p_theta_beta = np.percentile(evolve_rate, 75) * (p_theta_alpha - 1)\n # print(p_theta_alpha, p_theta_beta)\n return p_theta_alpha, p_theta_beta", "def __init__(self, \n param_epsilon, \n param_tau,\n param_u, \n param_gamma_left,\n param_gamma_right,\n param_beta):\n self.epsilon = param_epsilon\n self.tau = param_tau\n self.u = param_u\n self.gamma_left = param_gamma_left\n self.gamma_right = param_gamma_right\n \n self.sigma_retarded = 1j * (self.gamma_left + self.gamma_right) / 2.0\n self.sigma_advanced = - self.sigma_retarded;\n \n self.dim = len(self.u)\n self.rho = np.zeros((2**self.dim))\n \n self.beta = param_beta\n \n self.cutoff_chance = 0.0001\n self.external_distribution = False\n self.external_distribution_array = self.distribution()\n self.external_distribution = True", "def _get_parameters(n, j, domain, g, ncap):\n alphas, betas = rc.recurrenceCoefficients(n - 2, lb=domain[0], rb=domain[1],\n j=j, g=g, ncap=ncap)\n omegas = g * np.array(alphas)\n ts = g * np.sqrt(np.array(betas)[1::])\n c0 = np.sqrt(betas[0])\n return omegas, ts, c0", "def aicmle(timeSeries, distribution):\n mlevals = {} \n if distribution == 'pareto':\n mlevals['xmin'] = np.min(timeSeries)\n mlevals['mu'] = 1 - timeSeries.shape[0] / (timeSeries.shape[0] * np.log(mlevals['xmin']) - np.sum(np.log(timeSeries)))\n \n elif distribution == 'lognormal':\n mlevals['mu'] = np.sum(np.log(timeSeries)) / timeSeries.shape[0]\n mlevals['sigma'] = np.sqrt(np.sum( (np.log(timeSeries) - mlevals['mu'])**2) / timeSeries.shape[0])\n \n elif distribution == 'normal':\n mlevals['mu'] = np.mean(timeSeries)\n mlevals['sigma'] = np.sqrt(sum((timeSeries - np.mean(timeSeries))**2) / timeSeries.shape[0])\n \n elif distribution == 'exponential':\n mlevals['lambda'] = 1.0 / np.mean(timeSeries)\n \n elif distribution == 'boundedpl':\n mlevals['xmin'] = np.min(timeSeries)\n mlevals['xmax'] = np.max(timeSeries)\n minmuEstimate = 1.1\n mlevals['mu'] = fmin(lambda mu: -len(timeSeries) * np.log( (mu - 1) / (np.min(timeSeries)**(1 - mu) - np.max(timeSeries)**(1 - mu))) + mu * np.sum(np.log(timeSeries)), minmuEstimate, disp=0)[0]\n\n return mlevals", "def __init__(self, mean=0.0, sigma=1.0):\n super().__init__()\n self.mean = mean\n self.sigma = sigma\n self.hasInfiniteBound = True\n self.type = 'Normal'\n self.distType = 'Continuous'\n self.compatibleQuadrature.append('Hermite')\n self.compatibleQuadrature.append('CDF')\n #THESE get set in initializeDistribution, since it depends on truncation\n #self.preferredQuadrature = 'Hermite'\n #self.preferredPolynomials = 'Hermite'", "def compute_t_params(mu, kappa, alpha, beta):\r\n mu_, sigma2_, dof_ = mu, beta*(kappa + 1)/(alpha*kappa), 2*alpha\r\n return mu_, sigma2_, dof_", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['n' ] = self.n\n paramDict['p' ] = self.p\n return paramDict", "def generate_stat(sample_size, sparsity = 0, amplitude = 0, sigma = 1):\n var = generate_variable(sample_size, sparsity, amplitude, sigma)\n y_obs = var[0]\n \n \"\"\" \n f is equal to -X(t,theta) and we will minimize f (max. 
X)\n \"\"\"\n def f(x):\n \"\"\" \n f(x)=-X(t,theta) where x[0]=t and x[1]=theta\n \"\"\"\n res = np.real(np.exp(-1j*x[1])*\\\n sum(y_obs[k+sample_size]*np.exp(1j*k*x[0]) \\\n for k in range(-sample_size,sample_size+1))) \n res = -res/np.sqrt(2*sample_size+1) \n return res\n \n def grad_f(x):\n \"\"\" \n gradient of f\n \"\"\"\n res1 = np.real(np.exp(-1j*x[1])*\\\n sum(1j*k*y_obs[k+sample_size]*np.exp(1j*k*x[0]) \\\n for k in range(-sample_size,sample_size+1)))\n res1 = -res1/np.sqrt(2*sample_size+1)\n \n res2 = np.real(np.exp(-1j*x[1])*\\\n sum(-1j*y_obs[k+sample_size]*np.exp(1j*k*x[0]) \\\n for k in range(-sample_size,sample_size+1)))\n res2 = -res2/np.sqrt(2*sample_size+1)\n return np.array([res1, res2])\n \n #% Minimizing f\n \n \"\"\" \n we minimize on [0, 2pi]^2\n \"\"\"\n bnds = ((0, 2*np.pi), (0, 2*np.pi))\n \n \"\"\" \n We begin by a greedy search of the initialization point over a grid of size 126^2\n the initialization point is init\n \"\"\"\n x = y = np.arange(0, 2*np.pi, 0.05)\n steps = 126\n X, Y = np.meshgrid(x, y)\n val = np.array([f([x,y]) for x,y in zip(np.ravel(X), np.ravel(Y))])\n init = np.argmin(val)\n x1 = init%steps\n x2 = (init-x1)/steps\n init = [x1*0.05, x2*0.05]\n \n \"\"\" \n we minimize f...\n \"\"\"\n result = sco.minimize(f, init, method=\"L-BFGS-B\",\\\n jac=grad_f, bounds=bnds, tol=1e-15)\n \n \"\"\" \n (t1,theta1) is the argmax of X(t, theta) and l1=$\\lambda_1$\n \"\"\"\n t1 = result.x[0]\n theta1 = result.x[1]\n l1 = -f([t1,theta1])\n \n \n \"\"\" \n Function g(x) is equal to (X(t1,theta1)-X(x))/(1-rho((t1,theta1)-x))\n \"\"\"\n def g(x):\n a0 = x[0]-t1\n a1 = x[1]-theta1\n N = 2*sample_size+1\n \n vec = np.array([a0,a1])\n r = np.linalg.norm(vec)\n \"\"\" \n the value for r=0 is set to l1 (note that r=0 corresponds to x=(t1,theta1))\n \"\"\" \n res = l1 \n \n if (0<r) & (r<0.00001):\n \"\"\" \n we look a values near (t1,theta1) for which an indetermination occurs\n \"\"\" \n alpha= np.arccos(np.clip(a0/np.sqrt(a0**2+a1**2), -1.0, 1.0))\n u0 = np.cos(alpha)\n u1 = np.sin(alpha)\n \"\"\" \n u0,u1 defines the direction (unit vector)\n \"\"\"\n denom = sum((k*np.cos(alpha)-np.sin(alpha))**2*\\\n (np.sinc((r*(k*np.cos(alpha)-np.sin(alpha)))/(2*np.pi)))**2\\\n for k in range(-sample_size,sample_size+1))/N\n \"\"\" \n denom computes the denominator\n \"\"\"\n \n# \"\"\" \n# We use simpson rule for the numerator\n# \"\"\"\n# h = np.linspace(0,1,500)\n# \n# b0 = t1 + h*a0\n# b1 = theta1 + h*a1\n# \n# value = (1-h)*(u0**2*\\\n# np.real(np.exp(-1j*b1)*sum(-k**2*y_obs[k+sample_size]*np.exp(1j*k*b0) \\\n# for k in range(-sample_size,sample_size+1)))\\\n# +2*u0*u1*\\\n# np.real(np.exp(-1j*b1)*sum(k*y_obs[k+sample_size]*np.exp(1j*k*b0) \\\n# for k in range(-sample_size,sample_size+1)))\\\n# +u1**2*\\\n# np.real(np.exp(-1j*b1)*sum((-1)*y_obs[k+sample_size]*np.exp(1j*k*b0) \\\n# for k in range(-sample_size,sample_size+1)))) \n# value = value/np.sqrt(N)\n# \n# num = sci.simps(value, h)\n \n \"\"\" \n we use a quadrature for the numerator\n \"\"\" \n fun_int = lambda w: (1-w)*(u0**2*\\\n np.real(np.exp(-1j*(theta1+w*a1))*\\\n sum(-k**2*y_obs[k+sample_size]*np.exp(1j*k*(t1+w*a0)) \\\n for k in range(-sample_size,sample_size+1)))\\\n +2*u0*u1*\\\n np.real(np.exp(-1j*(theta1+w*a1))*\\\n sum(k*y_obs[k+sample_size]*np.exp(1j*k*(t1+w*a0)) \\\n for k in range(-sample_size,sample_size+1)))\\\n +u1**2*\\\n np.real(np.exp(-1j*(theta1+w*a1))*\\\n sum((-1)*y_obs[k+sample_size]*np.exp(1j*k*(t1+w*a0)) \\\n for k in range(-sample_size,sample_size+1)))) \n \n num = 
np.mean(sci.quad(fun_int, 0, 1, epsabs=1e-15, epsrel=1e-15, limit=1000))\n \n res = -num/denom\n \n if (r>=0.00001):\n \"\"\" \n we look a values far (t1,theta1) for which there is no indetermination\n \"\"\" \n res = (l1+f(x))/(1-(np.cos(a1)*dirichlet(a0,N)/N))\n \n return res \n \"\"\" \n we minimize g on [0, 2pi]^2 an dwe llok for the initialization point\n \"\"\"\n val2 = np.array([g([x,y]) for x,y in zip(np.ravel(X), np.ravel(Y))])\n init2 = np.argmin(val2)\n x1 = init2%steps\n x2 = (init2-x1)/steps\n init2 = [x1*0.05, x2*0.05] \n result2 = sco.minimize(g, init2, method=\"L-BFGS-B\", bounds=bnds, tol=1e-15) \n \"\"\" \n argmin of g\n \"\"\"\n t2 = result2.x[0]\n theta2 = result2.x[1] \n \"\"\" \n value of lambda_2\n \"\"\"\n l21 = l1-result2.fun \n a0 = t2-t1\n a1 = theta2-theta1\n N = 2*sample_size+1\n l22 = l1-(l1+f([t2,theta2]))/(1-(np.cos(a1)*dirichlet(a0,N)/N))\n l2 = max(l21,l22)\n \"\"\" \n we compute the statistic\n \"\"\"\n alpha1 = (1/3)*sample_size*(sample_size+1)\n alpha2 = (1/np.sqrt(N))*\\\n sum((k**2-alpha1)*\\\n np.real(y_obs[k+sample_size]*np.exp(1j*(k*t1-theta1))) \\\n for k in range(-sample_size,sample_size+1))\n alpha3 = (1/np.sqrt(N))*sum(k*np.real(y_obs[k+sample_size]*\\\n np.exp(1j*(k*t1-theta1))) for k in range(-sample_size,sample_size+1)) \n stat = (sigma*(alpha1*l1+alpha2)*scs.norm.pdf(l1/sigma)+\\\n (alpha1*sigma**2-alpha3**2)*(1-scs.norm.cdf(l1/sigma)))/\\\n (sigma*(alpha1*l2+alpha2)*scs.norm.pdf(l2/sigma)+\\\n (alpha1*sigma**2-alpha3**2)*(1-scs.norm.cdf(l2/sigma))) \n \n return stat", "def moments2nd(data):\n nrow,ncol=data.shape\n Isum = data.sum()\n Icol = data.sum(axis=0) # sum over all rows\n Irow = data.sum(axis=1) # sum over all cols\n colgrid = np.arange(ncol)\n rowgrid = np.arange(nrow)\n rowmean=np.sum(rowgrid*Irow)/Isum\n colmean=np.sum(colgrid*Icol)/Isum\n ROW,COL=np.indices((nrow,ncol)) \n rowgrid = rowgrid - rowmean # centered\n colgrid = colgrid - colmean\n Mrr = np.sum(rowgrid**2*data)/Isum\n Mcc = np.sum(colgrid**2*data)/Isum\n Mrc = np.sum(np.outer(rowgrid,colgrid)*data)/Isum \n return Mcc, Mrr, Mrc", "def weights_treatment_parameters(init_dict, GRID):\n GRID = np.linspace(0.01, 0.99, num=99, endpoint=True)\n\n coeffs_untreated = init_dict[\"UNTREATED\"][\"params\"]\n coeffs_treated = init_dict[\"TREATED\"][\"params\"]\n cov = construct_covariance_matrix(init_dict)\n x = simulate_covariates(init_dict)\n\n # We take the specified distribution for the cost shifters from the paper.\n cost_mean, cost_sd = -0.0026, np.sqrt(0.270)\n v_mean, v_sd = 0.00, np.sqrt(cov[2, 2])\n\n eval_points = norm.ppf(GRID, loc=v_mean, scale=v_sd)\n\n ate_weights = np.tile(1.0, 99)\n tut_weights = norm.cdf(eval_points, loc=cost_mean, scale=cost_sd)\n\n tt_weights = 1 - tut_weights\n\n def tut_integrand(point):\n eval_point = norm.ppf(point, loc=v_mean, scale=v_sd)\n return norm.cdf(eval_point, loc=cost_mean, scale=cost_sd)\n\n def tt_integrand(point):\n eval_point = norm.ppf(point, loc=v_mean, scale=v_sd)\n return norm.cdf(eval_point, loc=cost_mean, scale=cost_sd)\n\n # Scaling so that the weights integrate to one.\n tut_scaling = quad(tut_integrand, 0.01, 0.99)[0]\n tut_weights /= tut_scaling\n\n tt_scaling = quad(tt_integrand, 0.01, 0.99)[0]\n tt_weights /= tt_scaling\n\n mte = mte_information(coeffs_treated, coeffs_untreated, cov, GRID, x, init_dict)\n\n return ate_weights, tt_weights, tut_weights, mte", "def _second_moment(R, sig_l, sig_m, lum, mass, Mbh, beta, tensor,\n sigmaPsf, normPsf, step, nrad, surf_l, pixSize):\n if (max(sigmaPsf) > 0) and (pixSize > 
0): # PSF convolution\n\n # Kernel step is 1/4 of largest value between sigma(min) and 1/2 pixel side.\n # Kernel half size is the sum of 3*sigma(max) and 1/2 pixel diagonal.\n #\n if step == 0:\n step = max(pixSize/2., np.min(sigmaPsf))/4.\n mx = 3*np.max(sigmaPsf) + pixSize/np.sqrt(2)\n\n # Make grid linear in log of radius RR\n #\n rmax = np.max(R) + mx # Radius of circle containing all data + convolution\n logRad = np.linspace(np.log(step), np.log(rmax), nrad) # Linear grid in log(RR)\n rr = np.exp(logRad)\n\n # The model Vrms computation is only performed on the radial grid\n # which is then used to interpolate the values at any other location\n #\n wm2Pol = np.empty_like(rr)\n mgePol = np.empty_like(rr)\n rup = 3*np.max(sig_l)\n for j in range(rr.size): # Integration of equation (50)\n wm2Pol[j] = quadva(_integrand, [rr[j], rup],\n args=(sig_l, sig_m, lum, mass, Mbh, rr[j], beta, tensor))[0]\n mgePol[j] = np.sum(surf_l * np.exp(-0.5*(rr[j]/sig_l)**2))\n\n nx = np.ceil(rmax/step)\n x1 = np.linspace(-nx, nx, 2*nx)*step\n xCar, yCar = np.meshgrid(x1, x1) # Cartesian grid for convolution\n\n # Interpolate MGE model and Vrms over cartesian grid\n #\n r1 = 0.5*np.log(xCar**2 + yCar**2) # Log radius of cartesian grid\n wm2Car = np.interp(r1, logRad, wm2Pol)\n mgeCar = np.interp(r1, logRad, mgePol)\n\n nk = np.ceil(mx/step)\n kgrid = np.linspace(-nk, nk, 2*nk)*step\n xgrid, ygrid = np.meshgrid(kgrid, kgrid) # Kernel is square\n\n # Compute kernel with equation (A6) of Cappellari (2008).\n # Normalization is irrelevant here as it cancels out.\n #\n kernel = np.zeros_like(xgrid)\n dx = pixSize/2\n sp = np.sqrt(2)*sigmaPsf\n for j in range(len(sigmaPsf)):\n kernel += normPsf[j] \\\n * (special.erf((dx-xgrid)/sp[j]) + special.erf((dx+xgrid)/sp[j])) \\\n * (special.erf((dx-ygrid)/sp[j]) + special.erf((dx+ygrid)/sp[j]))\n kernel /= np.sum(kernel)\n\n # Seeing and aperture convolution with equation (A3)\n #\n muCar = np.sqrt(signal.fftconvolve(wm2Car, kernel, mode='same')\n / signal.fftconvolve(mgeCar, kernel, mode='same'))\n\n # Interpolate convolved image at observed apertures.\n # Aperture integration was already included in the kernel.\n #\n mu = bilinear_interpolate(x1, x1, muCar, R/np.sqrt(2), R/np.sqrt(2))\n\n else: # No PSF convolution: just compute values\n\n mu = np.empty_like(R)\n rmax = 3*np.max(sig_l)\n for j in range(R.size):\n wm2Pol = quadva(_integrand, [R[j], rmax],\n args=(sig_l, sig_m, lum, mass, Mbh, R[j], beta, tensor))[0]\n mgePol = np.sum( surf_l * np.exp(-0.5*(R[j]/sig_l)**2) )\n mu[j] = np.sqrt(wm2Pol/mgePol)\n\n return mu", "def get_thrust_and_moment(self):\n\n f1 = self.k_f * self.omega_1 ** 2\n f2 = self.k_f * self.omega_2 ** 2\n \n # c is often used to indicate \"collective\" thrust\n c = f1 + f2\n \n M_x = (f1 - f2) * self.l\n return c, M_x", "def d2(self):\r\n return self.d1() - self.sigma*self.t**0.5", "def d2(self):\n d1 = self.d1()\n return d1 - self.sigma * (self.t **(0.5))", "def get_dists_2():\n d1 = Distribution(['0', '1'], [1 / 2, 1 / 2])\n d2 = Distribution(['0', '1'], [1 / 3, 2 / 3])\n d3 = Distribution(['0', '1'], [2 / 5, 3 / 5])\n return d1, d2, d3", "def get_hyperparameter_bounds():\n minf = float(\"-inf\")\n inf = float(\"inf\")\n params = dict(mu=(minf,inf), nu=(0.0 ,inf), r=(0.0, inf), s=(0.0, inf))\n return params", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['low' ] = self.low\n paramDict['high' ] = self.high\n paramDict['alpha'] = self.alpha\n paramDict['beta' ] = self.beta\n return paramDict", "def 
getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['range'] = self.range\n return paramDict\n # no other additional parameters required", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['lambda'] = self.lambdaVar\n paramDict['k' ] = self.k\n paramDict['low' ] = self.low\n return paramDict", "def doParametersOfInterest(self):\n ''' ref : physicsmodel -> rvf\n self.modelBuilder.out.var(\"MH\").setRange(float(self.mHRange[0]),float(self.mHRange[1]))\n self.modelBuilder.out.var(\"MH\").setConstant(False)\n '''\n\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doVar(\"Fvbf[0,0,1]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu,Fvbf\")\n self.modelBuilder.doVar(\"\")\n self.modelBuilder.factory_('expr::ggH_s_func(\"(@0-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_b_func(\"(1-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_sbi_func(\"sqrt(@0)*(1.-@1)\", mu,Fvbf)')\n\n self.modelBuilder.factory_('expr::vbfH_s_func(\"(@0-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"(1-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_sbi_func(\"sqrt(@0)*(@1)\", mu,Fvbf)')", "def MyBaseMoments(p,q,img,gauss_sigma,gauss_centroid=None, gauss_g1=0., gauss_g2=0.):\n weight = galsim.Image(np.zeros_like(img.array))\n gauss = galsim.Gaussian(sigma=gauss_sigma*pixel_scale).shear(g1=gauss_g1,g2=gauss_g2)\n if gauss_centroid is None:\n gauss_centroid = img.true_center\n weight = gauss.drawImage(image=weight, scale=pixel_scale, method='no_pixel', use_true_center=True, offset=(gauss_centroid-img.true_center)*(1))\n x = np.linspace(img.xmin-img.center.x*0-gauss_centroid.x*1, img.xmax-img.center.x*0-gauss_centroid.x*1, img.xmax-img.xmin+1)+0.*0.5\n y = np.linspace(img.ymin-img.center.y*0-gauss_centroid.y*1, img.ymax-img.center.y*0-gauss_centroid.y*1, img.ymax-img.ymin+1)+0.*0.5\n X, Y = np.meshgrid(x,y)\n\n Q00 = np.sum(weight.array*img.array)\n Q10 = gauss_centroid.x + np.sum(X*weight.array*img.array)/Q00\n Q01 = gauss_centroid.y + np.sum(Y*weight.array*img.array)/Q00\n Q20 = np.sum((X**2)*weight.array*img.array)\n Q02 = np.sum((Y**2)*weight.array*img.array)\n\n monomial = 1.\n for pp in xrange(p):\n monomial *= X\n for qq in xrange(q):\n monomial *= Y\n Qpq = np.sum(monomial*weight.array*img.array) #/Q00\n\n return Qpq", "def params(self):\n return {'cfg': self.cfg,\n 'momentum': self.momentum,\n 'center': self.center,\n 'scale': self.scale,\n 'epsilon': self.epsilon,\n 'act_fn': self.act_fn}", "def initialize(self):\n\t\tmu = 0\n\t\tsigma = np.sqrt(2 / self.dataset[\"d\"])\n\n\t\tself.F1 = np.random.normal(mu, sigma, self.F1.shape)\n\t\tself.F2 = np.random.normal(mu, sigma, self.F2.shape)\n\t\tself.W = np.random.normal(mu, sigma, self.W.shape)\n\n\t\tself.F1_momentum = np.zeros(self.F1.shape)\n\t\tself.F2_momentum = np.zeros(self.F2.shape)\n\t\tself.W_momentum = np.zeros(self.W.shape)", "def init_hyperparameters():\n alpha = .8\n alpha2 = 1\n\n return alpha, alpha2", "def _get_params(self):\r\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", "def _get_params(self):\r\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", "def logdprior(parameters, hyperparameters):\n sigma_w_part = parameters[0] + 
invgamma_logpdf(parameters[0],\n hyperparameters[\"sigma_w_shape\"], hyperparameters[\"sigma_w_scale\"])\n sigma_v_part = parameters[1] + invgamma_logpdf(parameters[1], hyperparameters[\"sigma_v_shape\"], hyperparameters[\"sigma_v_scale\"])\n return sigma_w_part + sigma_v_part", "def initializeDistribution(self):\n if self.functionType == 'CDF':\n self._distribution = distribution1D.BasicMultiDimensionalInverseWeight(str(self.dataFilename), self.p,True)\n else:\n self._distribution = distribution1D.BasicMultiDimensionalInverseWeight(str(self.dataFilename), self.p,False)\n self.dimensionality = self._distribution.returnDimensionality()\n self.lowerBound = [self.returnLowerBound(dim) for dim in range(self.dimensionality)]\n self.upperBound = [self.returnUpperBound(dim) for dim in range(self.dimensionality)]", "def get_initial_params(self, x, y, yerr):\n# p0 = zeros(self.max_phonons + 1)\n p0 = zeros(2)\n p0[0] = 100\n p0[1] = .1\n return p0", "def gaussian_parameters(h, dim=-1):\n m, h = torch.split(h, h.size(dim) // 2, dim=dim)\n v = F.softplus(h) + 1e-8\n return m, v", "def moments(values):\n\n meanValue = numpy.mean(values)\n return (meanValue,\n numpy.sqrt(moment(values, meanValue, 2)),\n moment(values, meanValue, 3),\n moment(values, meanValue, 4))" ]
[ "0.6455378", "0.6220392", "0.6185545", "0.6109156", "0.6106636", "0.60708535", "0.60512894", "0.60178155", "0.5966822", "0.59502286", "0.58735156", "0.5850575", "0.58171284", "0.5816514", "0.57661724", "0.5720821", "0.57173246", "0.57122564", "0.5709464", "0.57005703", "0.56566393", "0.56566393", "0.56566393", "0.5650613", "0.5647901", "0.56205666", "0.56183773", "0.5581819", "0.5580499", "0.5579431", "0.5574771", "0.55595124", "0.5549218", "0.5547417", "0.55450284", "0.55362755", "0.5533073", "0.55328864", "0.55292463", "0.55152035", "0.55004686", "0.54993844", "0.54803777", "0.547777", "0.54711723", "0.54631597", "0.5454735", "0.54525715", "0.5436794", "0.54347324", "0.54311454", "0.54289407", "0.5415726", "0.5407236", "0.540343", "0.5398714", "0.5398714", "0.53965294", "0.5371645", "0.5370123", "0.5353437", "0.5351195", "0.5346446", "0.5343545", "0.53380007", "0.53346354", "0.5332762", "0.53268677", "0.53158087", "0.53155935", "0.5310139", "0.5309718", "0.5300877", "0.5296502", "0.5292304", "0.5292062", "0.528314", "0.5280636", "0.5273658", "0.52717936", "0.5267175", "0.5266302", "0.5263959", "0.525519", "0.52547693", "0.5243865", "0.52428687", "0.5240332", "0.52383465", "0.52359915", "0.5235011", "0.5232048", "0.5220906", "0.5220906", "0.5220359", "0.521662", "0.5211572", "0.5206511", "0.52055675" ]
0.6434524
1
This function computes the distribution's internal parameters from its first two moments.
def _compute_internals(self, moments):

    [mean, stdv] = moments
    internals = {}
    internals['a'] = mean - np.sqrt(3) * stdv
    internals['b'] = mean + np.sqrt(3) * stdv

    return internals
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['mu'] = mean\n internals['sigma'] = stdv\n\n return internals", "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['k'] = mean ** 2. / stdv ** 2.\n internals['LAMBDA'] = mean / stdv ** 2.\n\n return internals", "def calc_moments(distribution):\n x = torch.linspace(2, 22, 31)\n d_mean = torch.sum(x * distribution)\n d_var = torch.sum(distribution * (x - d_mean) ** 2) \n \n return d_mean, torch.sqrt(d_var)", "def posteriorDistribution(x,z,beta,sigma2):\r\n ### TODO: Write your code here\r\n mu = 0\r\n Cov = 0\r\n\r\n x_s = []\r\n for i in np.linspace(-1 , 1 , 150):\r\n for j in np.linspace(-1 , 1 , 150):\r\n x_s.append([i,j])\r\n x_s = np.array(x_s)\r\n\r\n X = []\r\n for i in x:\r\n j = [1,i[0]]\r\n X.append(j)\r\n X = np.array(X)\r\n\r\n common = np.matmul( X.T , X) + np.identity(2) * sigma2/beta\r\n common = np.linalg.inv(common)\r\n Cov = common * sigma2\r\n mu = np.matmul(common , np.matmul (X.T , z) )\r\n mu = mu.flatten()\r\n print(\"X.shape: \" , X.shape)\r\n print(\"z.shape: \",z.shape)\r\n print(\"Cov.shape\" , Cov.shape)\r\n print(\"mu.shape: \",mu.shape)\r\n density = util.density_Gaussian(mu , Cov , x_s).reshape(150 , 150 ).T\r\n print(\"density.shape\",density.shape)\r\n X,Y = np.meshgrid( np.linspace(-1,1,150) , np.linspace(-1,1,150) )\r\n\r\n \r\n\r\n plt.contour( X , Y , np.reshape(density , (150, 150 )))\r\n plt.plot(-0.1 , -0.5 , marker = 'o' , MarkerSize = 10 , label = 'point a')\r\n plt.xlabel('a0 ')\r\n plt.ylabel(' a1 ')\r\n plt.legend()\r\n plt.xlim = (-1,1)\r\n plt.ylim = (-1,1)\r\n plt.title('p(a|x1,z1....xn,zn) for '+ str(len(x)) +' samples')\r\n plt.show() \r\n print('-x-x-x-x-x-x-x-x-x')\r\n\r\n return (mu,Cov)", "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n cov = stdv / mean\n zeta = np.sqrt(np.log(1. 
+ cov ** 2.))\n LAMBDA = np.log(mean) - 0.5 * zeta ** 2.\n internals = {}\n internals['LAMBDA'] = LAMBDA\n internals['zeta'] = zeta\n\n return internals", "def moments(self):", "def parameters(self):\n\n m = self.__m\n s = linalg.cholesky(self.__prod).transpose()\n w = self.__weight\n\n # Compute the parameters of the posterior distribution.\n return linalg.solve(s[:m, :m], s[:m, m:]), \\\n np.dot(s[:m, :m].transpose(), s[:m, :m]), \\\n np.dot(s[m:, m:].transpose(), s[m:, m:]) / w, \\\n w", "def parameters_to_marginal_moments(prob, distmu, distsigma):\n good = np.isfinite(prob) & np.isfinite(distmu) & np.isfinite(distsigma)\n prob = prob[good]\n distmu = distmu[good]\n distsigma = distsigma[good]\n distmean, diststd, _ = parameters_to_moments(distmu, distsigma)\n rbar = (prob * distmean).sum()\n r2bar = (prob * (np.square(diststd) + np.square(distmean))).sum()\n return rbar, np.sqrt(r2bar - np.square(rbar))", "def _get_distribution_variables(self, R):\n domain, Domain = self.domain_Domain\n phase_name = self.phase_name\n\n R_typ = self.phase_param.R_typ # [m]\n # Particle-size distribution (area-weighted)\n f_a_dist = self.phase_param.f_a_dist(R) # [m-1]\n\n # Ensure the distribution is normalised, irrespective of discretisation\n # or user input\n f_a_dist = f_a_dist / pybamm.Integral(f_a_dist, R) # [m-1]\n\n # Volume-weighted particle-size distribution\n f_v_dist = R * f_a_dist / pybamm.Integral(R * f_a_dist, R) # [m-1]\n\n # Number-based particle-size distribution\n f_num_dist = (f_a_dist / R**2) / pybamm.Integral(\n f_a_dist / R**2, R\n ) # [m-1]\n\n # True mean radii and standard deviations, calculated from the f_a_dist that\n # was given, all have units [m]\n R_num_mean = pybamm.Integral(R * f_num_dist, R)\n R_a_mean = pybamm.Integral(R * f_a_dist, R)\n R_v_mean = pybamm.Integral(R * f_v_dist, R)\n sd_num = pybamm.sqrt(pybamm.Integral((R - R_num_mean) ** 2 * f_num_dist, R))\n sd_a = pybamm.sqrt(pybamm.Integral((R - R_a_mean) ** 2 * f_a_dist, R))\n sd_v = pybamm.sqrt(pybamm.Integral((R - R_v_mean) ** 2 * f_v_dist, R))\n\n # X-average the means and standard deviations to give scalars\n # (to remove the \"electrode\" domain, if present)\n R_num_mean = pybamm.x_average(R_num_mean)\n R_a_mean = pybamm.x_average(R_a_mean)\n R_v_mean = pybamm.x_average(R_v_mean)\n sd_num = pybamm.x_average(sd_num)\n sd_a = pybamm.x_average(sd_a)\n sd_v = pybamm.x_average(sd_v)\n\n # X-averaged distributions, or broadcast\n if R.domains[\"secondary\"] == [f\"{domain} electrode\"]:\n f_a_dist_xav = pybamm.x_average(f_a_dist)\n f_v_dist_xav = pybamm.x_average(f_v_dist)\n f_num_dist_xav = pybamm.x_average(f_num_dist)\n else:\n f_a_dist_xav = f_a_dist\n f_v_dist_xav = f_v_dist\n f_num_dist_xav = f_num_dist\n\n # broadcast\n f_a_dist = pybamm.SecondaryBroadcast(f_a_dist_xav, [f\"{domain} electrode\"])\n f_v_dist = pybamm.SecondaryBroadcast(f_v_dist_xav, [f\"{domain} electrode\"])\n f_num_dist = pybamm.SecondaryBroadcast(\n f_num_dist_xav, [f\"{domain} electrode\"]\n )\n\n variables = {\n f\"{Domain} {phase_name}particle sizes\": R / R_typ,\n f\"{Domain} {phase_name}particle sizes [m]\": R,\n f\"{Domain} area-weighted {phase_name}particle-size\"\n \" distribution [m-1]\": f_a_dist,\n f\"{Domain} volume-weighted {phase_name}particle-size\"\n \" distribution [m-1]\": f_v_dist,\n f\"{Domain} number-based {phase_name}particle-size\"\n \" distribution [m-1]\": f_num_dist,\n f\"{Domain} area-weighted mean particle radius [m]\": R_a_mean,\n f\"{Domain} volume-weighted mean particle radius [m]\": R_v_mean,\n f\"{Domain} 
number-based mean particle radius [m]\": R_num_mean,\n f\"{Domain} area-weighted {phase_name}particle-size\"\n \" standard deviation [m]\": sd_a,\n f\"{Domain} volume-weighted {phase_name}particle-size\"\n \" standard deviation [m]\": sd_v,\n f\"{Domain} number-based {phase_name}particle-size\"\n \" standard deviation [m]\": sd_num,\n # X-averaged sizes and distributions\n f\"X-averaged {domain} {phase_name}particle sizes [m]\": pybamm.x_average(R),\n f\"X-averaged {domain} area-weighted {phase_name}particle-size \"\n \"distribution [m-1]\": f_a_dist_xav,\n f\"X-averaged {domain} volume-weighted {phase_name}particle-size \"\n \"distribution [m-1]\": f_v_dist_xav,\n f\"X-averaged {domain} number-based {phase_name}particle-size \"\n \"distribution [m-1]\": f_num_dist_xav,\n }\n\n return variables", "def grd_posterior_gaussian(self, ) -> Tuple[np.ndarray, np.ndarray]:\n xmin, xmax = self.x_range\n ymin, ymax = self.y_range\n\n mu = np.array([0, 0])\n sigma = np.zeros((2, 2))\n\n _sample = self._sample\n _prior = self.prior\n\n def mean_x(x: float, y: float):\n return x * _sample(x, y) * _prior.eval(x, y)\n\n def mean_y(x: float, y: float):\n return y * _sample(x, y) * _prior.eval(x, y)\n\n def var_x(x: float, y: float):\n return x * mean_x(x, y)\n\n def var_y(x: float, y: float):\n return y * mean_y(x, y)\n\n # def var_xy(x: float, y: float):\n # return x * mean_y(x, y)\n\n # First moment\n (mu[0], mu[1]) = (integrate.dblquad(mean_x, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],\n integrate.dblquad(mean_y, xmin, xmax, lambda x: ymin, lambda x: ymax)[0])\n (sigma[0, 0], sigma[1, 1]) = \\\n (integrate.dblquad(var_x, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],\n integrate.dblquad(var_y, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],)\n # integrate.dblquad(var_xy, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],)\n return mu, sigma", "def calc_parameters(T, N, sigma, r, div):\n dt = T / N\n u = np.exp(sigma * np.sqrt(dt))\n d = 1 / u\n b = r - div\n q = 1 / 2 + 1 / 2 * (b - 1 / 2 * sigma ** 2) * np.sqrt(dt) / sigma # P(up movement)\n return dt, u, d, q, b", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['mean' ] = self.mean\n paramDict['sigma'] = self.sigma\n return paramDict", "def calc_parameters(T, N, sigma, r, div):\n dt = T/N\n u = np.exp(sigma*np.sqrt(dt))\n d = 1/u\n b = r-div\n q = 1/2 + 1/2 * (b - 1/2 * sigma**2)*np.sqrt(dt)/sigma # P(up movement)\n return(dt, u, d, q, b)", "def random():\n # only care about the value of second_moment:\n # curve = scale * e**(-second_moment^2 q^2)/q^2\n # scale = 6 pi/100 (contrast/density*absorbed_amount)^2 * Vf/radius\n # the remaining parameters can be randomly generated from zero to\n # twice the default value as done by default in compare.py\n pars = dict(\n scale=1,\n second_moment=10**np.random.uniform(1, 3),\n )\n return pars", "def N2_f(d1,d2,rho):\n import statsmodels.sandbox.distributions.extras as extras\n muStandardNormal=0.0 # mean of a standard normal distribution \n varStandardNormal=1.0 # variance of standard normal distribution \n upper=([d1,d2]) # upper bound for two values\n v=varStandardNormal # simplify our notations\n mu=muStandardNormal # simplify our notations\n covM=([v,rho],[rho,v])\n return extras.mvnormcdf(upper,mu,covM)", "def get_means_and_scales(self):\n return self.optim.parameters[::2], np.exp(self.optim.parameters[1::2])", "def initializeDistribution(self):\n self.convertToDistrDict['Laguerre'] = self.convertLaguerreToGamma\n self.convertToQuadDict ['Laguerre'] = 
self.convertGammaToLaguerre\n self.measureNormDict ['Laguerre'] = self.stdProbabilityNorm\n if (not self.upperBoundUsed):\n # and (not self.lowerBoundUsed):\n self._distribution = distribution1D.BasicGammaDistribution(self.alpha,1.0/self.beta,self.low)\n #self.lowerBoundUsed = 0.0\n self.upperBound = sys.float_info.max\n self.preferredQuadrature = 'Laguerre'\n self.preferredPolynomials = 'Laguerre'\n else:\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'Legendre'\n if self.lowerBoundUsed == False:\n a = 0.0\n self.lowerBound = a\n else:\n a = self.lowerBound\n if self.upperBoundUsed == False:\n b = sys.float_info.max\n self.upperBound = b\n else:\n b = self.upperBound\n self._distribution = distribution1D.BasicGammaDistribution(self.alpha,1.0/self.beta,self.low,a,b)", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['mean' ] = self.mean\n paramDict['sigma'] = self.sigma\n paramDict['low'] = self.low\n return paramDict", "def parameters(conv, orthogonal):\n nrm = operator_one_norm(conv.weight).detach().cpu().numpy()\n\n if nrm > 15:\n print('Overflow likely, norm={}'.format(nrm))\n\n m = np.arange(1, len(THETA) + 1)\n vals = m * np.ceil(nrm / THETA)\n mstar = min(1 + np.argmin(vals), 56)\n s = int(np.ceil(nrm / THETA[mstar - 1]))\n\n return mstar, s", "def get_initial_parameters(token_segs):\r\n estems = {} # tracks the average probability of each root\r\n esuffix = {} # tracks the average probability of each suffix\r\n etrans = {} # tracks the average probability of each (transition, feature) pair\r\n eftrans = {} # tracks the average probability of each feature (interface between stem and suffix)\r\n\r\n # collect the probabilities of each object, to be normalized (divided by their totals) later\r\n for ts_list in token_segs:\r\n avg_prob = 1.0 / len(ts_list)\r\n for ts in ts_list:\r\n root = ts.root\r\n rand_val = 1.0\r\n if root in estems:\r\n estems[root] += rand_val * avg_prob\r\n else: estems[root] = rand_val * avg_prob\r\n\r\n suffix = ts.suffix\r\n if suffix in esuffix:\r\n esuffix[suffix] += rand_val * avg_prob\r\n else: esuffix[suffix] = rand_val * avg_prob\r\n\r\n trans = ts.trans\r\n ftrans = feature(root, suffix)\r\n if (trans, ftrans) in etrans:\r\n etrans[(trans, ftrans)] += rand_val * avg_prob\r\n else: etrans[(trans, ftrans)] = rand_val * avg_prob\r\n\r\n if ftrans in eftrans:\r\n eftrans[ftrans] += rand_val * avg_prob\r\n else: eftrans[ftrans] = rand_val * avg_prob\r\n\r\n # divide by the totals\r\n probstems = estems\r\n probsum = sum(probstems.values())\r\n for stem in probstems:\r\n probstems[stem] /= probsum\r\n\r\n probsuffix = esuffix\r\n probsum = sum(probsuffix.values())\r\n for suffix in probsuffix:\r\n probsuffix[suffix] /= probsum\r\n\r\n probtrans = etrans\r\n for trans, ftrans in probtrans:\r\n probtrans[(trans, ftrans)] /= eftrans[ftrans]\r\n\r\n return probstems, probsuffix, probtrans", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale,self.period))", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale,self.period))", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale,self.period))", "def get_distribution(self):\n\n # If the distributions have been updated before.\n if self.update_number > 0:\n for m in range(0, self.document_number, 1):\n for k in range(0, self.topic_number,1):\n probability = self.document_distribution_over_topic[m][k] / self.update_number\n self.document_distribution_over_topic[m][k] = probability\n for 
k in range(0, self.topic_number,1):\n for v in range(0, self.term_number,1):\n probability = self.topic_distribution_over_term[k][v] / self.update_number\n self.topic_distribution_over_term[k][v] = probability\n # The distributions have not been updated once.\n else:\n for m in range(0, self.document_number, 1):\n for k in range(0, self.topic_number, 1):\n self.document_distribution_over_topic[m][k] = (\n (self.document_topic_count_matrix[m][k] + self.alpha[k]) / (\n self.sum_document_by_topic_count[m] + self.sum_alpha))\n for k in range(0, self.topic_number, 1):\n for v in range(0, self.term_number, 1):\n self.topic_distribution_over_term[k][v] = (\n (self.topic_term_count_matrix[k][v] + self.beta[v]) / (\n self.sum_topic_by_term_count[k] + self.sum_beta))", "def generate_moments(hyper, params):\n\n k, d = hyper['k'], hyper['d']\n\n p = params # Shorthand, don't judge\n m = {} # Moments\n for x1 in xrange(1,d+1):\n m[(x1,)] = sum( p[(h,x1)] * p[(h,)] for h in xrange(1,k+1) )\n for x2 in xrange(1,d+1):\n m[(x1,x2)] = sum( p[(h,x1)] * p[(h,x2)] * p[(h,)] for h in xrange(1,k+1) )\n for x3 in xrange(1,d+1):\n m[(x1,x2,x3)] = sum( p[(h,x1)] * p[(h,x2)] * p[(h,x3)] * p[(h,)] for h in xrange(1,k+1) )\n return m", "def update_moments_r(self):\n denominator = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2 )\n nominator1 = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2 ) * self.constellation\n \n nominator2 = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2) * np.power(self.constellation, 2)\n try:\n \n moment1 = nominator1.sum(axis=1) / denominator.sum(axis=1)\n moment2 = nominator2.sum(axis=1) / denominator.sum(axis=1)\n assert np.all(np.logical_not(np.isnan(moment1))) and np.all(np.logical_not(np.isnan(moment2)))\n except:\n print(\"Oops! That was no valid number. Try again...\")\n\n \n self.mu = moment1\n return moment1, moment2", "def var_parameters(jd,mag,err):\n\n mean = np.mean(mag)\n nepochs = float(len(jd))\n\n chi = np.sum( (mag - mean)**2. / err**2. 
)\n p_chi = chi2.cdf(chi,(nepochs-1))\n\n\n a = (mag-mean)**2\n ex_var = (np.sum(a-err**2)/((nepochs*(mean**2))))\n sd = np.sqrt((1./(nepochs-1))*np.sum(((a-err**2)-ex_var*(mean**2))**2))\n ex_verr = sd/((mean**2)*np.sqrt(nepochs))\n\n\n return p_chi, ex_var, ex_verr", "def priorDistribution(beta):\r\n ### TODO: Write your code here\r\n m_v = np.zeros(2)\r\n print(\"m_v shape: \" ,m_v.shape)\r\n c_v = np.array( [ [ beta , 0 ] , [ 0 , beta ] ] )\r\n print(\"c_v shape: \",c_v.shape)\r\n x_s = []\r\n for i in np.linspace(-1 , 1 , 150):\r\n for j in np.linspace(-1 , 1 , 150):\r\n x_s.append([i,j])\r\n x_s = np.array(x_s)\r\n print(\"x_s shape: \",x_s.shape)\r\n density = util.density_Gaussian(m_v , c_v , x_s)\r\n #print(density)\r\n print(\"length density \",len(density))\r\n X,Y = np.meshgrid( np.linspace(-1,1,150) , np.linspace(-1,1,150) )\r\n plt.contour( X , Y , np.reshape(density , (150, 150 )) )\r\n plt.plot(-0.1 , -0.5 , marker = 'o' , MarkerSize = 10 , label = 'point a')\r\n plt.xlabel('a0 ')\r\n plt.ylabel(' a1 ')\r\n plt.legend()\r\n plt.title('p(a)')\r\n plt.show() \r\n print('-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x')\r\n return", "def getInitParams(self):\n paramDict = Distribution.getInitParams(self)\n paramDict['strategy'] = self.strategy\n paramDict['nPoints'] = self.nPoints\n return paramDict", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['lambda'] = self.lambdaVar # rate parameter\n paramDict['low' ] = self.low # lower domain boundary\n return paramDict", "def PH2From3Moments (moms, prec=1e-14):\n\n m1, m2, m3 = moms\n\n # check moment boounds\n m2l = APH2ndMomentLowerBound(m1, 2) \n m3l = APH3rdMomentLowerBound(m1, m2, 2) \n m3u = APH3rdMomentUpperBound(m1, m2, 2) \n \n if m2<m2l:\n raise Exception(\"The given second moment is not feasible!\") \n if m3<m3l:\n raise Exception(\"The given third moment is not feasible (too small)!\")\n if m3>m3u:\n raise Exception(\"The given third moment is not feasible (too large)!\")\n \n # check if we have an exponential distribution\n if abs(m2/m1/m1-2.0) < prec:\n return (np.matrix([1]), np.matrix([[-1/m1]]))\n \n # calculate parameters\n b = 3.0*m1*m2-m3\n c = 3.0*m2*m2-2.0*m1*m3\n e = -2.0*m1*m1+m2\n a = b*b+6.0*c*e\n if a<0:\n a = 0\n a = math.sqrt(a)\n if c>0:\n lambda1 = (b - a) / c\n lambda2 = (b + a) / c\n p = (-b-6.0*m1*e+a) / (b+a)\n elif c<0:\n lambda1 = (b + a) / c\n lambda2 = (b - a) / c\n p = (b+6.0*m1*e+a) / (-b+a)\n elif c==0:\n lambda1 = 0\n lambda2 = 1.0 / m1\n p = 0\n \n # return the result\n return (np.matrix([p,1.0-p]), np.matrix([[-lambda1, lambda1], [0,-lambda2]]))", "def prop_func_form_params(param1,param2,*arg):\n return np.log(MH.simple_2D_Gauss(param1-param2,arg[0],arg[1]))", "def fdist(param1, param2):\n return(prng.gamma(param1, param2))", "def initializeDistribution(self):\n self.convertToDistrDict['Jacobi'] = self.convertJacobiToBeta\n self.convertToQuadDict ['Jacobi'] = self.convertBetaToJacobi\n self.measureNormDict ['Jacobi'] = self.stdProbabilityNorm\n #this \"if\" section can only be called if distribution not generated using readMoreXML\n if (not self.upperBoundUsed) and (not self.lowerBoundUsed):\n self._distribution = distribution1D.BasicBetaDistribution(self.alpha,self.beta,self.high-self.low,self.low)\n else:\n if self.lowerBoundUsed == False:\n a = 0.0\n else:\n a = self.lowerBound\n if self.upperBoundUsed == False:\n b = sys.float_info.max\n else:\n b = self.upperBound\n self._distribution = 
distribution1D.BasicBetaDistribution(self.alpha,self.beta,self.high-self.low,a,b,self.low)\n self.preferredPolynomials = 'Jacobi'\n self.compatibleQuadrature.append('Jacobi')\n self.compatibleQuadrature.append('ClenshawCurtis')", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['apex' ] = self.apex\n paramDict['min' ] = self.min\n paramDict['max' ] = self.max\n return paramDict", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['mu' ] = self.mu\n return paramDict", "def distribution(self):\n \n #external_distribution serves both the purpose of external setting of distribution and the caching of distribution()\n if self.external_distribution:\n return self.external_distribution_array;\n else:\n energy_vector = []\n superset = self.generate_superset(0) \n \n for i in superset:\n state = self.ket(i)\n \n norm_squared = np.dot(state.T, state)\n \n if norm_squared > 0: #zero is appended at the end\n energy = np.dot(state.T, np.dot( self.epsilon, state))\n interaction = np.dot(state.T, np.dot( self.u, state))/2.0 #divide by two. Otherwise, <l r| U |l r > = U_LR + U_RL = 2U\n #print state, np.dot(self.u, state) \n #print interaction\n energy_vector.append( energy + interaction )\n \n energy_vector.insert(0, 0.0) \n probability = np.exp( np.multiply(-self.beta, energy_vector)) \n probability /= probability.sum() \n return probability", "def initializeDistribution(self):\n self.convertToDistrDict['Hermite'] = self.convertHermiteToNormal\n self.convertToQuadDict ['Hermite'] = self.convertNormalToHermite\n self.measureNormDict ['Hermite'] = self.stdProbabilityNorm\n if (not self.upperBoundUsed) and (not self.lowerBoundUsed):\n self._distribution = distribution1D.BasicNormalDistribution(self.mean,\n self.sigma)\n self.lowerBound = -sys.float_info.max\n self.upperBound = sys.float_info.max\n self.preferredQuadrature = 'Hermite'\n self.preferredPolynomials = 'Hermite'\n else:\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'Legendre'\n if self.lowerBoundUsed == False:\n a = -sys.float_info.max\n self.lowerBound = a\n else:\n a = self.lowerBound\n if self.upperBoundUsed == False:\n b = sys.float_info.max\n self.upperBound = b\n else:\n b = self.upperBound\n self._distribution = distribution1D.BasicNormalDistribution(self.mean,\n self.sigma,\n a,b)", "def prop_dist_form_params(*arg):\n return np.random.multivariate_normal(*arg)", "def _get_prior_params(self):\n # relation transformation matrix\n M_mu = np.eye(self.n_polarities, dtype=\"float32\")\n M_mu[1, :] = [0., 0.3, 0.]\n M_mu = np.tile(M_mu, (self.n_rels, 1)).reshape(\n self.n_rels, self.n_polarities, self.n_polarities\n )\n # for rel, rel_idx in iteritems(self.rel2idx):\n # # swap axes for contrastive relations\n # if check_rel(rel, CONTRASTIVE_RELS):\n # mu_i = M_mu[rel_idx]\n # mu_i[[0, 2]] = mu_i[[2, 0]]\n M_mu = torch.tensor(M_mu)\n M_sigma = torch.tensor(\n np.ones((self.n_rels, self.n_polarities, self.n_polarities),\n dtype=\"float32\")\n )\n # beta\n beta_p = 5. * torch.tensor(np.ones((self.n_rels, self.n_polarities),\n dtype=\"float32\"))\n beta_q = 5. 
* torch.tensor(np.ones((self.n_rels, self.n_polarities),\n dtype=\"float32\"))\n # z_epsilon\n z_epsilon_p = torch.tensor(1.)\n z_epsilon_q = torch.tensor(15.)\n # scale factor\n scale_factor = torch.tensor(34.)\n return {\"M_mu\": M_mu, \"M_sigma\": M_sigma, \"beta_p\": beta_p,\n \"beta_q\": beta_q, \"z_epsilon_p\": z_epsilon_p,\n \"z_epsilon_q\": z_epsilon_q, \"scale_factor\": scale_factor}", "def demo_indef():\n n_dim = 2\n A = np.eye(n_dim)\n A[1, 1] = -1.\n covar = np.eye(n_dim)\n mean = np.zeros(n_dim)\n approx = approx_quad_form(mean, covar, A)\n\n # Sample from true dist\n n_sample = 10000\n x = np.random.multivariate_normal(mean, covar, n_sample)\n q_samples = np.zeros(n_sample)\n for i in range(n_sample):\n q_samples[i] = x[i] @ A @ x[i]\n\n q = np.linspace(-10, 10)\n\n plt.plot(\n q, approx(q), label='Approx.',\n color='tab:blue', linestyle='--')\n bins = np.linspace(-8, 8, 81)\n bins[0] = -np.inf\n bins[-1] = np.inf\n plt.hist(\n q_samples, density=True, histtype='stepfilled',\n bins=bins,\n alpha=0.5, color='black', label='Samples')\n plt.xlabel('q')\n plt.ylabel('pdf(q) [-]')\n plt.legend()\n\n central_moments_sample = scipy.stats.moment(\n q_samples, moment=[0, 1, 2, 3, 4])\n print(central_moments_sample)", "def getCrowDistDict(self):\n retDict = Distribution.getCrowDistDict(self)\n retDict['mu'] = self.mean\n retDict['sigma'] = self.sigma\n return retDict", "def initializeDistribution(self):\n self.raiseAMessage('initialize distribution')\n mu = distribution1D.vectord_cxx(len(self.mu))\n for i in range(len(self.mu)):\n mu[i] = self.mu[i]\n covariance = distribution1D.vectord_cxx(len(self.covariance))\n for i in range(len(self.covariance)):\n covariance[i] = self.covariance[i]\n if self.method == 'spline':\n if self.covarianceType != 'abs':\n self.raiseAnError(IOError,'covariance with type ' + self.covariance + ' is not implemented for ' + self.method + ' method')\n self._distribution = distribution1D.BasicMultivariateNormal(covariance, mu)\n elif self.method == 'pca':\n self._distribution = distribution1D.BasicMultivariateNormal(covariance, mu, str(self.covarianceType), self.rank)\n if self.transformation:\n self.lowerBound = [-sys.float_info.max]*self.rank\n self.upperBound = [sys.float_info.max]*self.rank\n else:\n self.lowerBound = [self.returnLowerBound(dim) for dim in range(self.dimension)]\n self.upperBound = [self.returnUpperBound(dim) for dim in range(self.dimension)]", "def _init_params(self):\n self.W_ems = []\n self.b_ems = []\n if self.rank_n_approx:\n W_em1 = self.init_fn[0](self.n_in,\n self.rank_n_approx,\n self.sparsity[0],\n self.scale[0],\n self.rng)\n W_em2 = self.init_fn[0](self.rank_n_approx,\n self.n_hids[0],\n self.sparsity[0],\n self.scale[0],\n self.rng)\n self.W_em1 = theano.shared(W_em1,\n name='W1_0_%s'%self.name)\n self.W_em2 = theano.shared(W_em2,\n name='W2_0_%s'%self.name)\n self.W_ems = [self.W_em1, self.W_em2]\n\n else:\n W_em = self.init_fn[0](self.n_in,\n self.n_hids[0],\n self.sparsity[0],\n self.scale[0],\n self.rng)\n self.W_em = theano.shared(W_em,\n name='W_0_%s'%self.name)\n self.W_ems = [self.W_em]\n\n self.b_em = theano.shared(\n self.bias_fn[0](self.n_hids[0], self.bias_scale[0],self.rng),\n name='b_0_%s'%self.name)\n self.b_ems = [self.b_em]\n\n for dx in range(1, self.n_layers):\n W_em = self.init_fn[dx](self.n_hids[dx-1] / self.pieces[dx],\n self.n_hids[dx],\n self.sparsity[dx],\n self.scale[dx],\n self.rng)\n W_em = theano.shared(W_em, name='W_%d_%s'%(dx,self.name))\n self.W_ems += [W_em]\n\n b_em = theano.shared(\n 
self.bias_fn[dx](self.n_hids[dx], self.bias_scale[dx],self.rng),\n name='b_%d_%s'%(dx,self.name))\n self.b_ems += [b_em]\n\n self.params = [x for x in self.W_ems]\n\n if self.learn_bias and self.learn_bias!='last':\n self.params = [x for x in self.W_ems] + [x for x in self.b_ems]\n elif self.learn_bias == 'last':\n self.params = [x for x in self.W_ems] + [x for x in\n self.b_ems][:-1]\n self.params_grad_scale = [self._grad_scale for x in self.params]\n if self.weight_noise:\n self.nW_ems = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_ems]\n self.nb_ems = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_ems]\n\n self.noise_params = [x for x in self.nW_ems] + [x for x in self.nb_ems]\n self.noise_params_shape_fn = [constant_shape(x.get_value().shape)\n for x in self.noise_params]", "def Rosenblatt_Transform(dist, x_i): \n if dist.stats(moments = 's') > 1 or dist.stats(moments = 's') < -1:\n \n x_N_mean = dist.median()\n x_N_std = (x_i - x_N_mean)/sst.norm.ppf(dist.cdf(x_i))\n \n return(x_N_mean, x_N_std)\n \n else:\n x_N_std = sst.norm.pdf(sst.norm.ppf(dist.cdf(x_i)))/dist.pdf(x_i)\n x_N_mean = x_i - sst.norm.ppf(dist.cdf(x_i))*x_N_std\n return(x_N_mean, x_N_std)", "def _get_marginal_pdfs( res, nbins=51, verbose=True ):\n\tvparam_names = res.vparam_names\n\tweights = res.weights\n\tsamples = res.samples\n\n\tpdfdict = {}\n\n\tfor param in vparam_names :\n\t\tipar = vparam_names.index( param )\n\t\tparamvals = samples[:,ipar]\n\n\t\tif nbins>1:\n\t\t\tif param in res.bounds :\n\t\t\t\tparvalmin, parvalmax = res.bounds[param]\n\t\t\telse :\n\t\t\t\tparvalmin, parvalmax = 0.99*paramvals.min(), 1.01*paramvals.max()\n\t\t\tparambins = np.linspace( parvalmin, parvalmax, nbins, endpoint=True ).flatten()\n\t\t\tbinindices = np.digitize( paramvals, parambins )\n\n\t\t\t# we estimate the marginalized pdf by summing the weights of all points in the bin,\n\t\t\t# where the weight of each point is the prior volume at that point times the\n\t\t\t# likelihood, divided by the total evidence\n\t\t\tpdf = np.array( [ weights[np.where( binindices==ibin )].sum() for ibin in range(len(parambins)) ] )\n\t\telse :\n\t\t\tparambins = None\n\t\t\tpdf = None\n\n\n\t\tmean = (weights * samples[:,ipar]).sum()\n\t\t#print(samples[:,ipar]-mean)\n\t\t#print(weights)\n\t\tstd = np.sqrt( (weights * (samples[:,ipar]-mean)**2 ).sum() )\n\n\n\t\tpdfdict[param] = (parambins,pdf,mean,std,res.logz)\n\n\t\tif verbose :\n\t\t\tif np.abs(std)>=0.1:\n\t\t\t\tprint( ' <%s> = %.2f +- %.2f'%( param, np.round(mean,2), np.round(std,2)) )\n\t\t\telif np.abs(std)>=0.01:\n\t\t\t\tprint( ' <%s> = %.3f +- %.3f'%( param, np.round(mean,3), np.round(std,3)) )\n\t\t\telif np.abs(std)>=0.001:\n\t\t\t\tprint( ' <%s> = %.4f +- %.4f'%( param, np.round(mean,4), np.round(std,4)) )\n\t\t\telse :\n\t\t\t\tprint( ' <%s> = %.3e +- %.3e'%( param, mean, std) )\n\n\n\t\tif param == 'x0' :\n\t\t\tsalt2 = sncosmo.Model( source='salt2')\n\t\t\tsalt2.source.set_peakmag( 0., 'bessellb', 'ab' )\n\t\t\tx0_AB0 = salt2.get('x0')\n\t\t\tmBmean = -2.5*np.log10( mean / x0_AB0 )\n\t\t\tmBstd = 2.5*np.log10( np.e ) * std / mean\n\t\t\tmBbins = -2.5*np.log10( parambins / x0_AB0 )\n\n\t\t\tpdfdict['mB'] = ( mBbins, pdf, mBmean, mBstd )\n\t\t\tif verbose:\n\t\t\t\tprint( ' <%s> = %.3f +- %.3f'%( 'mB', np.round(mBmean,3), np.round(mBstd,3)) )\n\n\treturn( pdfdict )", "def getInitParams(self):\n paramDict = Distribution.getInitParams(self)\n paramDict['mapping'] = self.mapping\n paramDict['values'] = self.values\n return paramDict", 
"def posteriorDistribution(x,z,beta,sigma2):\r\n ### TODO: Write your code here\r\n\r\n # mean of posterior distribution is the MAP estimate of the weights a\r\n # tau^2(from notes) is beta\r\n\r\n extra_col = np.ones((x.shape[0], 1))\r\n x = np.append(extra_col, x, axis = 1)\r\n\r\n alpha_map = np.linalg.inv((np.transpose(x)@x + (sigma2/beta)*np.eye(2)))@(np.transpose(x)@z)\r\n mu = alpha_map\r\n\r\n Cov = np.linalg.inv((np.transpose(x)@x + (sigma2/beta)*np.eye(2)))*sigma2\r\n\r\n num_x = 100\r\n num_y = 100\r\n\r\n xvalues = np.linspace(-1, 1, num = num_x)\r\n yvalues = np.linspace(-1, 1, num = num_y)\r\n X_grid, Y_grid = np.meshgrid(xvalues, yvalues)\r\n\r\n samples = np.column_stack((X_grid.flatten(), Y_grid.flatten()))\r\n\r\n density = util.density_Gaussian(mu.squeeze(), Cov, samples)\r\n density_grid = np.reshape(density, (num_x, num_y))\r\n\r\n plt.figure(1)\r\n plt.title(\"Posterior Distribution of α Given 5 Data Points\")\r\n plt.xlabel('$α_0$')\r\n plt.ylabel('$α_1$')\r\n plt.scatter(-0.1, -0.5, c='r')\r\n plt.contour(X_grid, Y_grid, density_grid, cmap=plt.cm.winter)\r\n plt.show()\r\n\r\n return (mu,Cov)", "def get_prob_for_distributions(p):\n w1 = p[0]\n mu1 = p[1]\n sigma1 = p[2]\n w2 = p[3]\n mu2 = p[4]\n sigma2 = p[5]\n w3 = p[6]\n mu3 = p[7]\n sigma3 = p[8]\n dist_range = (0, 4.330310991999920844e+01)\n x = np.linspace(dist_range[0], dist_range[1], 1000)\n A1 = np.array(w1 * mlab.normpdf(x, mu1, sigma1)).sum()\n A2 = np.array(w2 * mlab.normpdf(x, mu2, sigma2)).sum()\n A3 = np.array(w3 * mlab.normpdf(x, mu3, sigma3)).sum()\n p1 = A1 / (A1 + A2 + A3)\n p2 = A2 / (A1 + A2 + A3)\n p3 = A3 / (A1 + A2 + A3)\n return p1, p2, p3", "def Get_params(numparams, dt, D):\n # bounds from table 1 Kowalek et al 2020\n Nmin, Nmax = 30, 600\n Bmin, Bmax = 1, 6\n Rmin, Rmax = 1, 17\n alphamin, alphamax = 0.3, 0.7\n Qmin, Qmax = 1, 9\n\n # Gen parameters\n Q = np.random.uniform(Qmin, Qmax, size=numparams)\n Q1, Q2 = Q, Q\n\n NsND = np.random.randint(Nmin, Nmax + 1, size=numparams)\n NsAD = np.random.randint(Nmin, Nmax + 1, size=numparams)\n NsCD = np.random.randint(Nmin, Nmax + 1, size=numparams)\n NsDM = np.random.randint(Nmin, Nmax + 1, size=numparams)\n TDM = NsDM * dt\n\n B = np.random.uniform(Bmin, Bmax, size=numparams)\n r_c = np.sqrt(D * NsCD * dt / B) # solving for r_c in eq. 8 Kowalek\n\n R = np.random.uniform(Rmin, Rmax, size=numparams)\n v = np.sqrt(R * 4 * D / TDM) # solving for v in eq. 7 Kowalek\n\n alpha = np.random.uniform(alphamin, alphamax, size=numparams)\n\n # Compute sigma for ND, AD, CD from eq. 12 Kowalek\n sigmaND = np.sqrt(D * dt) / Q1\n sigmaAD = np.sqrt(D * dt) / Q1\n sigmaCD = np.sqrt(D * dt) / Q1\n\n # Compute sigma for DM from eq. 
12 Kowalek\n sigmaDM = np.sqrt(D * dt + v ** 2 * dt ** 2) / Q2\n\n return np.array(\n [\n NsND,\n NsAD,\n NsCD,\n NsDM,\n D * np.ones(numparams),\n dt * np.ones(numparams),\n r_c,\n v,\n alpha,\n sigmaND,\n sigmaAD,\n sigmaCD,\n sigmaDM,\n ]\n ).T", "def _calpara(self):\n self.up = math.exp(self.sigma*math.sqrt(self.deltatime))\n self.down = math.exp(-self.sigma*math.sqrt(self.deltatime))\n self.upprob = (math.exp((self.r-self.d)*self.deltatime)-self.down)/(self.up-self.down)", "def APH2ndMomentLowerBound (m1, n):\n\n return float(m1)*m1*(n+1) / n", "def _construct_mom_stuff(self):\n a = self.mom_mix_rate\n dist_mean = self.GN.dist_mean\n dist_cov = self.GN.dist_cov\n # Get the generated sample observations for this batch, transformed\n # linearly into the desired space for moment matching...\n X_b = T.dot(self.GN.output, self.mom_match_proj)\n # Get their mean\n batch_mean = T.mean(X_b, axis=0)\n # Get the updated generator distribution mean\n new_mean = ((1.0 - a[0]) * self.GN.dist_mean) + (a[0] * batch_mean)\n # Use the mean to get the updated generator distribution covariance\n X_b_minus_mean = X_b - new_mean\n # Whelp, I guess this line needs the cast... for some reason...\n batch_cov = T.dot(X_b_minus_mean.T, X_b_minus_mean) / T.cast(X_b.shape[0], 'floatX')\n new_cov = ((1.0 - a[0]) * self.GN.dist_cov) + (a[0] * batch_cov)\n # Get the cost for deviation from the target distribution's moments\n mean_err = new_mean - self.target_mean\n cov_err = (new_cov - self.target_cov)\n mm_cost = self.mom_match_weight[0] * \\\n (T.sum(mean_err**2.0) + T.sum(cov_err**2.0))\n # Construct the updates for the running estimates of the generator\n # distribution's first and second-order moments.\n mom_updates = OrderedDict()\n mom_updates[self.GN.dist_mean] = new_mean\n mom_updates[self.GN.dist_cov] = new_cov\n return [mm_cost, mom_updates]", "def aicpdf(xvals, distribution, params):\n if distribution == 'pareto':\n pvals = (params['xmin'] * params['mu'] ** params['xmin']) / (xvals ** (params['xmin'] + 1))\n return pvals\n \n elif distribution == 'lognormal':\n #import pdb; pdb.set_trace()\n pvals = np.exp(-(np.log(xvals) - params['mu'])**2 / (2 * params['sigma']**2)) / (xvals * params['sigma'] * np.sqrt(2*np.pi))\n return pvals\n \n elif distribution == 'normal':\n pvals = np.exp(-(xvals - params['mu'])**2 / (2 * params['sigma']**2)) / (params['sigma'] * np.sqrt(2*np.pi))\n return pvals\n \n elif distribution == 'exponential':\n pvals = params['lambda'] * np.exp(-params['lambda'] * xvals)\n return pvals \n \n elif distribution == 'boundedpl':\n #pvals = (params['mu'] * (params['mu'] ** params['xmax'] - params['xmin'] ** params['xmax'])) / (xvals ** (params['mu'] + 1))\n #mu * (xmax ^ mu - xmin ^ mu) / x ^ (mu+1)\n pvals = (params['mu'] * (params['xmax'] ** params['mu'] - params['xmin'] ** params['mu'])) / (xvals ** (params['mu'] + 1))\n return pvals", "def moment(self, n, mu, sigma):\n return scipy_norm.moment(n, mu, sigma)", "def _get_params(self):\r\n return np.hstack((self.variance, self.lengthscale))", "def _get_params(self):\r\n return np.hstack((self.variance, self.lengthscale))", "def computeMoments(x):\n return (abs(stats.skew(x)),abs(stats.kurtosis(x,None,True)))", "def initializeDistribution(self):\n self.minVal = min(math.exp(self.upperBound),math.exp(self.lowerBound))\n self.maxVal = max(math.exp(self.upperBound),math.exp(self.lowerBound))", "def initializeDistribution(self):\n self.convertToDistrDict['Legendre'] = self.convertLegendreToUniform\n self.convertToQuadDict ['Legendre'] = 
self.convertUniformToLegendre\n self.measureNormDict ['Legendre'] = self.stdProbabilityNorm\n self.convertToDistrDict['ClenshawCurtis'] = self.convertLegendreToUniform\n self.convertToQuadDict ['ClenshawCurtis'] = self.convertUniformToLegendre\n self.measureNormDict ['ClenshawCurtis'] = self.stdProbabilityNorm\n self._distribution = distribution1D.BasicUniformDistribution(self.lowerBound,self.lowerBound+self.range)", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale))", "def __bivariate_gaussian(self, sig1, sig2, mu1, mu2, x1, x2, rho):\n Z1 = ((x1 - mu1) / sig1) ** 2\n Z2 = ((x2 - mu2) / sig2) ** 2\n Z3 = 2 * rho * (x1 - mu1) * (x2 - mu2) / (sig1 * sig2)\n Z = Z1 + Z2 - Z3\n\n pi_const = torch.tensor([np.pi]).to(self.__device)\n\n return torch.exp(-Z / (2 * (1 - rho ** 2))).to(self.__device) / (\n 2 * pi_const * sig1 * sig2 * torch.sqrt(1 - rho ** 2)\n )", "def getCrowDistDict(self):\n retDict = Distribution.getCrowDistDict(self)\n retDict['mu'] = self.mean\n retDict['sigma'] = self.sigma\n retDict['low'] = self.low\n return retDict", "def __bivariate_gaussian(self, sig1, sig2, mu1, mu2, x1, x2, rho):\n Z1 = ((x1 - mu1) / sig1) ** 2\n Z2 = ((x2 - mu2) / sig2) ** 2\n Z3 = 2 * rho * (x1 - mu1) * (x2 - mu2) / (sig1 * sig2)\n\n Z = Z1 + Z2 - Z3\n\n pi_const = torch.tensor([np.pi]).to(self.__device)\n\n return torch.exp(-Z / (2 * (1 - rho ** 2))).to(self.__device) / (\n 2 * pi_const * sig1 * sig2 * torch.sqrt(1 - rho ** 2)\n )", "def estimate_gaussian_params(X):\n mu = X.mean(axis=0)\n var = X.std(axis=0)**2.0\n return mu,var", "def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu\")\n self.modelBuilder.factory_('expr::vbfH_s_func(\"@0-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"1-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::vbfH_sbi_func(\"sqrt(@0)\", mu)')", "def like_one(theta,dt,dmag,sigma):\n\n gamma, A = theta\n aux=(1/np.sqrt(2*np.pi*Veff2(dt,sigma,A,gamma)))*np.exp(-1.0*(dmag**2)/(2.0*Veff2(dt,sigma,A,gamma)))\n\n return aux", "def update_params(x, prior, posterior):\r\n mu0, kappa0, alpha0, beta0 = prior\r\n mu_t, kappa_t, alpha_t, beta_t = posterior\r\n return np.r_[mu0, (kappa_t*mu_t + x)/(kappa_t + 1)], \\\r\n np.r_[kappa0, kappa_t + 1], \\\r\n np.r_[alpha0, alpha_t + 0.5], \\\r\n np.r_[beta0, beta_t + 0.5*kappa_t*(x - mu_t)**2/(kappa_t + 1)]", "def prior_params_tree(self):\n id = {name:i for i, name in enumerate(list(self.tree.keys()))}\n n_nodes = len(id)\n dist_mx = np.zeros((n_nodes, n_nodes))\n\n for node1, edges in self.tree.items():\n for node2, dist in edges.dist:\n dist_mx[id[node1], id[node2]] = dist\n dist_mx[id[node2], id[node1]] = dist\n\n # while np.count_nonzero(dist_mx) < (n_nodes ** 2 - n_nodes):\n for _ in range(20):\n for i, j in combinations(range(n_nodes), 2):\n if dist_mx[i,j] > 0:\n continue\n row_i = dist_mx[i]\n row_j = dist_mx[j]\n value = (row_i + row_j) * (row_i > 0) * (row_j > 0)\n dist_mx[i, j] = dist_mx[j, i] = - max(np.unique(value))\n dist_mx = np.abs(dist_mx)\n\n evolve_rate = []\n for node1, node2 in combinations(self.m_cov.keys(), 2):\n mx_cov_dist = np.abs(self.m_cov[node1] - self.m_cov[node2])\n elements = mx_cov_dist[np.triu_indices(len(mx_cov_dist))]\n norm_elements = elements / dist_mx[id[node2], id[node1]]\n evolve_rate += list(norm_elements)\n\n\n\n df = np.mean([p.shape[0] for _, p in self.m_profiles.items()])\n p_theta_alpha = df/2\n # 
p_theta_alpha = 4\n p_theta_beta = np.percentile(evolve_rate, 75) * (p_theta_alpha - 1)\n # print(p_theta_alpha, p_theta_beta)\n return p_theta_alpha, p_theta_beta", "def __init__(self, \n param_epsilon, \n param_tau,\n param_u, \n param_gamma_left,\n param_gamma_right,\n param_beta):\n self.epsilon = param_epsilon\n self.tau = param_tau\n self.u = param_u\n self.gamma_left = param_gamma_left\n self.gamma_right = param_gamma_right\n \n self.sigma_retarded = 1j * (self.gamma_left + self.gamma_right) / 2.0\n self.sigma_advanced = - self.sigma_retarded;\n \n self.dim = len(self.u)\n self.rho = np.zeros((2**self.dim))\n \n self.beta = param_beta\n \n self.cutoff_chance = 0.0001\n self.external_distribution = False\n self.external_distribution_array = self.distribution()\n self.external_distribution = True", "def _get_parameters(n, j, domain, g, ncap):\n alphas, betas = rc.recurrenceCoefficients(n - 2, lb=domain[0], rb=domain[1],\n j=j, g=g, ncap=ncap)\n omegas = g * np.array(alphas)\n ts = g * np.sqrt(np.array(betas)[1::])\n c0 = np.sqrt(betas[0])\n return omegas, ts, c0", "def aicmle(timeSeries, distribution):\n mlevals = {} \n if distribution == 'pareto':\n mlevals['xmin'] = np.min(timeSeries)\n mlevals['mu'] = 1 - timeSeries.shape[0] / (timeSeries.shape[0] * np.log(mlevals['xmin']) - np.sum(np.log(timeSeries)))\n \n elif distribution == 'lognormal':\n mlevals['mu'] = np.sum(np.log(timeSeries)) / timeSeries.shape[0]\n mlevals['sigma'] = np.sqrt(np.sum( (np.log(timeSeries) - mlevals['mu'])**2) / timeSeries.shape[0])\n \n elif distribution == 'normal':\n mlevals['mu'] = np.mean(timeSeries)\n mlevals['sigma'] = np.sqrt(sum((timeSeries - np.mean(timeSeries))**2) / timeSeries.shape[0])\n \n elif distribution == 'exponential':\n mlevals['lambda'] = 1.0 / np.mean(timeSeries)\n \n elif distribution == 'boundedpl':\n mlevals['xmin'] = np.min(timeSeries)\n mlevals['xmax'] = np.max(timeSeries)\n minmuEstimate = 1.1\n mlevals['mu'] = fmin(lambda mu: -len(timeSeries) * np.log( (mu - 1) / (np.min(timeSeries)**(1 - mu) - np.max(timeSeries)**(1 - mu))) + mu * np.sum(np.log(timeSeries)), minmuEstimate, disp=0)[0]\n\n return mlevals", "def __init__(self, mean=0.0, sigma=1.0):\n super().__init__()\n self.mean = mean\n self.sigma = sigma\n self.hasInfiniteBound = True\n self.type = 'Normal'\n self.distType = 'Continuous'\n self.compatibleQuadrature.append('Hermite')\n self.compatibleQuadrature.append('CDF')\n #THESE get set in initializeDistribution, since it depends on truncation\n #self.preferredQuadrature = 'Hermite'\n #self.preferredPolynomials = 'Hermite'", "def compute_t_params(mu, kappa, alpha, beta):\r\n mu_, sigma2_, dof_ = mu, beta*(kappa + 1)/(alpha*kappa), 2*alpha\r\n return mu_, sigma2_, dof_", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['n' ] = self.n\n paramDict['p' ] = self.p\n return paramDict", "def generate_stat(sample_size, sparsity = 0, amplitude = 0, sigma = 1):\n var = generate_variable(sample_size, sparsity, amplitude, sigma)\n y_obs = var[0]\n \n \"\"\" \n f is equal to -X(t,theta) and we will minimize f (max. 
X)\n \"\"\"\n def f(x):\n \"\"\" \n f(x)=-X(t,theta) where x[0]=t and x[1]=theta\n \"\"\"\n res = np.real(np.exp(-1j*x[1])*\\\n sum(y_obs[k+sample_size]*np.exp(1j*k*x[0]) \\\n for k in range(-sample_size,sample_size+1))) \n res = -res/np.sqrt(2*sample_size+1) \n return res\n \n def grad_f(x):\n \"\"\" \n gradient of f\n \"\"\"\n res1 = np.real(np.exp(-1j*x[1])*\\\n sum(1j*k*y_obs[k+sample_size]*np.exp(1j*k*x[0]) \\\n for k in range(-sample_size,sample_size+1)))\n res1 = -res1/np.sqrt(2*sample_size+1)\n \n res2 = np.real(np.exp(-1j*x[1])*\\\n sum(-1j*y_obs[k+sample_size]*np.exp(1j*k*x[0]) \\\n for k in range(-sample_size,sample_size+1)))\n res2 = -res2/np.sqrt(2*sample_size+1)\n return np.array([res1, res2])\n \n #% Minimizing f\n \n \"\"\" \n we minimize on [0, 2pi]^2\n \"\"\"\n bnds = ((0, 2*np.pi), (0, 2*np.pi))\n \n \"\"\" \n We begin by a greedy search of the initialization point over a grid of size 126^2\n the initialization point is init\n \"\"\"\n x = y = np.arange(0, 2*np.pi, 0.05)\n steps = 126\n X, Y = np.meshgrid(x, y)\n val = np.array([f([x,y]) for x,y in zip(np.ravel(X), np.ravel(Y))])\n init = np.argmin(val)\n x1 = init%steps\n x2 = (init-x1)/steps\n init = [x1*0.05, x2*0.05]\n \n \"\"\" \n we minimize f...\n \"\"\"\n result = sco.minimize(f, init, method=\"L-BFGS-B\",\\\n jac=grad_f, bounds=bnds, tol=1e-15)\n \n \"\"\" \n (t1,theta1) is the argmax of X(t, theta) and l1=$\\lambda_1$\n \"\"\"\n t1 = result.x[0]\n theta1 = result.x[1]\n l1 = -f([t1,theta1])\n \n \n \"\"\" \n Function g(x) is equal to (X(t1,theta1)-X(x))/(1-rho((t1,theta1)-x))\n \"\"\"\n def g(x):\n a0 = x[0]-t1\n a1 = x[1]-theta1\n N = 2*sample_size+1\n \n vec = np.array([a0,a1])\n r = np.linalg.norm(vec)\n \"\"\" \n the value for r=0 is set to l1 (note that r=0 corresponds to x=(t1,theta1))\n \"\"\" \n res = l1 \n \n if (0<r) & (r<0.00001):\n \"\"\" \n we look a values near (t1,theta1) for which an indetermination occurs\n \"\"\" \n alpha= np.arccos(np.clip(a0/np.sqrt(a0**2+a1**2), -1.0, 1.0))\n u0 = np.cos(alpha)\n u1 = np.sin(alpha)\n \"\"\" \n u0,u1 defines the direction (unit vector)\n \"\"\"\n denom = sum((k*np.cos(alpha)-np.sin(alpha))**2*\\\n (np.sinc((r*(k*np.cos(alpha)-np.sin(alpha)))/(2*np.pi)))**2\\\n for k in range(-sample_size,sample_size+1))/N\n \"\"\" \n denom computes the denominator\n \"\"\"\n \n# \"\"\" \n# We use simpson rule for the numerator\n# \"\"\"\n# h = np.linspace(0,1,500)\n# \n# b0 = t1 + h*a0\n# b1 = theta1 + h*a1\n# \n# value = (1-h)*(u0**2*\\\n# np.real(np.exp(-1j*b1)*sum(-k**2*y_obs[k+sample_size]*np.exp(1j*k*b0) \\\n# for k in range(-sample_size,sample_size+1)))\\\n# +2*u0*u1*\\\n# np.real(np.exp(-1j*b1)*sum(k*y_obs[k+sample_size]*np.exp(1j*k*b0) \\\n# for k in range(-sample_size,sample_size+1)))\\\n# +u1**2*\\\n# np.real(np.exp(-1j*b1)*sum((-1)*y_obs[k+sample_size]*np.exp(1j*k*b0) \\\n# for k in range(-sample_size,sample_size+1)))) \n# value = value/np.sqrt(N)\n# \n# num = sci.simps(value, h)\n \n \"\"\" \n we use a quadrature for the numerator\n \"\"\" \n fun_int = lambda w: (1-w)*(u0**2*\\\n np.real(np.exp(-1j*(theta1+w*a1))*\\\n sum(-k**2*y_obs[k+sample_size]*np.exp(1j*k*(t1+w*a0)) \\\n for k in range(-sample_size,sample_size+1)))\\\n +2*u0*u1*\\\n np.real(np.exp(-1j*(theta1+w*a1))*\\\n sum(k*y_obs[k+sample_size]*np.exp(1j*k*(t1+w*a0)) \\\n for k in range(-sample_size,sample_size+1)))\\\n +u1**2*\\\n np.real(np.exp(-1j*(theta1+w*a1))*\\\n sum((-1)*y_obs[k+sample_size]*np.exp(1j*k*(t1+w*a0)) \\\n for k in range(-sample_size,sample_size+1)))) \n \n num = 
np.mean(sci.quad(fun_int, 0, 1, epsabs=1e-15, epsrel=1e-15, limit=1000))\n \n res = -num/denom\n \n if (r>=0.00001):\n \"\"\" \n we look a values far (t1,theta1) for which there is no indetermination\n \"\"\" \n res = (l1+f(x))/(1-(np.cos(a1)*dirichlet(a0,N)/N))\n \n return res \n \"\"\" \n we minimize g on [0, 2pi]^2 an dwe llok for the initialization point\n \"\"\"\n val2 = np.array([g([x,y]) for x,y in zip(np.ravel(X), np.ravel(Y))])\n init2 = np.argmin(val2)\n x1 = init2%steps\n x2 = (init2-x1)/steps\n init2 = [x1*0.05, x2*0.05] \n result2 = sco.minimize(g, init2, method=\"L-BFGS-B\", bounds=bnds, tol=1e-15) \n \"\"\" \n argmin of g\n \"\"\"\n t2 = result2.x[0]\n theta2 = result2.x[1] \n \"\"\" \n value of lambda_2\n \"\"\"\n l21 = l1-result2.fun \n a0 = t2-t1\n a1 = theta2-theta1\n N = 2*sample_size+1\n l22 = l1-(l1+f([t2,theta2]))/(1-(np.cos(a1)*dirichlet(a0,N)/N))\n l2 = max(l21,l22)\n \"\"\" \n we compute the statistic\n \"\"\"\n alpha1 = (1/3)*sample_size*(sample_size+1)\n alpha2 = (1/np.sqrt(N))*\\\n sum((k**2-alpha1)*\\\n np.real(y_obs[k+sample_size]*np.exp(1j*(k*t1-theta1))) \\\n for k in range(-sample_size,sample_size+1))\n alpha3 = (1/np.sqrt(N))*sum(k*np.real(y_obs[k+sample_size]*\\\n np.exp(1j*(k*t1-theta1))) for k in range(-sample_size,sample_size+1)) \n stat = (sigma*(alpha1*l1+alpha2)*scs.norm.pdf(l1/sigma)+\\\n (alpha1*sigma**2-alpha3**2)*(1-scs.norm.cdf(l1/sigma)))/\\\n (sigma*(alpha1*l2+alpha2)*scs.norm.pdf(l2/sigma)+\\\n (alpha1*sigma**2-alpha3**2)*(1-scs.norm.cdf(l2/sigma))) \n \n return stat", "def moments2nd(data):\n nrow,ncol=data.shape\n Isum = data.sum()\n Icol = data.sum(axis=0) # sum over all rows\n Irow = data.sum(axis=1) # sum over all cols\n colgrid = np.arange(ncol)\n rowgrid = np.arange(nrow)\n rowmean=np.sum(rowgrid*Irow)/Isum\n colmean=np.sum(colgrid*Icol)/Isum\n ROW,COL=np.indices((nrow,ncol)) \n rowgrid = rowgrid - rowmean # centered\n colgrid = colgrid - colmean\n Mrr = np.sum(rowgrid**2*data)/Isum\n Mcc = np.sum(colgrid**2*data)/Isum\n Mrc = np.sum(np.outer(rowgrid,colgrid)*data)/Isum \n return Mcc, Mrr, Mrc", "def weights_treatment_parameters(init_dict, GRID):\n GRID = np.linspace(0.01, 0.99, num=99, endpoint=True)\n\n coeffs_untreated = init_dict[\"UNTREATED\"][\"params\"]\n coeffs_treated = init_dict[\"TREATED\"][\"params\"]\n cov = construct_covariance_matrix(init_dict)\n x = simulate_covariates(init_dict)\n\n # We take the specified distribution for the cost shifters from the paper.\n cost_mean, cost_sd = -0.0026, np.sqrt(0.270)\n v_mean, v_sd = 0.00, np.sqrt(cov[2, 2])\n\n eval_points = norm.ppf(GRID, loc=v_mean, scale=v_sd)\n\n ate_weights = np.tile(1.0, 99)\n tut_weights = norm.cdf(eval_points, loc=cost_mean, scale=cost_sd)\n\n tt_weights = 1 - tut_weights\n\n def tut_integrand(point):\n eval_point = norm.ppf(point, loc=v_mean, scale=v_sd)\n return norm.cdf(eval_point, loc=cost_mean, scale=cost_sd)\n\n def tt_integrand(point):\n eval_point = norm.ppf(point, loc=v_mean, scale=v_sd)\n return norm.cdf(eval_point, loc=cost_mean, scale=cost_sd)\n\n # Scaling so that the weights integrate to one.\n tut_scaling = quad(tut_integrand, 0.01, 0.99)[0]\n tut_weights /= tut_scaling\n\n tt_scaling = quad(tt_integrand, 0.01, 0.99)[0]\n tt_weights /= tt_scaling\n\n mte = mte_information(coeffs_treated, coeffs_untreated, cov, GRID, x, init_dict)\n\n return ate_weights, tt_weights, tut_weights, mte", "def _second_moment(R, sig_l, sig_m, lum, mass, Mbh, beta, tensor,\n sigmaPsf, normPsf, step, nrad, surf_l, pixSize):\n if (max(sigmaPsf) > 0) and (pixSize > 
0): # PSF convolution\n\n # Kernel step is 1/4 of largest value between sigma(min) and 1/2 pixel side.\n # Kernel half size is the sum of 3*sigma(max) and 1/2 pixel diagonal.\n #\n if step == 0:\n step = max(pixSize/2., np.min(sigmaPsf))/4.\n mx = 3*np.max(sigmaPsf) + pixSize/np.sqrt(2)\n\n # Make grid linear in log of radius RR\n #\n rmax = np.max(R) + mx # Radius of circle containing all data + convolution\n logRad = np.linspace(np.log(step), np.log(rmax), nrad) # Linear grid in log(RR)\n rr = np.exp(logRad)\n\n # The model Vrms computation is only performed on the radial grid\n # which is then used to interpolate the values at any other location\n #\n wm2Pol = np.empty_like(rr)\n mgePol = np.empty_like(rr)\n rup = 3*np.max(sig_l)\n for j in range(rr.size): # Integration of equation (50)\n wm2Pol[j] = quadva(_integrand, [rr[j], rup],\n args=(sig_l, sig_m, lum, mass, Mbh, rr[j], beta, tensor))[0]\n mgePol[j] = np.sum(surf_l * np.exp(-0.5*(rr[j]/sig_l)**2))\n\n nx = np.ceil(rmax/step)\n x1 = np.linspace(-nx, nx, 2*nx)*step\n xCar, yCar = np.meshgrid(x1, x1) # Cartesian grid for convolution\n\n # Interpolate MGE model and Vrms over cartesian grid\n #\n r1 = 0.5*np.log(xCar**2 + yCar**2) # Log radius of cartesian grid\n wm2Car = np.interp(r1, logRad, wm2Pol)\n mgeCar = np.interp(r1, logRad, mgePol)\n\n nk = np.ceil(mx/step)\n kgrid = np.linspace(-nk, nk, 2*nk)*step\n xgrid, ygrid = np.meshgrid(kgrid, kgrid) # Kernel is square\n\n # Compute kernel with equation (A6) of Cappellari (2008).\n # Normalization is irrelevant here as it cancels out.\n #\n kernel = np.zeros_like(xgrid)\n dx = pixSize/2\n sp = np.sqrt(2)*sigmaPsf\n for j in range(len(sigmaPsf)):\n kernel += normPsf[j] \\\n * (special.erf((dx-xgrid)/sp[j]) + special.erf((dx+xgrid)/sp[j])) \\\n * (special.erf((dx-ygrid)/sp[j]) + special.erf((dx+ygrid)/sp[j]))\n kernel /= np.sum(kernel)\n\n # Seeing and aperture convolution with equation (A3)\n #\n muCar = np.sqrt(signal.fftconvolve(wm2Car, kernel, mode='same')\n / signal.fftconvolve(mgeCar, kernel, mode='same'))\n\n # Interpolate convolved image at observed apertures.\n # Aperture integration was already included in the kernel.\n #\n mu = bilinear_interpolate(x1, x1, muCar, R/np.sqrt(2), R/np.sqrt(2))\n\n else: # No PSF convolution: just compute values\n\n mu = np.empty_like(R)\n rmax = 3*np.max(sig_l)\n for j in range(R.size):\n wm2Pol = quadva(_integrand, [R[j], rmax],\n args=(sig_l, sig_m, lum, mass, Mbh, R[j], beta, tensor))[0]\n mgePol = np.sum( surf_l * np.exp(-0.5*(R[j]/sig_l)**2) )\n mu[j] = np.sqrt(wm2Pol/mgePol)\n\n return mu", "def get_thrust_and_moment(self):\n\n f1 = self.k_f * self.omega_1 ** 2\n f2 = self.k_f * self.omega_2 ** 2\n \n # c is often used to indicate \"collective\" thrust\n c = f1 + f2\n \n M_x = (f1 - f2) * self.l\n return c, M_x", "def d2(self):\r\n return self.d1() - self.sigma*self.t**0.5", "def d2(self):\n d1 = self.d1()\n return d1 - self.sigma * (self.t **(0.5))", "def get_dists_2():\n d1 = Distribution(['0', '1'], [1 / 2, 1 / 2])\n d2 = Distribution(['0', '1'], [1 / 3, 2 / 3])\n d3 = Distribution(['0', '1'], [2 / 5, 3 / 5])\n return d1, d2, d3", "def get_hyperparameter_bounds():\n minf = float(\"-inf\")\n inf = float(\"inf\")\n params = dict(mu=(minf,inf), nu=(0.0 ,inf), r=(0.0, inf), s=(0.0, inf))\n return params", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['low' ] = self.low\n paramDict['high' ] = self.high\n paramDict['alpha'] = self.alpha\n paramDict['beta' ] = self.beta\n return paramDict", "def 
getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['range'] = self.range\n return paramDict\n # no other additional parameters required", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['lambda'] = self.lambdaVar\n paramDict['k' ] = self.k\n paramDict['low' ] = self.low\n return paramDict", "def doParametersOfInterest(self):\n ''' ref : physicsmodel -> rvf\n self.modelBuilder.out.var(\"MH\").setRange(float(self.mHRange[0]),float(self.mHRange[1]))\n self.modelBuilder.out.var(\"MH\").setConstant(False)\n '''\n\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doVar(\"Fvbf[0,0,1]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu,Fvbf\")\n self.modelBuilder.doVar(\"\")\n self.modelBuilder.factory_('expr::ggH_s_func(\"(@0-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_b_func(\"(1-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_sbi_func(\"sqrt(@0)*(1.-@1)\", mu,Fvbf)')\n\n self.modelBuilder.factory_('expr::vbfH_s_func(\"(@0-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"(1-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_sbi_func(\"sqrt(@0)*(@1)\", mu,Fvbf)')", "def MyBaseMoments(p,q,img,gauss_sigma,gauss_centroid=None, gauss_g1=0., gauss_g2=0.):\n weight = galsim.Image(np.zeros_like(img.array))\n gauss = galsim.Gaussian(sigma=gauss_sigma*pixel_scale).shear(g1=gauss_g1,g2=gauss_g2)\n if gauss_centroid is None:\n gauss_centroid = img.true_center\n weight = gauss.drawImage(image=weight, scale=pixel_scale, method='no_pixel', use_true_center=True, offset=(gauss_centroid-img.true_center)*(1))\n x = np.linspace(img.xmin-img.center.x*0-gauss_centroid.x*1, img.xmax-img.center.x*0-gauss_centroid.x*1, img.xmax-img.xmin+1)+0.*0.5\n y = np.linspace(img.ymin-img.center.y*0-gauss_centroid.y*1, img.ymax-img.center.y*0-gauss_centroid.y*1, img.ymax-img.ymin+1)+0.*0.5\n X, Y = np.meshgrid(x,y)\n\n Q00 = np.sum(weight.array*img.array)\n Q10 = gauss_centroid.x + np.sum(X*weight.array*img.array)/Q00\n Q01 = gauss_centroid.y + np.sum(Y*weight.array*img.array)/Q00\n Q20 = np.sum((X**2)*weight.array*img.array)\n Q02 = np.sum((Y**2)*weight.array*img.array)\n\n monomial = 1.\n for pp in xrange(p):\n monomial *= X\n for qq in xrange(q):\n monomial *= Y\n Qpq = np.sum(monomial*weight.array*img.array) #/Q00\n\n return Qpq", "def params(self):\n return {'cfg': self.cfg,\n 'momentum': self.momentum,\n 'center': self.center,\n 'scale': self.scale,\n 'epsilon': self.epsilon,\n 'act_fn': self.act_fn}", "def initialize(self):\n\t\tmu = 0\n\t\tsigma = np.sqrt(2 / self.dataset[\"d\"])\n\n\t\tself.F1 = np.random.normal(mu, sigma, self.F1.shape)\n\t\tself.F2 = np.random.normal(mu, sigma, self.F2.shape)\n\t\tself.W = np.random.normal(mu, sigma, self.W.shape)\n\n\t\tself.F1_momentum = np.zeros(self.F1.shape)\n\t\tself.F2_momentum = np.zeros(self.F2.shape)\n\t\tself.W_momentum = np.zeros(self.W.shape)", "def init_hyperparameters():\n alpha = .8\n alpha2 = 1\n\n return alpha, alpha2", "def _get_params(self):\r\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", "def _get_params(self):\r\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", "def logdprior(parameters, hyperparameters):\n sigma_w_part = parameters[0] + 
invgamma_logpdf(parameters[0],\n hyperparameters[\"sigma_w_shape\"], hyperparameters[\"sigma_w_scale\"])\n sigma_v_part = parameters[1] + invgamma_logpdf(parameters[1], hyperparameters[\"sigma_v_shape\"], hyperparameters[\"sigma_v_scale\"])\n return sigma_w_part + sigma_v_part", "def initializeDistribution(self):\n if self.functionType == 'CDF':\n self._distribution = distribution1D.BasicMultiDimensionalInverseWeight(str(self.dataFilename), self.p,True)\n else:\n self._distribution = distribution1D.BasicMultiDimensionalInverseWeight(str(self.dataFilename), self.p,False)\n self.dimensionality = self._distribution.returnDimensionality()\n self.lowerBound = [self.returnLowerBound(dim) for dim in range(self.dimensionality)]\n self.upperBound = [self.returnUpperBound(dim) for dim in range(self.dimensionality)]", "def get_initial_params(self, x, y, yerr):\n# p0 = zeros(self.max_phonons + 1)\n p0 = zeros(2)\n p0[0] = 100\n p0[1] = .1\n return p0", "def gaussian_parameters(h, dim=-1):\n m, h = torch.split(h, h.size(dim) // 2, dim=dim)\n v = F.softplus(h) + 1e-8\n return m, v", "def moments(values):\n\n meanValue = numpy.mean(values)\n return (meanValue,\n numpy.sqrt(moment(values, meanValue, 2)),\n moment(values, meanValue, 3),\n moment(values, meanValue, 4))" ]
[ "0.6455378", "0.6220392", "0.6185545", "0.6109156", "0.6106636", "0.60708535", "0.60512894", "0.60178155", "0.5966822", "0.59502286", "0.58735156", "0.5850575", "0.58171284", "0.5816514", "0.57661724", "0.5720821", "0.57173246", "0.57122564", "0.5709464", "0.57005703", "0.56566393", "0.56566393", "0.56566393", "0.5650613", "0.5647901", "0.56205666", "0.56183773", "0.5581819", "0.5580499", "0.5579431", "0.5574771", "0.55595124", "0.5549218", "0.5547417", "0.55450284", "0.55362755", "0.5533073", "0.55328864", "0.55292463", "0.55152035", "0.55004686", "0.54993844", "0.54803777", "0.547777", "0.54711723", "0.54631597", "0.5454735", "0.54525715", "0.5436794", "0.54347324", "0.54311454", "0.54289407", "0.5415726", "0.5407236", "0.540343", "0.5398714", "0.5398714", "0.53965294", "0.5371645", "0.5370123", "0.5353437", "0.5351195", "0.5346446", "0.5343545", "0.53380007", "0.53346354", "0.5332762", "0.53268677", "0.53158087", "0.53155935", "0.5310139", "0.5309718", "0.5300877", "0.5296502", "0.5292304", "0.5292062", "0.528314", "0.5280636", "0.5273658", "0.52717936", "0.5267175", "0.5266302", "0.5263959", "0.525519", "0.52547693", "0.5243865", "0.52428687", "0.5240332", "0.52383465", "0.52359915", "0.5235011", "0.5232048", "0.5220906", "0.5220906", "0.5220359", "0.521662", "0.5211572", "0.5206511", "0.52055675" ]
0.6434524
2
This function computes the distribution internal parameters from its first two moments.
def _compute_internals(self, moments):

    [mean, stdv] = moments
    internals = {}
    internals['mu'] = mean
    internals['sigma'] = stdv

    return internals
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['a'] = mean - np.sqrt(3) * stdv\n internals['b'] = mean + np.sqrt(3) * stdv\n\n return internals", "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['a'] = mean - np.sqrt(3) * stdv\n internals['b'] = mean + np.sqrt(3) * stdv\n\n return internals", "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['k'] = mean ** 2. / stdv ** 2.\n internals['LAMBDA'] = mean / stdv ** 2.\n\n return internals", "def calc_moments(distribution):\n x = torch.linspace(2, 22, 31)\n d_mean = torch.sum(x * distribution)\n d_var = torch.sum(distribution * (x - d_mean) ** 2) \n \n return d_mean, torch.sqrt(d_var)", "def posteriorDistribution(x,z,beta,sigma2):\r\n ### TODO: Write your code here\r\n mu = 0\r\n Cov = 0\r\n\r\n x_s = []\r\n for i in np.linspace(-1 , 1 , 150):\r\n for j in np.linspace(-1 , 1 , 150):\r\n x_s.append([i,j])\r\n x_s = np.array(x_s)\r\n\r\n X = []\r\n for i in x:\r\n j = [1,i[0]]\r\n X.append(j)\r\n X = np.array(X)\r\n\r\n common = np.matmul( X.T , X) + np.identity(2) * sigma2/beta\r\n common = np.linalg.inv(common)\r\n Cov = common * sigma2\r\n mu = np.matmul(common , np.matmul (X.T , z) )\r\n mu = mu.flatten()\r\n print(\"X.shape: \" , X.shape)\r\n print(\"z.shape: \",z.shape)\r\n print(\"Cov.shape\" , Cov.shape)\r\n print(\"mu.shape: \",mu.shape)\r\n density = util.density_Gaussian(mu , Cov , x_s).reshape(150 , 150 ).T\r\n print(\"density.shape\",density.shape)\r\n X,Y = np.meshgrid( np.linspace(-1,1,150) , np.linspace(-1,1,150) )\r\n\r\n \r\n\r\n plt.contour( X , Y , np.reshape(density , (150, 150 )))\r\n plt.plot(-0.1 , -0.5 , marker = 'o' , MarkerSize = 10 , label = 'point a')\r\n plt.xlabel('a0 ')\r\n plt.ylabel(' a1 ')\r\n plt.legend()\r\n plt.xlim = (-1,1)\r\n plt.ylim = (-1,1)\r\n plt.title('p(a|x1,z1....xn,zn) for '+ str(len(x)) +' samples')\r\n plt.show() \r\n print('-x-x-x-x-x-x-x-x-x')\r\n\r\n return (mu,Cov)", "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n cov = stdv / mean\n zeta = np.sqrt(np.log(1. 
+ cov ** 2.))\n LAMBDA = np.log(mean) - 0.5 * zeta ** 2.\n internals = {}\n internals['LAMBDA'] = LAMBDA\n internals['zeta'] = zeta\n\n return internals", "def moments(self):", "def parameters(self):\n\n m = self.__m\n s = linalg.cholesky(self.__prod).transpose()\n w = self.__weight\n\n # Compute the parameters of the posterior distribution.\n return linalg.solve(s[:m, :m], s[:m, m:]), \\\n np.dot(s[:m, :m].transpose(), s[:m, :m]), \\\n np.dot(s[m:, m:].transpose(), s[m:, m:]) / w, \\\n w", "def parameters_to_marginal_moments(prob, distmu, distsigma):\n good = np.isfinite(prob) & np.isfinite(distmu) & np.isfinite(distsigma)\n prob = prob[good]\n distmu = distmu[good]\n distsigma = distsigma[good]\n distmean, diststd, _ = parameters_to_moments(distmu, distsigma)\n rbar = (prob * distmean).sum()\n r2bar = (prob * (np.square(diststd) + np.square(distmean))).sum()\n return rbar, np.sqrt(r2bar - np.square(rbar))", "def _get_distribution_variables(self, R):\n domain, Domain = self.domain_Domain\n phase_name = self.phase_name\n\n R_typ = self.phase_param.R_typ # [m]\n # Particle-size distribution (area-weighted)\n f_a_dist = self.phase_param.f_a_dist(R) # [m-1]\n\n # Ensure the distribution is normalised, irrespective of discretisation\n # or user input\n f_a_dist = f_a_dist / pybamm.Integral(f_a_dist, R) # [m-1]\n\n # Volume-weighted particle-size distribution\n f_v_dist = R * f_a_dist / pybamm.Integral(R * f_a_dist, R) # [m-1]\n\n # Number-based particle-size distribution\n f_num_dist = (f_a_dist / R**2) / pybamm.Integral(\n f_a_dist / R**2, R\n ) # [m-1]\n\n # True mean radii and standard deviations, calculated from the f_a_dist that\n # was given, all have units [m]\n R_num_mean = pybamm.Integral(R * f_num_dist, R)\n R_a_mean = pybamm.Integral(R * f_a_dist, R)\n R_v_mean = pybamm.Integral(R * f_v_dist, R)\n sd_num = pybamm.sqrt(pybamm.Integral((R - R_num_mean) ** 2 * f_num_dist, R))\n sd_a = pybamm.sqrt(pybamm.Integral((R - R_a_mean) ** 2 * f_a_dist, R))\n sd_v = pybamm.sqrt(pybamm.Integral((R - R_v_mean) ** 2 * f_v_dist, R))\n\n # X-average the means and standard deviations to give scalars\n # (to remove the \"electrode\" domain, if present)\n R_num_mean = pybamm.x_average(R_num_mean)\n R_a_mean = pybamm.x_average(R_a_mean)\n R_v_mean = pybamm.x_average(R_v_mean)\n sd_num = pybamm.x_average(sd_num)\n sd_a = pybamm.x_average(sd_a)\n sd_v = pybamm.x_average(sd_v)\n\n # X-averaged distributions, or broadcast\n if R.domains[\"secondary\"] == [f\"{domain} electrode\"]:\n f_a_dist_xav = pybamm.x_average(f_a_dist)\n f_v_dist_xav = pybamm.x_average(f_v_dist)\n f_num_dist_xav = pybamm.x_average(f_num_dist)\n else:\n f_a_dist_xav = f_a_dist\n f_v_dist_xav = f_v_dist\n f_num_dist_xav = f_num_dist\n\n # broadcast\n f_a_dist = pybamm.SecondaryBroadcast(f_a_dist_xav, [f\"{domain} electrode\"])\n f_v_dist = pybamm.SecondaryBroadcast(f_v_dist_xav, [f\"{domain} electrode\"])\n f_num_dist = pybamm.SecondaryBroadcast(\n f_num_dist_xav, [f\"{domain} electrode\"]\n )\n\n variables = {\n f\"{Domain} {phase_name}particle sizes\": R / R_typ,\n f\"{Domain} {phase_name}particle sizes [m]\": R,\n f\"{Domain} area-weighted {phase_name}particle-size\"\n \" distribution [m-1]\": f_a_dist,\n f\"{Domain} volume-weighted {phase_name}particle-size\"\n \" distribution [m-1]\": f_v_dist,\n f\"{Domain} number-based {phase_name}particle-size\"\n \" distribution [m-1]\": f_num_dist,\n f\"{Domain} area-weighted mean particle radius [m]\": R_a_mean,\n f\"{Domain} volume-weighted mean particle radius [m]\": R_v_mean,\n f\"{Domain} 
number-based mean particle radius [m]\": R_num_mean,\n f\"{Domain} area-weighted {phase_name}particle-size\"\n \" standard deviation [m]\": sd_a,\n f\"{Domain} volume-weighted {phase_name}particle-size\"\n \" standard deviation [m]\": sd_v,\n f\"{Domain} number-based {phase_name}particle-size\"\n \" standard deviation [m]\": sd_num,\n # X-averaged sizes and distributions\n f\"X-averaged {domain} {phase_name}particle sizes [m]\": pybamm.x_average(R),\n f\"X-averaged {domain} area-weighted {phase_name}particle-size \"\n \"distribution [m-1]\": f_a_dist_xav,\n f\"X-averaged {domain} volume-weighted {phase_name}particle-size \"\n \"distribution [m-1]\": f_v_dist_xav,\n f\"X-averaged {domain} number-based {phase_name}particle-size \"\n \"distribution [m-1]\": f_num_dist_xav,\n }\n\n return variables", "def grd_posterior_gaussian(self, ) -> Tuple[np.ndarray, np.ndarray]:\n xmin, xmax = self.x_range\n ymin, ymax = self.y_range\n\n mu = np.array([0, 0])\n sigma = np.zeros((2, 2))\n\n _sample = self._sample\n _prior = self.prior\n\n def mean_x(x: float, y: float):\n return x * _sample(x, y) * _prior.eval(x, y)\n\n def mean_y(x: float, y: float):\n return y * _sample(x, y) * _prior.eval(x, y)\n\n def var_x(x: float, y: float):\n return x * mean_x(x, y)\n\n def var_y(x: float, y: float):\n return y * mean_y(x, y)\n\n # def var_xy(x: float, y: float):\n # return x * mean_y(x, y)\n\n # First moment\n (mu[0], mu[1]) = (integrate.dblquad(mean_x, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],\n integrate.dblquad(mean_y, xmin, xmax, lambda x: ymin, lambda x: ymax)[0])\n (sigma[0, 0], sigma[1, 1]) = \\\n (integrate.dblquad(var_x, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],\n integrate.dblquad(var_y, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],)\n # integrate.dblquad(var_xy, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],)\n return mu, sigma", "def calc_parameters(T, N, sigma, r, div):\n dt = T / N\n u = np.exp(sigma * np.sqrt(dt))\n d = 1 / u\n b = r - div\n q = 1 / 2 + 1 / 2 * (b - 1 / 2 * sigma ** 2) * np.sqrt(dt) / sigma # P(up movement)\n return dt, u, d, q, b", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['mean' ] = self.mean\n paramDict['sigma'] = self.sigma\n return paramDict", "def calc_parameters(T, N, sigma, r, div):\n dt = T/N\n u = np.exp(sigma*np.sqrt(dt))\n d = 1/u\n b = r-div\n q = 1/2 + 1/2 * (b - 1/2 * sigma**2)*np.sqrt(dt)/sigma # P(up movement)\n return(dt, u, d, q, b)", "def random():\n # only care about the value of second_moment:\n # curve = scale * e**(-second_moment^2 q^2)/q^2\n # scale = 6 pi/100 (contrast/density*absorbed_amount)^2 * Vf/radius\n # the remaining parameters can be randomly generated from zero to\n # twice the default value as done by default in compare.py\n pars = dict(\n scale=1,\n second_moment=10**np.random.uniform(1, 3),\n )\n return pars", "def N2_f(d1,d2,rho):\n import statsmodels.sandbox.distributions.extras as extras\n muStandardNormal=0.0 # mean of a standard normal distribution \n varStandardNormal=1.0 # variance of standard normal distribution \n upper=([d1,d2]) # upper bound for two values\n v=varStandardNormal # simplify our notations\n mu=muStandardNormal # simplify our notations\n covM=([v,rho],[rho,v])\n return extras.mvnormcdf(upper,mu,covM)", "def get_means_and_scales(self):\n return self.optim.parameters[::2], np.exp(self.optim.parameters[1::2])", "def initializeDistribution(self):\n self.convertToDistrDict['Laguerre'] = self.convertLaguerreToGamma\n self.convertToQuadDict ['Laguerre'] = 
self.convertGammaToLaguerre\n self.measureNormDict ['Laguerre'] = self.stdProbabilityNorm\n if (not self.upperBoundUsed):\n # and (not self.lowerBoundUsed):\n self._distribution = distribution1D.BasicGammaDistribution(self.alpha,1.0/self.beta,self.low)\n #self.lowerBoundUsed = 0.0\n self.upperBound = sys.float_info.max\n self.preferredQuadrature = 'Laguerre'\n self.preferredPolynomials = 'Laguerre'\n else:\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'Legendre'\n if self.lowerBoundUsed == False:\n a = 0.0\n self.lowerBound = a\n else:\n a = self.lowerBound\n if self.upperBoundUsed == False:\n b = sys.float_info.max\n self.upperBound = b\n else:\n b = self.upperBound\n self._distribution = distribution1D.BasicGammaDistribution(self.alpha,1.0/self.beta,self.low,a,b)", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['mean' ] = self.mean\n paramDict['sigma'] = self.sigma\n paramDict['low'] = self.low\n return paramDict", "def parameters(conv, orthogonal):\n nrm = operator_one_norm(conv.weight).detach().cpu().numpy()\n\n if nrm > 15:\n print('Overflow likely, norm={}'.format(nrm))\n\n m = np.arange(1, len(THETA) + 1)\n vals = m * np.ceil(nrm / THETA)\n mstar = min(1 + np.argmin(vals), 56)\n s = int(np.ceil(nrm / THETA[mstar - 1]))\n\n return mstar, s", "def get_initial_parameters(token_segs):\r\n estems = {} # tracks the average probability of each root\r\n esuffix = {} # tracks the average probability of each suffix\r\n etrans = {} # tracks the average probability of each (transition, feature) pair\r\n eftrans = {} # tracks the average probability of each feature (interface between stem and suffix)\r\n\r\n # collect the probabilities of each object, to be normalized (divided by their totals) later\r\n for ts_list in token_segs:\r\n avg_prob = 1.0 / len(ts_list)\r\n for ts in ts_list:\r\n root = ts.root\r\n rand_val = 1.0\r\n if root in estems:\r\n estems[root] += rand_val * avg_prob\r\n else: estems[root] = rand_val * avg_prob\r\n\r\n suffix = ts.suffix\r\n if suffix in esuffix:\r\n esuffix[suffix] += rand_val * avg_prob\r\n else: esuffix[suffix] = rand_val * avg_prob\r\n\r\n trans = ts.trans\r\n ftrans = feature(root, suffix)\r\n if (trans, ftrans) in etrans:\r\n etrans[(trans, ftrans)] += rand_val * avg_prob\r\n else: etrans[(trans, ftrans)] = rand_val * avg_prob\r\n\r\n if ftrans in eftrans:\r\n eftrans[ftrans] += rand_val * avg_prob\r\n else: eftrans[ftrans] = rand_val * avg_prob\r\n\r\n # divide by the totals\r\n probstems = estems\r\n probsum = sum(probstems.values())\r\n for stem in probstems:\r\n probstems[stem] /= probsum\r\n\r\n probsuffix = esuffix\r\n probsum = sum(probsuffix.values())\r\n for suffix in probsuffix:\r\n probsuffix[suffix] /= probsum\r\n\r\n probtrans = etrans\r\n for trans, ftrans in probtrans:\r\n probtrans[(trans, ftrans)] /= eftrans[ftrans]\r\n\r\n return probstems, probsuffix, probtrans", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale,self.period))", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale,self.period))", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale,self.period))", "def get_distribution(self):\n\n # If the distributions have been updated before.\n if self.update_number > 0:\n for m in range(0, self.document_number, 1):\n for k in range(0, self.topic_number,1):\n probability = self.document_distribution_over_topic[m][k] / self.update_number\n self.document_distribution_over_topic[m][k] = probability\n for 
k in range(0, self.topic_number,1):\n for v in range(0, self.term_number,1):\n probability = self.topic_distribution_over_term[k][v] / self.update_number\n self.topic_distribution_over_term[k][v] = probability\n # The distributions have not been updated once.\n else:\n for m in range(0, self.document_number, 1):\n for k in range(0, self.topic_number, 1):\n self.document_distribution_over_topic[m][k] = (\n (self.document_topic_count_matrix[m][k] + self.alpha[k]) / (\n self.sum_document_by_topic_count[m] + self.sum_alpha))\n for k in range(0, self.topic_number, 1):\n for v in range(0, self.term_number, 1):\n self.topic_distribution_over_term[k][v] = (\n (self.topic_term_count_matrix[k][v] + self.beta[v]) / (\n self.sum_topic_by_term_count[k] + self.sum_beta))", "def generate_moments(hyper, params):\n\n k, d = hyper['k'], hyper['d']\n\n p = params # Shorthand, don't judge\n m = {} # Moments\n for x1 in xrange(1,d+1):\n m[(x1,)] = sum( p[(h,x1)] * p[(h,)] for h in xrange(1,k+1) )\n for x2 in xrange(1,d+1):\n m[(x1,x2)] = sum( p[(h,x1)] * p[(h,x2)] * p[(h,)] for h in xrange(1,k+1) )\n for x3 in xrange(1,d+1):\n m[(x1,x2,x3)] = sum( p[(h,x1)] * p[(h,x2)] * p[(h,x3)] * p[(h,)] for h in xrange(1,k+1) )\n return m", "def update_moments_r(self):\n denominator = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2 )\n nominator1 = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2 ) * self.constellation\n \n nominator2 = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2) * np.power(self.constellation, 2)\n try:\n \n moment1 = nominator1.sum(axis=1) / denominator.sum(axis=1)\n moment2 = nominator2.sum(axis=1) / denominator.sum(axis=1)\n assert np.all(np.logical_not(np.isnan(moment1))) and np.all(np.logical_not(np.isnan(moment2)))\n except:\n print(\"Oops! That was no valid number. Try again...\")\n\n \n self.mu = moment1\n return moment1, moment2", "def var_parameters(jd,mag,err):\n\n mean = np.mean(mag)\n nepochs = float(len(jd))\n\n chi = np.sum( (mag - mean)**2. / err**2. 
)\n p_chi = chi2.cdf(chi,(nepochs-1))\n\n\n a = (mag-mean)**2\n ex_var = (np.sum(a-err**2)/((nepochs*(mean**2))))\n sd = np.sqrt((1./(nepochs-1))*np.sum(((a-err**2)-ex_var*(mean**2))**2))\n ex_verr = sd/((mean**2)*np.sqrt(nepochs))\n\n\n return p_chi, ex_var, ex_verr", "def priorDistribution(beta):\r\n ### TODO: Write your code here\r\n m_v = np.zeros(2)\r\n print(\"m_v shape: \" ,m_v.shape)\r\n c_v = np.array( [ [ beta , 0 ] , [ 0 , beta ] ] )\r\n print(\"c_v shape: \",c_v.shape)\r\n x_s = []\r\n for i in np.linspace(-1 , 1 , 150):\r\n for j in np.linspace(-1 , 1 , 150):\r\n x_s.append([i,j])\r\n x_s = np.array(x_s)\r\n print(\"x_s shape: \",x_s.shape)\r\n density = util.density_Gaussian(m_v , c_v , x_s)\r\n #print(density)\r\n print(\"length density \",len(density))\r\n X,Y = np.meshgrid( np.linspace(-1,1,150) , np.linspace(-1,1,150) )\r\n plt.contour( X , Y , np.reshape(density , (150, 150 )) )\r\n plt.plot(-0.1 , -0.5 , marker = 'o' , MarkerSize = 10 , label = 'point a')\r\n plt.xlabel('a0 ')\r\n plt.ylabel(' a1 ')\r\n plt.legend()\r\n plt.title('p(a)')\r\n plt.show() \r\n print('-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x')\r\n return", "def getInitParams(self):\n paramDict = Distribution.getInitParams(self)\n paramDict['strategy'] = self.strategy\n paramDict['nPoints'] = self.nPoints\n return paramDict", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['lambda'] = self.lambdaVar # rate parameter\n paramDict['low' ] = self.low # lower domain boundary\n return paramDict", "def PH2From3Moments (moms, prec=1e-14):\n\n m1, m2, m3 = moms\n\n # check moment boounds\n m2l = APH2ndMomentLowerBound(m1, 2) \n m3l = APH3rdMomentLowerBound(m1, m2, 2) \n m3u = APH3rdMomentUpperBound(m1, m2, 2) \n \n if m2<m2l:\n raise Exception(\"The given second moment is not feasible!\") \n if m3<m3l:\n raise Exception(\"The given third moment is not feasible (too small)!\")\n if m3>m3u:\n raise Exception(\"The given third moment is not feasible (too large)!\")\n \n # check if we have an exponential distribution\n if abs(m2/m1/m1-2.0) < prec:\n return (np.matrix([1]), np.matrix([[-1/m1]]))\n \n # calculate parameters\n b = 3.0*m1*m2-m3\n c = 3.0*m2*m2-2.0*m1*m3\n e = -2.0*m1*m1+m2\n a = b*b+6.0*c*e\n if a<0:\n a = 0\n a = math.sqrt(a)\n if c>0:\n lambda1 = (b - a) / c\n lambda2 = (b + a) / c\n p = (-b-6.0*m1*e+a) / (b+a)\n elif c<0:\n lambda1 = (b + a) / c\n lambda2 = (b - a) / c\n p = (b+6.0*m1*e+a) / (-b+a)\n elif c==0:\n lambda1 = 0\n lambda2 = 1.0 / m1\n p = 0\n \n # return the result\n return (np.matrix([p,1.0-p]), np.matrix([[-lambda1, lambda1], [0,-lambda2]]))", "def prop_func_form_params(param1,param2,*arg):\n return np.log(MH.simple_2D_Gauss(param1-param2,arg[0],arg[1]))", "def fdist(param1, param2):\n return(prng.gamma(param1, param2))", "def initializeDistribution(self):\n self.convertToDistrDict['Jacobi'] = self.convertJacobiToBeta\n self.convertToQuadDict ['Jacobi'] = self.convertBetaToJacobi\n self.measureNormDict ['Jacobi'] = self.stdProbabilityNorm\n #this \"if\" section can only be called if distribution not generated using readMoreXML\n if (not self.upperBoundUsed) and (not self.lowerBoundUsed):\n self._distribution = distribution1D.BasicBetaDistribution(self.alpha,self.beta,self.high-self.low,self.low)\n else:\n if self.lowerBoundUsed == False:\n a = 0.0\n else:\n a = self.lowerBound\n if self.upperBoundUsed == False:\n b = sys.float_info.max\n else:\n b = self.upperBound\n self._distribution = 
distribution1D.BasicBetaDistribution(self.alpha,self.beta,self.high-self.low,a,b,self.low)\n self.preferredPolynomials = 'Jacobi'\n self.compatibleQuadrature.append('Jacobi')\n self.compatibleQuadrature.append('ClenshawCurtis')", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['apex' ] = self.apex\n paramDict['min' ] = self.min\n paramDict['max' ] = self.max\n return paramDict", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['mu' ] = self.mu\n return paramDict", "def distribution(self):\n \n #external_distribution serves both the purpose of external setting of distribution and the caching of distribution()\n if self.external_distribution:\n return self.external_distribution_array;\n else:\n energy_vector = []\n superset = self.generate_superset(0) \n \n for i in superset:\n state = self.ket(i)\n \n norm_squared = np.dot(state.T, state)\n \n if norm_squared > 0: #zero is appended at the end\n energy = np.dot(state.T, np.dot( self.epsilon, state))\n interaction = np.dot(state.T, np.dot( self.u, state))/2.0 #divide by two. Otherwise, <l r| U |l r > = U_LR + U_RL = 2U\n #print state, np.dot(self.u, state) \n #print interaction\n energy_vector.append( energy + interaction )\n \n energy_vector.insert(0, 0.0) \n probability = np.exp( np.multiply(-self.beta, energy_vector)) \n probability /= probability.sum() \n return probability", "def initializeDistribution(self):\n self.convertToDistrDict['Hermite'] = self.convertHermiteToNormal\n self.convertToQuadDict ['Hermite'] = self.convertNormalToHermite\n self.measureNormDict ['Hermite'] = self.stdProbabilityNorm\n if (not self.upperBoundUsed) and (not self.lowerBoundUsed):\n self._distribution = distribution1D.BasicNormalDistribution(self.mean,\n self.sigma)\n self.lowerBound = -sys.float_info.max\n self.upperBound = sys.float_info.max\n self.preferredQuadrature = 'Hermite'\n self.preferredPolynomials = 'Hermite'\n else:\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'Legendre'\n if self.lowerBoundUsed == False:\n a = -sys.float_info.max\n self.lowerBound = a\n else:\n a = self.lowerBound\n if self.upperBoundUsed == False:\n b = sys.float_info.max\n self.upperBound = b\n else:\n b = self.upperBound\n self._distribution = distribution1D.BasicNormalDistribution(self.mean,\n self.sigma,\n a,b)", "def prop_dist_form_params(*arg):\n return np.random.multivariate_normal(*arg)", "def _get_prior_params(self):\n # relation transformation matrix\n M_mu = np.eye(self.n_polarities, dtype=\"float32\")\n M_mu[1, :] = [0., 0.3, 0.]\n M_mu = np.tile(M_mu, (self.n_rels, 1)).reshape(\n self.n_rels, self.n_polarities, self.n_polarities\n )\n # for rel, rel_idx in iteritems(self.rel2idx):\n # # swap axes for contrastive relations\n # if check_rel(rel, CONTRASTIVE_RELS):\n # mu_i = M_mu[rel_idx]\n # mu_i[[0, 2]] = mu_i[[2, 0]]\n M_mu = torch.tensor(M_mu)\n M_sigma = torch.tensor(\n np.ones((self.n_rels, self.n_polarities, self.n_polarities),\n dtype=\"float32\")\n )\n # beta\n beta_p = 5. * torch.tensor(np.ones((self.n_rels, self.n_polarities),\n dtype=\"float32\"))\n beta_q = 5. 
* torch.tensor(np.ones((self.n_rels, self.n_polarities),\n dtype=\"float32\"))\n # z_epsilon\n z_epsilon_p = torch.tensor(1.)\n z_epsilon_q = torch.tensor(15.)\n # scale factor\n scale_factor = torch.tensor(34.)\n return {\"M_mu\": M_mu, \"M_sigma\": M_sigma, \"beta_p\": beta_p,\n \"beta_q\": beta_q, \"z_epsilon_p\": z_epsilon_p,\n \"z_epsilon_q\": z_epsilon_q, \"scale_factor\": scale_factor}", "def demo_indef():\n n_dim = 2\n A = np.eye(n_dim)\n A[1, 1] = -1.\n covar = np.eye(n_dim)\n mean = np.zeros(n_dim)\n approx = approx_quad_form(mean, covar, A)\n\n # Sample from true dist\n n_sample = 10000\n x = np.random.multivariate_normal(mean, covar, n_sample)\n q_samples = np.zeros(n_sample)\n for i in range(n_sample):\n q_samples[i] = x[i] @ A @ x[i]\n\n q = np.linspace(-10, 10)\n\n plt.plot(\n q, approx(q), label='Approx.',\n color='tab:blue', linestyle='--')\n bins = np.linspace(-8, 8, 81)\n bins[0] = -np.inf\n bins[-1] = np.inf\n plt.hist(\n q_samples, density=True, histtype='stepfilled',\n bins=bins,\n alpha=0.5, color='black', label='Samples')\n plt.xlabel('q')\n plt.ylabel('pdf(q) [-]')\n plt.legend()\n\n central_moments_sample = scipy.stats.moment(\n q_samples, moment=[0, 1, 2, 3, 4])\n print(central_moments_sample)", "def getCrowDistDict(self):\n retDict = Distribution.getCrowDistDict(self)\n retDict['mu'] = self.mean\n retDict['sigma'] = self.sigma\n return retDict", "def initializeDistribution(self):\n self.raiseAMessage('initialize distribution')\n mu = distribution1D.vectord_cxx(len(self.mu))\n for i in range(len(self.mu)):\n mu[i] = self.mu[i]\n covariance = distribution1D.vectord_cxx(len(self.covariance))\n for i in range(len(self.covariance)):\n covariance[i] = self.covariance[i]\n if self.method == 'spline':\n if self.covarianceType != 'abs':\n self.raiseAnError(IOError,'covariance with type ' + self.covariance + ' is not implemented for ' + self.method + ' method')\n self._distribution = distribution1D.BasicMultivariateNormal(covariance, mu)\n elif self.method == 'pca':\n self._distribution = distribution1D.BasicMultivariateNormal(covariance, mu, str(self.covarianceType), self.rank)\n if self.transformation:\n self.lowerBound = [-sys.float_info.max]*self.rank\n self.upperBound = [sys.float_info.max]*self.rank\n else:\n self.lowerBound = [self.returnLowerBound(dim) for dim in range(self.dimension)]\n self.upperBound = [self.returnUpperBound(dim) for dim in range(self.dimension)]", "def _init_params(self):\n self.W_ems = []\n self.b_ems = []\n if self.rank_n_approx:\n W_em1 = self.init_fn[0](self.n_in,\n self.rank_n_approx,\n self.sparsity[0],\n self.scale[0],\n self.rng)\n W_em2 = self.init_fn[0](self.rank_n_approx,\n self.n_hids[0],\n self.sparsity[0],\n self.scale[0],\n self.rng)\n self.W_em1 = theano.shared(W_em1,\n name='W1_0_%s'%self.name)\n self.W_em2 = theano.shared(W_em2,\n name='W2_0_%s'%self.name)\n self.W_ems = [self.W_em1, self.W_em2]\n\n else:\n W_em = self.init_fn[0](self.n_in,\n self.n_hids[0],\n self.sparsity[0],\n self.scale[0],\n self.rng)\n self.W_em = theano.shared(W_em,\n name='W_0_%s'%self.name)\n self.W_ems = [self.W_em]\n\n self.b_em = theano.shared(\n self.bias_fn[0](self.n_hids[0], self.bias_scale[0],self.rng),\n name='b_0_%s'%self.name)\n self.b_ems = [self.b_em]\n\n for dx in range(1, self.n_layers):\n W_em = self.init_fn[dx](self.n_hids[dx-1] / self.pieces[dx],\n self.n_hids[dx],\n self.sparsity[dx],\n self.scale[dx],\n self.rng)\n W_em = theano.shared(W_em, name='W_%d_%s'%(dx,self.name))\n self.W_ems += [W_em]\n\n b_em = theano.shared(\n 
self.bias_fn[dx](self.n_hids[dx], self.bias_scale[dx],self.rng),\n name='b_%d_%s'%(dx,self.name))\n self.b_ems += [b_em]\n\n self.params = [x for x in self.W_ems]\n\n if self.learn_bias and self.learn_bias!='last':\n self.params = [x for x in self.W_ems] + [x for x in self.b_ems]\n elif self.learn_bias == 'last':\n self.params = [x for x in self.W_ems] + [x for x in\n self.b_ems][:-1]\n self.params_grad_scale = [self._grad_scale for x in self.params]\n if self.weight_noise:\n self.nW_ems = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_ems]\n self.nb_ems = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_ems]\n\n self.noise_params = [x for x in self.nW_ems] + [x for x in self.nb_ems]\n self.noise_params_shape_fn = [constant_shape(x.get_value().shape)\n for x in self.noise_params]", "def Rosenblatt_Transform(dist, x_i): \n if dist.stats(moments = 's') > 1 or dist.stats(moments = 's') < -1:\n \n x_N_mean = dist.median()\n x_N_std = (x_i - x_N_mean)/sst.norm.ppf(dist.cdf(x_i))\n \n return(x_N_mean, x_N_std)\n \n else:\n x_N_std = sst.norm.pdf(sst.norm.ppf(dist.cdf(x_i)))/dist.pdf(x_i)\n x_N_mean = x_i - sst.norm.ppf(dist.cdf(x_i))*x_N_std\n return(x_N_mean, x_N_std)", "def _get_marginal_pdfs( res, nbins=51, verbose=True ):\n\tvparam_names = res.vparam_names\n\tweights = res.weights\n\tsamples = res.samples\n\n\tpdfdict = {}\n\n\tfor param in vparam_names :\n\t\tipar = vparam_names.index( param )\n\t\tparamvals = samples[:,ipar]\n\n\t\tif nbins>1:\n\t\t\tif param in res.bounds :\n\t\t\t\tparvalmin, parvalmax = res.bounds[param]\n\t\t\telse :\n\t\t\t\tparvalmin, parvalmax = 0.99*paramvals.min(), 1.01*paramvals.max()\n\t\t\tparambins = np.linspace( parvalmin, parvalmax, nbins, endpoint=True ).flatten()\n\t\t\tbinindices = np.digitize( paramvals, parambins )\n\n\t\t\t# we estimate the marginalized pdf by summing the weights of all points in the bin,\n\t\t\t# where the weight of each point is the prior volume at that point times the\n\t\t\t# likelihood, divided by the total evidence\n\t\t\tpdf = np.array( [ weights[np.where( binindices==ibin )].sum() for ibin in range(len(parambins)) ] )\n\t\telse :\n\t\t\tparambins = None\n\t\t\tpdf = None\n\n\n\t\tmean = (weights * samples[:,ipar]).sum()\n\t\t#print(samples[:,ipar]-mean)\n\t\t#print(weights)\n\t\tstd = np.sqrt( (weights * (samples[:,ipar]-mean)**2 ).sum() )\n\n\n\t\tpdfdict[param] = (parambins,pdf,mean,std,res.logz)\n\n\t\tif verbose :\n\t\t\tif np.abs(std)>=0.1:\n\t\t\t\tprint( ' <%s> = %.2f +- %.2f'%( param, np.round(mean,2), np.round(std,2)) )\n\t\t\telif np.abs(std)>=0.01:\n\t\t\t\tprint( ' <%s> = %.3f +- %.3f'%( param, np.round(mean,3), np.round(std,3)) )\n\t\t\telif np.abs(std)>=0.001:\n\t\t\t\tprint( ' <%s> = %.4f +- %.4f'%( param, np.round(mean,4), np.round(std,4)) )\n\t\t\telse :\n\t\t\t\tprint( ' <%s> = %.3e +- %.3e'%( param, mean, std) )\n\n\n\t\tif param == 'x0' :\n\t\t\tsalt2 = sncosmo.Model( source='salt2')\n\t\t\tsalt2.source.set_peakmag( 0., 'bessellb', 'ab' )\n\t\t\tx0_AB0 = salt2.get('x0')\n\t\t\tmBmean = -2.5*np.log10( mean / x0_AB0 )\n\t\t\tmBstd = 2.5*np.log10( np.e ) * std / mean\n\t\t\tmBbins = -2.5*np.log10( parambins / x0_AB0 )\n\n\t\t\tpdfdict['mB'] = ( mBbins, pdf, mBmean, mBstd )\n\t\t\tif verbose:\n\t\t\t\tprint( ' <%s> = %.3f +- %.3f'%( 'mB', np.round(mBmean,3), np.round(mBstd,3)) )\n\n\treturn( pdfdict )", "def getInitParams(self):\n paramDict = Distribution.getInitParams(self)\n paramDict['mapping'] = self.mapping\n paramDict['values'] = self.values\n return paramDict", 
"def posteriorDistribution(x,z,beta,sigma2):\r\n ### TODO: Write your code here\r\n\r\n # mean of posterior distribution is the MAP estimate of the weights a\r\n # tau^2(from notes) is beta\r\n\r\n extra_col = np.ones((x.shape[0], 1))\r\n x = np.append(extra_col, x, axis = 1)\r\n\r\n alpha_map = np.linalg.inv((np.transpose(x)@x + (sigma2/beta)*np.eye(2)))@(np.transpose(x)@z)\r\n mu = alpha_map\r\n\r\n Cov = np.linalg.inv((np.transpose(x)@x + (sigma2/beta)*np.eye(2)))*sigma2\r\n\r\n num_x = 100\r\n num_y = 100\r\n\r\n xvalues = np.linspace(-1, 1, num = num_x)\r\n yvalues = np.linspace(-1, 1, num = num_y)\r\n X_grid, Y_grid = np.meshgrid(xvalues, yvalues)\r\n\r\n samples = np.column_stack((X_grid.flatten(), Y_grid.flatten()))\r\n\r\n density = util.density_Gaussian(mu.squeeze(), Cov, samples)\r\n density_grid = np.reshape(density, (num_x, num_y))\r\n\r\n plt.figure(1)\r\n plt.title(\"Posterior Distribution of α Given 5 Data Points\")\r\n plt.xlabel('$α_0$')\r\n plt.ylabel('$α_1$')\r\n plt.scatter(-0.1, -0.5, c='r')\r\n plt.contour(X_grid, Y_grid, density_grid, cmap=plt.cm.winter)\r\n plt.show()\r\n\r\n return (mu,Cov)", "def get_prob_for_distributions(p):\n w1 = p[0]\n mu1 = p[1]\n sigma1 = p[2]\n w2 = p[3]\n mu2 = p[4]\n sigma2 = p[5]\n w3 = p[6]\n mu3 = p[7]\n sigma3 = p[8]\n dist_range = (0, 4.330310991999920844e+01)\n x = np.linspace(dist_range[0], dist_range[1], 1000)\n A1 = np.array(w1 * mlab.normpdf(x, mu1, sigma1)).sum()\n A2 = np.array(w2 * mlab.normpdf(x, mu2, sigma2)).sum()\n A3 = np.array(w3 * mlab.normpdf(x, mu3, sigma3)).sum()\n p1 = A1 / (A1 + A2 + A3)\n p2 = A2 / (A1 + A2 + A3)\n p3 = A3 / (A1 + A2 + A3)\n return p1, p2, p3", "def Get_params(numparams, dt, D):\n # bounds from table 1 Kowalek et al 2020\n Nmin, Nmax = 30, 600\n Bmin, Bmax = 1, 6\n Rmin, Rmax = 1, 17\n alphamin, alphamax = 0.3, 0.7\n Qmin, Qmax = 1, 9\n\n # Gen parameters\n Q = np.random.uniform(Qmin, Qmax, size=numparams)\n Q1, Q2 = Q, Q\n\n NsND = np.random.randint(Nmin, Nmax + 1, size=numparams)\n NsAD = np.random.randint(Nmin, Nmax + 1, size=numparams)\n NsCD = np.random.randint(Nmin, Nmax + 1, size=numparams)\n NsDM = np.random.randint(Nmin, Nmax + 1, size=numparams)\n TDM = NsDM * dt\n\n B = np.random.uniform(Bmin, Bmax, size=numparams)\n r_c = np.sqrt(D * NsCD * dt / B) # solving for r_c in eq. 8 Kowalek\n\n R = np.random.uniform(Rmin, Rmax, size=numparams)\n v = np.sqrt(R * 4 * D / TDM) # solving for v in eq. 7 Kowalek\n\n alpha = np.random.uniform(alphamin, alphamax, size=numparams)\n\n # Compute sigma for ND, AD, CD from eq. 12 Kowalek\n sigmaND = np.sqrt(D * dt) / Q1\n sigmaAD = np.sqrt(D * dt) / Q1\n sigmaCD = np.sqrt(D * dt) / Q1\n\n # Compute sigma for DM from eq. 
12 Kowalek\n sigmaDM = np.sqrt(D * dt + v ** 2 * dt ** 2) / Q2\n\n return np.array(\n [\n NsND,\n NsAD,\n NsCD,\n NsDM,\n D * np.ones(numparams),\n dt * np.ones(numparams),\n r_c,\n v,\n alpha,\n sigmaND,\n sigmaAD,\n sigmaCD,\n sigmaDM,\n ]\n ).T", "def _calpara(self):\n self.up = math.exp(self.sigma*math.sqrt(self.deltatime))\n self.down = math.exp(-self.sigma*math.sqrt(self.deltatime))\n self.upprob = (math.exp((self.r-self.d)*self.deltatime)-self.down)/(self.up-self.down)", "def APH2ndMomentLowerBound (m1, n):\n\n return float(m1)*m1*(n+1) / n", "def _construct_mom_stuff(self):\n a = self.mom_mix_rate\n dist_mean = self.GN.dist_mean\n dist_cov = self.GN.dist_cov\n # Get the generated sample observations for this batch, transformed\n # linearly into the desired space for moment matching...\n X_b = T.dot(self.GN.output, self.mom_match_proj)\n # Get their mean\n batch_mean = T.mean(X_b, axis=0)\n # Get the updated generator distribution mean\n new_mean = ((1.0 - a[0]) * self.GN.dist_mean) + (a[0] * batch_mean)\n # Use the mean to get the updated generator distribution covariance\n X_b_minus_mean = X_b - new_mean\n # Whelp, I guess this line needs the cast... for some reason...\n batch_cov = T.dot(X_b_minus_mean.T, X_b_minus_mean) / T.cast(X_b.shape[0], 'floatX')\n new_cov = ((1.0 - a[0]) * self.GN.dist_cov) + (a[0] * batch_cov)\n # Get the cost for deviation from the target distribution's moments\n mean_err = new_mean - self.target_mean\n cov_err = (new_cov - self.target_cov)\n mm_cost = self.mom_match_weight[0] * \\\n (T.sum(mean_err**2.0) + T.sum(cov_err**2.0))\n # Construct the updates for the running estimates of the generator\n # distribution's first and second-order moments.\n mom_updates = OrderedDict()\n mom_updates[self.GN.dist_mean] = new_mean\n mom_updates[self.GN.dist_cov] = new_cov\n return [mm_cost, mom_updates]", "def aicpdf(xvals, distribution, params):\n if distribution == 'pareto':\n pvals = (params['xmin'] * params['mu'] ** params['xmin']) / (xvals ** (params['xmin'] + 1))\n return pvals\n \n elif distribution == 'lognormal':\n #import pdb; pdb.set_trace()\n pvals = np.exp(-(np.log(xvals) - params['mu'])**2 / (2 * params['sigma']**2)) / (xvals * params['sigma'] * np.sqrt(2*np.pi))\n return pvals\n \n elif distribution == 'normal':\n pvals = np.exp(-(xvals - params['mu'])**2 / (2 * params['sigma']**2)) / (params['sigma'] * np.sqrt(2*np.pi))\n return pvals\n \n elif distribution == 'exponential':\n pvals = params['lambda'] * np.exp(-params['lambda'] * xvals)\n return pvals \n \n elif distribution == 'boundedpl':\n #pvals = (params['mu'] * (params['mu'] ** params['xmax'] - params['xmin'] ** params['xmax'])) / (xvals ** (params['mu'] + 1))\n #mu * (xmax ^ mu - xmin ^ mu) / x ^ (mu+1)\n pvals = (params['mu'] * (params['xmax'] ** params['mu'] - params['xmin'] ** params['mu'])) / (xvals ** (params['mu'] + 1))\n return pvals", "def moment(self, n, mu, sigma):\n return scipy_norm.moment(n, mu, sigma)", "def _get_params(self):\r\n return np.hstack((self.variance, self.lengthscale))", "def _get_params(self):\r\n return np.hstack((self.variance, self.lengthscale))", "def computeMoments(x):\n return (abs(stats.skew(x)),abs(stats.kurtosis(x,None,True)))", "def initializeDistribution(self):\n self.minVal = min(math.exp(self.upperBound),math.exp(self.lowerBound))\n self.maxVal = max(math.exp(self.upperBound),math.exp(self.lowerBound))", "def initializeDistribution(self):\n self.convertToDistrDict['Legendre'] = self.convertLegendreToUniform\n self.convertToQuadDict ['Legendre'] = 
self.convertUniformToLegendre\n self.measureNormDict ['Legendre'] = self.stdProbabilityNorm\n self.convertToDistrDict['ClenshawCurtis'] = self.convertLegendreToUniform\n self.convertToQuadDict ['ClenshawCurtis'] = self.convertUniformToLegendre\n self.measureNormDict ['ClenshawCurtis'] = self.stdProbabilityNorm\n self._distribution = distribution1D.BasicUniformDistribution(self.lowerBound,self.lowerBound+self.range)", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale))", "def __bivariate_gaussian(self, sig1, sig2, mu1, mu2, x1, x2, rho):\n Z1 = ((x1 - mu1) / sig1) ** 2\n Z2 = ((x2 - mu2) / sig2) ** 2\n Z3 = 2 * rho * (x1 - mu1) * (x2 - mu2) / (sig1 * sig2)\n Z = Z1 + Z2 - Z3\n\n pi_const = torch.tensor([np.pi]).to(self.__device)\n\n return torch.exp(-Z / (2 * (1 - rho ** 2))).to(self.__device) / (\n 2 * pi_const * sig1 * sig2 * torch.sqrt(1 - rho ** 2)\n )", "def getCrowDistDict(self):\n retDict = Distribution.getCrowDistDict(self)\n retDict['mu'] = self.mean\n retDict['sigma'] = self.sigma\n retDict['low'] = self.low\n return retDict", "def __bivariate_gaussian(self, sig1, sig2, mu1, mu2, x1, x2, rho):\n Z1 = ((x1 - mu1) / sig1) ** 2\n Z2 = ((x2 - mu2) / sig2) ** 2\n Z3 = 2 * rho * (x1 - mu1) * (x2 - mu2) / (sig1 * sig2)\n\n Z = Z1 + Z2 - Z3\n\n pi_const = torch.tensor([np.pi]).to(self.__device)\n\n return torch.exp(-Z / (2 * (1 - rho ** 2))).to(self.__device) / (\n 2 * pi_const * sig1 * sig2 * torch.sqrt(1 - rho ** 2)\n )", "def estimate_gaussian_params(X):\n mu = X.mean(axis=0)\n var = X.std(axis=0)**2.0\n return mu,var", "def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu\")\n self.modelBuilder.factory_('expr::vbfH_s_func(\"@0-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"1-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::vbfH_sbi_func(\"sqrt(@0)\", mu)')", "def like_one(theta,dt,dmag,sigma):\n\n gamma, A = theta\n aux=(1/np.sqrt(2*np.pi*Veff2(dt,sigma,A,gamma)))*np.exp(-1.0*(dmag**2)/(2.0*Veff2(dt,sigma,A,gamma)))\n\n return aux", "def update_params(x, prior, posterior):\r\n mu0, kappa0, alpha0, beta0 = prior\r\n mu_t, kappa_t, alpha_t, beta_t = posterior\r\n return np.r_[mu0, (kappa_t*mu_t + x)/(kappa_t + 1)], \\\r\n np.r_[kappa0, kappa_t + 1], \\\r\n np.r_[alpha0, alpha_t + 0.5], \\\r\n np.r_[beta0, beta_t + 0.5*kappa_t*(x - mu_t)**2/(kappa_t + 1)]", "def prior_params_tree(self):\n id = {name:i for i, name in enumerate(list(self.tree.keys()))}\n n_nodes = len(id)\n dist_mx = np.zeros((n_nodes, n_nodes))\n\n for node1, edges in self.tree.items():\n for node2, dist in edges.dist:\n dist_mx[id[node1], id[node2]] = dist\n dist_mx[id[node2], id[node1]] = dist\n\n # while np.count_nonzero(dist_mx) < (n_nodes ** 2 - n_nodes):\n for _ in range(20):\n for i, j in combinations(range(n_nodes), 2):\n if dist_mx[i,j] > 0:\n continue\n row_i = dist_mx[i]\n row_j = dist_mx[j]\n value = (row_i + row_j) * (row_i > 0) * (row_j > 0)\n dist_mx[i, j] = dist_mx[j, i] = - max(np.unique(value))\n dist_mx = np.abs(dist_mx)\n\n evolve_rate = []\n for node1, node2 in combinations(self.m_cov.keys(), 2):\n mx_cov_dist = np.abs(self.m_cov[node1] - self.m_cov[node2])\n elements = mx_cov_dist[np.triu_indices(len(mx_cov_dist))]\n norm_elements = elements / dist_mx[id[node2], id[node1]]\n evolve_rate += list(norm_elements)\n\n\n\n df = np.mean([p.shape[0] for _, p in self.m_profiles.items()])\n p_theta_alpha = df/2\n # 
p_theta_alpha = 4\n p_theta_beta = np.percentile(evolve_rate, 75) * (p_theta_alpha - 1)\n # print(p_theta_alpha, p_theta_beta)\n return p_theta_alpha, p_theta_beta", "def __init__(self, \n param_epsilon, \n param_tau,\n param_u, \n param_gamma_left,\n param_gamma_right,\n param_beta):\n self.epsilon = param_epsilon\n self.tau = param_tau\n self.u = param_u\n self.gamma_left = param_gamma_left\n self.gamma_right = param_gamma_right\n \n self.sigma_retarded = 1j * (self.gamma_left + self.gamma_right) / 2.0\n self.sigma_advanced = - self.sigma_retarded;\n \n self.dim = len(self.u)\n self.rho = np.zeros((2**self.dim))\n \n self.beta = param_beta\n \n self.cutoff_chance = 0.0001\n self.external_distribution = False\n self.external_distribution_array = self.distribution()\n self.external_distribution = True", "def _get_parameters(n, j, domain, g, ncap):\n alphas, betas = rc.recurrenceCoefficients(n - 2, lb=domain[0], rb=domain[1],\n j=j, g=g, ncap=ncap)\n omegas = g * np.array(alphas)\n ts = g * np.sqrt(np.array(betas)[1::])\n c0 = np.sqrt(betas[0])\n return omegas, ts, c0", "def aicmle(timeSeries, distribution):\n mlevals = {} \n if distribution == 'pareto':\n mlevals['xmin'] = np.min(timeSeries)\n mlevals['mu'] = 1 - timeSeries.shape[0] / (timeSeries.shape[0] * np.log(mlevals['xmin']) - np.sum(np.log(timeSeries)))\n \n elif distribution == 'lognormal':\n mlevals['mu'] = np.sum(np.log(timeSeries)) / timeSeries.shape[0]\n mlevals['sigma'] = np.sqrt(np.sum( (np.log(timeSeries) - mlevals['mu'])**2) / timeSeries.shape[0])\n \n elif distribution == 'normal':\n mlevals['mu'] = np.mean(timeSeries)\n mlevals['sigma'] = np.sqrt(sum((timeSeries - np.mean(timeSeries))**2) / timeSeries.shape[0])\n \n elif distribution == 'exponential':\n mlevals['lambda'] = 1.0 / np.mean(timeSeries)\n \n elif distribution == 'boundedpl':\n mlevals['xmin'] = np.min(timeSeries)\n mlevals['xmax'] = np.max(timeSeries)\n minmuEstimate = 1.1\n mlevals['mu'] = fmin(lambda mu: -len(timeSeries) * np.log( (mu - 1) / (np.min(timeSeries)**(1 - mu) - np.max(timeSeries)**(1 - mu))) + mu * np.sum(np.log(timeSeries)), minmuEstimate, disp=0)[0]\n\n return mlevals", "def __init__(self, mean=0.0, sigma=1.0):\n super().__init__()\n self.mean = mean\n self.sigma = sigma\n self.hasInfiniteBound = True\n self.type = 'Normal'\n self.distType = 'Continuous'\n self.compatibleQuadrature.append('Hermite')\n self.compatibleQuadrature.append('CDF')\n #THESE get set in initializeDistribution, since it depends on truncation\n #self.preferredQuadrature = 'Hermite'\n #self.preferredPolynomials = 'Hermite'", "def compute_t_params(mu, kappa, alpha, beta):\r\n mu_, sigma2_, dof_ = mu, beta*(kappa + 1)/(alpha*kappa), 2*alpha\r\n return mu_, sigma2_, dof_", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['n' ] = self.n\n paramDict['p' ] = self.p\n return paramDict", "def generate_stat(sample_size, sparsity = 0, amplitude = 0, sigma = 1):\n var = generate_variable(sample_size, sparsity, amplitude, sigma)\n y_obs = var[0]\n \n \"\"\" \n f is equal to -X(t,theta) and we will minimize f (max. 
X)\n \"\"\"\n def f(x):\n \"\"\" \n f(x)=-X(t,theta) where x[0]=t and x[1]=theta\n \"\"\"\n res = np.real(np.exp(-1j*x[1])*\\\n sum(y_obs[k+sample_size]*np.exp(1j*k*x[0]) \\\n for k in range(-sample_size,sample_size+1))) \n res = -res/np.sqrt(2*sample_size+1) \n return res\n \n def grad_f(x):\n \"\"\" \n gradient of f\n \"\"\"\n res1 = np.real(np.exp(-1j*x[1])*\\\n sum(1j*k*y_obs[k+sample_size]*np.exp(1j*k*x[0]) \\\n for k in range(-sample_size,sample_size+1)))\n res1 = -res1/np.sqrt(2*sample_size+1)\n \n res2 = np.real(np.exp(-1j*x[1])*\\\n sum(-1j*y_obs[k+sample_size]*np.exp(1j*k*x[0]) \\\n for k in range(-sample_size,sample_size+1)))\n res2 = -res2/np.sqrt(2*sample_size+1)\n return np.array([res1, res2])\n \n #% Minimizing f\n \n \"\"\" \n we minimize on [0, 2pi]^2\n \"\"\"\n bnds = ((0, 2*np.pi), (0, 2*np.pi))\n \n \"\"\" \n We begin by a greedy search of the initialization point over a grid of size 126^2\n the initialization point is init\n \"\"\"\n x = y = np.arange(0, 2*np.pi, 0.05)\n steps = 126\n X, Y = np.meshgrid(x, y)\n val = np.array([f([x,y]) for x,y in zip(np.ravel(X), np.ravel(Y))])\n init = np.argmin(val)\n x1 = init%steps\n x2 = (init-x1)/steps\n init = [x1*0.05, x2*0.05]\n \n \"\"\" \n we minimize f...\n \"\"\"\n result = sco.minimize(f, init, method=\"L-BFGS-B\",\\\n jac=grad_f, bounds=bnds, tol=1e-15)\n \n \"\"\" \n (t1,theta1) is the argmax of X(t, theta) and l1=$\\lambda_1$\n \"\"\"\n t1 = result.x[0]\n theta1 = result.x[1]\n l1 = -f([t1,theta1])\n \n \n \"\"\" \n Function g(x) is equal to (X(t1,theta1)-X(x))/(1-rho((t1,theta1)-x))\n \"\"\"\n def g(x):\n a0 = x[0]-t1\n a1 = x[1]-theta1\n N = 2*sample_size+1\n \n vec = np.array([a0,a1])\n r = np.linalg.norm(vec)\n \"\"\" \n the value for r=0 is set to l1 (note that r=0 corresponds to x=(t1,theta1))\n \"\"\" \n res = l1 \n \n if (0<r) & (r<0.00001):\n \"\"\" \n we look a values near (t1,theta1) for which an indetermination occurs\n \"\"\" \n alpha= np.arccos(np.clip(a0/np.sqrt(a0**2+a1**2), -1.0, 1.0))\n u0 = np.cos(alpha)\n u1 = np.sin(alpha)\n \"\"\" \n u0,u1 defines the direction (unit vector)\n \"\"\"\n denom = sum((k*np.cos(alpha)-np.sin(alpha))**2*\\\n (np.sinc((r*(k*np.cos(alpha)-np.sin(alpha)))/(2*np.pi)))**2\\\n for k in range(-sample_size,sample_size+1))/N\n \"\"\" \n denom computes the denominator\n \"\"\"\n \n# \"\"\" \n# We use simpson rule for the numerator\n# \"\"\"\n# h = np.linspace(0,1,500)\n# \n# b0 = t1 + h*a0\n# b1 = theta1 + h*a1\n# \n# value = (1-h)*(u0**2*\\\n# np.real(np.exp(-1j*b1)*sum(-k**2*y_obs[k+sample_size]*np.exp(1j*k*b0) \\\n# for k in range(-sample_size,sample_size+1)))\\\n# +2*u0*u1*\\\n# np.real(np.exp(-1j*b1)*sum(k*y_obs[k+sample_size]*np.exp(1j*k*b0) \\\n# for k in range(-sample_size,sample_size+1)))\\\n# +u1**2*\\\n# np.real(np.exp(-1j*b1)*sum((-1)*y_obs[k+sample_size]*np.exp(1j*k*b0) \\\n# for k in range(-sample_size,sample_size+1)))) \n# value = value/np.sqrt(N)\n# \n# num = sci.simps(value, h)\n \n \"\"\" \n we use a quadrature for the numerator\n \"\"\" \n fun_int = lambda w: (1-w)*(u0**2*\\\n np.real(np.exp(-1j*(theta1+w*a1))*\\\n sum(-k**2*y_obs[k+sample_size]*np.exp(1j*k*(t1+w*a0)) \\\n for k in range(-sample_size,sample_size+1)))\\\n +2*u0*u1*\\\n np.real(np.exp(-1j*(theta1+w*a1))*\\\n sum(k*y_obs[k+sample_size]*np.exp(1j*k*(t1+w*a0)) \\\n for k in range(-sample_size,sample_size+1)))\\\n +u1**2*\\\n np.real(np.exp(-1j*(theta1+w*a1))*\\\n sum((-1)*y_obs[k+sample_size]*np.exp(1j*k*(t1+w*a0)) \\\n for k in range(-sample_size,sample_size+1)))) \n \n num = 
np.mean(sci.quad(fun_int, 0, 1, epsabs=1e-15, epsrel=1e-15, limit=1000))\n \n res = -num/denom\n \n if (r>=0.00001):\n \"\"\" \n we look a values far (t1,theta1) for which there is no indetermination\n \"\"\" \n res = (l1+f(x))/(1-(np.cos(a1)*dirichlet(a0,N)/N))\n \n return res \n \"\"\" \n we minimize g on [0, 2pi]^2 an dwe llok for the initialization point\n \"\"\"\n val2 = np.array([g([x,y]) for x,y in zip(np.ravel(X), np.ravel(Y))])\n init2 = np.argmin(val2)\n x1 = init2%steps\n x2 = (init2-x1)/steps\n init2 = [x1*0.05, x2*0.05] \n result2 = sco.minimize(g, init2, method=\"L-BFGS-B\", bounds=bnds, tol=1e-15) \n \"\"\" \n argmin of g\n \"\"\"\n t2 = result2.x[0]\n theta2 = result2.x[1] \n \"\"\" \n value of lambda_2\n \"\"\"\n l21 = l1-result2.fun \n a0 = t2-t1\n a1 = theta2-theta1\n N = 2*sample_size+1\n l22 = l1-(l1+f([t2,theta2]))/(1-(np.cos(a1)*dirichlet(a0,N)/N))\n l2 = max(l21,l22)\n \"\"\" \n we compute the statistic\n \"\"\"\n alpha1 = (1/3)*sample_size*(sample_size+1)\n alpha2 = (1/np.sqrt(N))*\\\n sum((k**2-alpha1)*\\\n np.real(y_obs[k+sample_size]*np.exp(1j*(k*t1-theta1))) \\\n for k in range(-sample_size,sample_size+1))\n alpha3 = (1/np.sqrt(N))*sum(k*np.real(y_obs[k+sample_size]*\\\n np.exp(1j*(k*t1-theta1))) for k in range(-sample_size,sample_size+1)) \n stat = (sigma*(alpha1*l1+alpha2)*scs.norm.pdf(l1/sigma)+\\\n (alpha1*sigma**2-alpha3**2)*(1-scs.norm.cdf(l1/sigma)))/\\\n (sigma*(alpha1*l2+alpha2)*scs.norm.pdf(l2/sigma)+\\\n (alpha1*sigma**2-alpha3**2)*(1-scs.norm.cdf(l2/sigma))) \n \n return stat", "def moments2nd(data):\n nrow,ncol=data.shape\n Isum = data.sum()\n Icol = data.sum(axis=0) # sum over all rows\n Irow = data.sum(axis=1) # sum over all cols\n colgrid = np.arange(ncol)\n rowgrid = np.arange(nrow)\n rowmean=np.sum(rowgrid*Irow)/Isum\n colmean=np.sum(colgrid*Icol)/Isum\n ROW,COL=np.indices((nrow,ncol)) \n rowgrid = rowgrid - rowmean # centered\n colgrid = colgrid - colmean\n Mrr = np.sum(rowgrid**2*data)/Isum\n Mcc = np.sum(colgrid**2*data)/Isum\n Mrc = np.sum(np.outer(rowgrid,colgrid)*data)/Isum \n return Mcc, Mrr, Mrc", "def weights_treatment_parameters(init_dict, GRID):\n GRID = np.linspace(0.01, 0.99, num=99, endpoint=True)\n\n coeffs_untreated = init_dict[\"UNTREATED\"][\"params\"]\n coeffs_treated = init_dict[\"TREATED\"][\"params\"]\n cov = construct_covariance_matrix(init_dict)\n x = simulate_covariates(init_dict)\n\n # We take the specified distribution for the cost shifters from the paper.\n cost_mean, cost_sd = -0.0026, np.sqrt(0.270)\n v_mean, v_sd = 0.00, np.sqrt(cov[2, 2])\n\n eval_points = norm.ppf(GRID, loc=v_mean, scale=v_sd)\n\n ate_weights = np.tile(1.0, 99)\n tut_weights = norm.cdf(eval_points, loc=cost_mean, scale=cost_sd)\n\n tt_weights = 1 - tut_weights\n\n def tut_integrand(point):\n eval_point = norm.ppf(point, loc=v_mean, scale=v_sd)\n return norm.cdf(eval_point, loc=cost_mean, scale=cost_sd)\n\n def tt_integrand(point):\n eval_point = norm.ppf(point, loc=v_mean, scale=v_sd)\n return norm.cdf(eval_point, loc=cost_mean, scale=cost_sd)\n\n # Scaling so that the weights integrate to one.\n tut_scaling = quad(tut_integrand, 0.01, 0.99)[0]\n tut_weights /= tut_scaling\n\n tt_scaling = quad(tt_integrand, 0.01, 0.99)[0]\n tt_weights /= tt_scaling\n\n mte = mte_information(coeffs_treated, coeffs_untreated, cov, GRID, x, init_dict)\n\n return ate_weights, tt_weights, tut_weights, mte", "def _second_moment(R, sig_l, sig_m, lum, mass, Mbh, beta, tensor,\n sigmaPsf, normPsf, step, nrad, surf_l, pixSize):\n if (max(sigmaPsf) > 0) and (pixSize > 
0): # PSF convolution\n\n # Kernel step is 1/4 of largest value between sigma(min) and 1/2 pixel side.\n # Kernel half size is the sum of 3*sigma(max) and 1/2 pixel diagonal.\n #\n if step == 0:\n step = max(pixSize/2., np.min(sigmaPsf))/4.\n mx = 3*np.max(sigmaPsf) + pixSize/np.sqrt(2)\n\n # Make grid linear in log of radius RR\n #\n rmax = np.max(R) + mx # Radius of circle containing all data + convolution\n logRad = np.linspace(np.log(step), np.log(rmax), nrad) # Linear grid in log(RR)\n rr = np.exp(logRad)\n\n # The model Vrms computation is only performed on the radial grid\n # which is then used to interpolate the values at any other location\n #\n wm2Pol = np.empty_like(rr)\n mgePol = np.empty_like(rr)\n rup = 3*np.max(sig_l)\n for j in range(rr.size): # Integration of equation (50)\n wm2Pol[j] = quadva(_integrand, [rr[j], rup],\n args=(sig_l, sig_m, lum, mass, Mbh, rr[j], beta, tensor))[0]\n mgePol[j] = np.sum(surf_l * np.exp(-0.5*(rr[j]/sig_l)**2))\n\n nx = np.ceil(rmax/step)\n x1 = np.linspace(-nx, nx, 2*nx)*step\n xCar, yCar = np.meshgrid(x1, x1) # Cartesian grid for convolution\n\n # Interpolate MGE model and Vrms over cartesian grid\n #\n r1 = 0.5*np.log(xCar**2 + yCar**2) # Log radius of cartesian grid\n wm2Car = np.interp(r1, logRad, wm2Pol)\n mgeCar = np.interp(r1, logRad, mgePol)\n\n nk = np.ceil(mx/step)\n kgrid = np.linspace(-nk, nk, 2*nk)*step\n xgrid, ygrid = np.meshgrid(kgrid, kgrid) # Kernel is square\n\n # Compute kernel with equation (A6) of Cappellari (2008).\n # Normalization is irrelevant here as it cancels out.\n #\n kernel = np.zeros_like(xgrid)\n dx = pixSize/2\n sp = np.sqrt(2)*sigmaPsf\n for j in range(len(sigmaPsf)):\n kernel += normPsf[j] \\\n * (special.erf((dx-xgrid)/sp[j]) + special.erf((dx+xgrid)/sp[j])) \\\n * (special.erf((dx-ygrid)/sp[j]) + special.erf((dx+ygrid)/sp[j]))\n kernel /= np.sum(kernel)\n\n # Seeing and aperture convolution with equation (A3)\n #\n muCar = np.sqrt(signal.fftconvolve(wm2Car, kernel, mode='same')\n / signal.fftconvolve(mgeCar, kernel, mode='same'))\n\n # Interpolate convolved image at observed apertures.\n # Aperture integration was already included in the kernel.\n #\n mu = bilinear_interpolate(x1, x1, muCar, R/np.sqrt(2), R/np.sqrt(2))\n\n else: # No PSF convolution: just compute values\n\n mu = np.empty_like(R)\n rmax = 3*np.max(sig_l)\n for j in range(R.size):\n wm2Pol = quadva(_integrand, [R[j], rmax],\n args=(sig_l, sig_m, lum, mass, Mbh, R[j], beta, tensor))[0]\n mgePol = np.sum( surf_l * np.exp(-0.5*(R[j]/sig_l)**2) )\n mu[j] = np.sqrt(wm2Pol/mgePol)\n\n return mu", "def get_thrust_and_moment(self):\n\n f1 = self.k_f * self.omega_1 ** 2\n f2 = self.k_f * self.omega_2 ** 2\n \n # c is often used to indicate \"collective\" thrust\n c = f1 + f2\n \n M_x = (f1 - f2) * self.l\n return c, M_x", "def d2(self):\r\n return self.d1() - self.sigma*self.t**0.5", "def d2(self):\n d1 = self.d1()\n return d1 - self.sigma * (self.t **(0.5))", "def get_dists_2():\n d1 = Distribution(['0', '1'], [1 / 2, 1 / 2])\n d2 = Distribution(['0', '1'], [1 / 3, 2 / 3])\n d3 = Distribution(['0', '1'], [2 / 5, 3 / 5])\n return d1, d2, d3", "def get_hyperparameter_bounds():\n minf = float(\"-inf\")\n inf = float(\"inf\")\n params = dict(mu=(minf,inf), nu=(0.0 ,inf), r=(0.0, inf), s=(0.0, inf))\n return params", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['low' ] = self.low\n paramDict['high' ] = self.high\n paramDict['alpha'] = self.alpha\n paramDict['beta' ] = self.beta\n return paramDict", "def 
getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['range'] = self.range\n return paramDict\n # no other additional parameters required", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['lambda'] = self.lambdaVar\n paramDict['k' ] = self.k\n paramDict['low' ] = self.low\n return paramDict", "def doParametersOfInterest(self):\n ''' ref : physicsmodel -> rvf\n self.modelBuilder.out.var(\"MH\").setRange(float(self.mHRange[0]),float(self.mHRange[1]))\n self.modelBuilder.out.var(\"MH\").setConstant(False)\n '''\n\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doVar(\"Fvbf[0,0,1]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu,Fvbf\")\n self.modelBuilder.doVar(\"\")\n self.modelBuilder.factory_('expr::ggH_s_func(\"(@0-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_b_func(\"(1-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_sbi_func(\"sqrt(@0)*(1.-@1)\", mu,Fvbf)')\n\n self.modelBuilder.factory_('expr::vbfH_s_func(\"(@0-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"(1-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_sbi_func(\"sqrt(@0)*(@1)\", mu,Fvbf)')", "def MyBaseMoments(p,q,img,gauss_sigma,gauss_centroid=None, gauss_g1=0., gauss_g2=0.):\n weight = galsim.Image(np.zeros_like(img.array))\n gauss = galsim.Gaussian(sigma=gauss_sigma*pixel_scale).shear(g1=gauss_g1,g2=gauss_g2)\n if gauss_centroid is None:\n gauss_centroid = img.true_center\n weight = gauss.drawImage(image=weight, scale=pixel_scale, method='no_pixel', use_true_center=True, offset=(gauss_centroid-img.true_center)*(1))\n x = np.linspace(img.xmin-img.center.x*0-gauss_centroid.x*1, img.xmax-img.center.x*0-gauss_centroid.x*1, img.xmax-img.xmin+1)+0.*0.5\n y = np.linspace(img.ymin-img.center.y*0-gauss_centroid.y*1, img.ymax-img.center.y*0-gauss_centroid.y*1, img.ymax-img.ymin+1)+0.*0.5\n X, Y = np.meshgrid(x,y)\n\n Q00 = np.sum(weight.array*img.array)\n Q10 = gauss_centroid.x + np.sum(X*weight.array*img.array)/Q00\n Q01 = gauss_centroid.y + np.sum(Y*weight.array*img.array)/Q00\n Q20 = np.sum((X**2)*weight.array*img.array)\n Q02 = np.sum((Y**2)*weight.array*img.array)\n\n monomial = 1.\n for pp in xrange(p):\n monomial *= X\n for qq in xrange(q):\n monomial *= Y\n Qpq = np.sum(monomial*weight.array*img.array) #/Q00\n\n return Qpq", "def params(self):\n return {'cfg': self.cfg,\n 'momentum': self.momentum,\n 'center': self.center,\n 'scale': self.scale,\n 'epsilon': self.epsilon,\n 'act_fn': self.act_fn}", "def initialize(self):\n\t\tmu = 0\n\t\tsigma = np.sqrt(2 / self.dataset[\"d\"])\n\n\t\tself.F1 = np.random.normal(mu, sigma, self.F1.shape)\n\t\tself.F2 = np.random.normal(mu, sigma, self.F2.shape)\n\t\tself.W = np.random.normal(mu, sigma, self.W.shape)\n\n\t\tself.F1_momentum = np.zeros(self.F1.shape)\n\t\tself.F2_momentum = np.zeros(self.F2.shape)\n\t\tself.W_momentum = np.zeros(self.W.shape)", "def init_hyperparameters():\n alpha = .8\n alpha2 = 1\n\n return alpha, alpha2", "def _get_params(self):\r\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", "def _get_params(self):\r\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", "def logdprior(parameters, hyperparameters):\n sigma_w_part = parameters[0] + 
invgamma_logpdf(parameters[0],\n hyperparameters[\"sigma_w_shape\"], hyperparameters[\"sigma_w_scale\"])\n sigma_v_part = parameters[1] + invgamma_logpdf(parameters[1], hyperparameters[\"sigma_v_shape\"], hyperparameters[\"sigma_v_scale\"])\n return sigma_w_part + sigma_v_part", "def initializeDistribution(self):\n if self.functionType == 'CDF':\n self._distribution = distribution1D.BasicMultiDimensionalInverseWeight(str(self.dataFilename), self.p,True)\n else:\n self._distribution = distribution1D.BasicMultiDimensionalInverseWeight(str(self.dataFilename), self.p,False)\n self.dimensionality = self._distribution.returnDimensionality()\n self.lowerBound = [self.returnLowerBound(dim) for dim in range(self.dimensionality)]\n self.upperBound = [self.returnUpperBound(dim) for dim in range(self.dimensionality)]", "def get_initial_params(self, x, y, yerr):\n# p0 = zeros(self.max_phonons + 1)\n p0 = zeros(2)\n p0[0] = 100\n p0[1] = .1\n return p0", "def gaussian_parameters(h, dim=-1):\n m, h = torch.split(h, h.size(dim) // 2, dim=dim)\n v = F.softplus(h) + 1e-8\n return m, v", "def moments(values):\n\n meanValue = numpy.mean(values)\n return (meanValue,\n numpy.sqrt(moment(values, meanValue, 2)),\n moment(values, meanValue, 3),\n moment(values, meanValue, 4))" ]
[ "0.6434524", "0.6434524", "0.6220392", "0.6185545", "0.6109156", "0.6106636", "0.60708535", "0.60512894", "0.60178155", "0.5966822", "0.59502286", "0.58735156", "0.5850575", "0.58171284", "0.5816514", "0.57661724", "0.5720821", "0.57173246", "0.57122564", "0.5709464", "0.57005703", "0.56566393", "0.56566393", "0.56566393", "0.5650613", "0.5647901", "0.56205666", "0.56183773", "0.5581819", "0.5580499", "0.5579431", "0.5574771", "0.55595124", "0.5549218", "0.5547417", "0.55450284", "0.55362755", "0.5533073", "0.55328864", "0.55292463", "0.55152035", "0.55004686", "0.54993844", "0.54803777", "0.547777", "0.54711723", "0.54631597", "0.5454735", "0.54525715", "0.5436794", "0.54347324", "0.54311454", "0.54289407", "0.5415726", "0.5407236", "0.540343", "0.5398714", "0.5398714", "0.53965294", "0.5371645", "0.5370123", "0.5353437", "0.5351195", "0.5346446", "0.5343545", "0.53380007", "0.53346354", "0.5332762", "0.53268677", "0.53158087", "0.53155935", "0.5310139", "0.5309718", "0.5300877", "0.5296502", "0.5292304", "0.5292062", "0.528314", "0.5280636", "0.5273658", "0.52717936", "0.5267175", "0.5266302", "0.5263959", "0.525519", "0.52547693", "0.5243865", "0.52428687", "0.5240332", "0.52383465", "0.52359915", "0.5235011", "0.5232048", "0.5220906", "0.5220906", "0.5220359", "0.521662", "0.5211572", "0.5206511", "0.52055675" ]
0.6455378
0
This function computes the distribution internal parameters from its two first moments.
def _compute_internals(self, moments): [mean, stdv] = moments cov = stdv / mean zeta = np.sqrt(np.log(1. + cov ** 2.)) LAMBDA = np.log(mean) - 0.5 * zeta ** 2. internals = {} internals['LAMBDA'] = LAMBDA internals['zeta'] = zeta return internals
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['mu'] = mean\n internals['sigma'] = stdv\n\n return internals", "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['a'] = mean - np.sqrt(3) * stdv\n internals['b'] = mean + np.sqrt(3) * stdv\n\n return internals", "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['a'] = mean - np.sqrt(3) * stdv\n internals['b'] = mean + np.sqrt(3) * stdv\n\n return internals", "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['k'] = mean ** 2. / stdv ** 2.\n internals['LAMBDA'] = mean / stdv ** 2.\n\n return internals", "def calc_moments(distribution):\n x = torch.linspace(2, 22, 31)\n d_mean = torch.sum(x * distribution)\n d_var = torch.sum(distribution * (x - d_mean) ** 2) \n \n return d_mean, torch.sqrt(d_var)", "def posteriorDistribution(x,z,beta,sigma2):\r\n ### TODO: Write your code here\r\n mu = 0\r\n Cov = 0\r\n\r\n x_s = []\r\n for i in np.linspace(-1 , 1 , 150):\r\n for j in np.linspace(-1 , 1 , 150):\r\n x_s.append([i,j])\r\n x_s = np.array(x_s)\r\n\r\n X = []\r\n for i in x:\r\n j = [1,i[0]]\r\n X.append(j)\r\n X = np.array(X)\r\n\r\n common = np.matmul( X.T , X) + np.identity(2) * sigma2/beta\r\n common = np.linalg.inv(common)\r\n Cov = common * sigma2\r\n mu = np.matmul(common , np.matmul (X.T , z) )\r\n mu = mu.flatten()\r\n print(\"X.shape: \" , X.shape)\r\n print(\"z.shape: \",z.shape)\r\n print(\"Cov.shape\" , Cov.shape)\r\n print(\"mu.shape: \",mu.shape)\r\n density = util.density_Gaussian(mu , Cov , x_s).reshape(150 , 150 ).T\r\n print(\"density.shape\",density.shape)\r\n X,Y = np.meshgrid( np.linspace(-1,1,150) , np.linspace(-1,1,150) )\r\n\r\n \r\n\r\n plt.contour( X , Y , np.reshape(density , (150, 150 )))\r\n plt.plot(-0.1 , -0.5 , marker = 'o' , MarkerSize = 10 , label = 'point a')\r\n plt.xlabel('a0 ')\r\n plt.ylabel(' a1 ')\r\n plt.legend()\r\n plt.xlim = (-1,1)\r\n plt.ylim = (-1,1)\r\n plt.title('p(a|x1,z1....xn,zn) for '+ str(len(x)) +' samples')\r\n plt.show() \r\n print('-x-x-x-x-x-x-x-x-x')\r\n\r\n return (mu,Cov)", "def moments(self):", "def parameters(self):\n\n m = self.__m\n s = linalg.cholesky(self.__prod).transpose()\n w = self.__weight\n\n # Compute the parameters of the posterior distribution.\n return linalg.solve(s[:m, :m], s[:m, m:]), \\\n np.dot(s[:m, :m].transpose(), s[:m, :m]), \\\n np.dot(s[m:, m:].transpose(), s[m:, m:]) / w, \\\n w", "def parameters_to_marginal_moments(prob, distmu, distsigma):\n good = np.isfinite(prob) & np.isfinite(distmu) & np.isfinite(distsigma)\n prob = prob[good]\n distmu = distmu[good]\n distsigma = distsigma[good]\n distmean, diststd, _ = parameters_to_moments(distmu, distsigma)\n rbar = (prob * distmean).sum()\n r2bar = (prob * (np.square(diststd) + np.square(distmean))).sum()\n return rbar, np.sqrt(r2bar - np.square(rbar))", "def _get_distribution_variables(self, R):\n domain, Domain = self.domain_Domain\n phase_name = self.phase_name\n\n R_typ = self.phase_param.R_typ # [m]\n # Particle-size distribution (area-weighted)\n f_a_dist = self.phase_param.f_a_dist(R) # [m-1]\n\n # Ensure the distribution is normalised, irrespective of discretisation\n # or user input\n f_a_dist = f_a_dist / pybamm.Integral(f_a_dist, R) # [m-1]\n\n # Volume-weighted particle-size distribution\n f_v_dist = R * f_a_dist / pybamm.Integral(R * f_a_dist, R) # [m-1]\n\n # Number-based particle-size distribution\n f_num_dist = 
(f_a_dist / R**2) / pybamm.Integral(\n f_a_dist / R**2, R\n ) # [m-1]\n\n # True mean radii and standard deviations, calculated from the f_a_dist that\n # was given, all have units [m]\n R_num_mean = pybamm.Integral(R * f_num_dist, R)\n R_a_mean = pybamm.Integral(R * f_a_dist, R)\n R_v_mean = pybamm.Integral(R * f_v_dist, R)\n sd_num = pybamm.sqrt(pybamm.Integral((R - R_num_mean) ** 2 * f_num_dist, R))\n sd_a = pybamm.sqrt(pybamm.Integral((R - R_a_mean) ** 2 * f_a_dist, R))\n sd_v = pybamm.sqrt(pybamm.Integral((R - R_v_mean) ** 2 * f_v_dist, R))\n\n # X-average the means and standard deviations to give scalars\n # (to remove the \"electrode\" domain, if present)\n R_num_mean = pybamm.x_average(R_num_mean)\n R_a_mean = pybamm.x_average(R_a_mean)\n R_v_mean = pybamm.x_average(R_v_mean)\n sd_num = pybamm.x_average(sd_num)\n sd_a = pybamm.x_average(sd_a)\n sd_v = pybamm.x_average(sd_v)\n\n # X-averaged distributions, or broadcast\n if R.domains[\"secondary\"] == [f\"{domain} electrode\"]:\n f_a_dist_xav = pybamm.x_average(f_a_dist)\n f_v_dist_xav = pybamm.x_average(f_v_dist)\n f_num_dist_xav = pybamm.x_average(f_num_dist)\n else:\n f_a_dist_xav = f_a_dist\n f_v_dist_xav = f_v_dist\n f_num_dist_xav = f_num_dist\n\n # broadcast\n f_a_dist = pybamm.SecondaryBroadcast(f_a_dist_xav, [f\"{domain} electrode\"])\n f_v_dist = pybamm.SecondaryBroadcast(f_v_dist_xav, [f\"{domain} electrode\"])\n f_num_dist = pybamm.SecondaryBroadcast(\n f_num_dist_xav, [f\"{domain} electrode\"]\n )\n\n variables = {\n f\"{Domain} {phase_name}particle sizes\": R / R_typ,\n f\"{Domain} {phase_name}particle sizes [m]\": R,\n f\"{Domain} area-weighted {phase_name}particle-size\"\n \" distribution [m-1]\": f_a_dist,\n f\"{Domain} volume-weighted {phase_name}particle-size\"\n \" distribution [m-1]\": f_v_dist,\n f\"{Domain} number-based {phase_name}particle-size\"\n \" distribution [m-1]\": f_num_dist,\n f\"{Domain} area-weighted mean particle radius [m]\": R_a_mean,\n f\"{Domain} volume-weighted mean particle radius [m]\": R_v_mean,\n f\"{Domain} number-based mean particle radius [m]\": R_num_mean,\n f\"{Domain} area-weighted {phase_name}particle-size\"\n \" standard deviation [m]\": sd_a,\n f\"{Domain} volume-weighted {phase_name}particle-size\"\n \" standard deviation [m]\": sd_v,\n f\"{Domain} number-based {phase_name}particle-size\"\n \" standard deviation [m]\": sd_num,\n # X-averaged sizes and distributions\n f\"X-averaged {domain} {phase_name}particle sizes [m]\": pybamm.x_average(R),\n f\"X-averaged {domain} area-weighted {phase_name}particle-size \"\n \"distribution [m-1]\": f_a_dist_xav,\n f\"X-averaged {domain} volume-weighted {phase_name}particle-size \"\n \"distribution [m-1]\": f_v_dist_xav,\n f\"X-averaged {domain} number-based {phase_name}particle-size \"\n \"distribution [m-1]\": f_num_dist_xav,\n }\n\n return variables", "def grd_posterior_gaussian(self, ) -> Tuple[np.ndarray, np.ndarray]:\n xmin, xmax = self.x_range\n ymin, ymax = self.y_range\n\n mu = np.array([0, 0])\n sigma = np.zeros((2, 2))\n\n _sample = self._sample\n _prior = self.prior\n\n def mean_x(x: float, y: float):\n return x * _sample(x, y) * _prior.eval(x, y)\n\n def mean_y(x: float, y: float):\n return y * _sample(x, y) * _prior.eval(x, y)\n\n def var_x(x: float, y: float):\n return x * mean_x(x, y)\n\n def var_y(x: float, y: float):\n return y * mean_y(x, y)\n\n # def var_xy(x: float, y: float):\n # return x * mean_y(x, y)\n\n # First moment\n (mu[0], mu[1]) = (integrate.dblquad(mean_x, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],\n 
integrate.dblquad(mean_y, xmin, xmax, lambda x: ymin, lambda x: ymax)[0])\n (sigma[0, 0], sigma[1, 1]) = \\\n (integrate.dblquad(var_x, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],\n integrate.dblquad(var_y, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],)\n # integrate.dblquad(var_xy, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],)\n return mu, sigma", "def calc_parameters(T, N, sigma, r, div):\n dt = T / N\n u = np.exp(sigma * np.sqrt(dt))\n d = 1 / u\n b = r - div\n q = 1 / 2 + 1 / 2 * (b - 1 / 2 * sigma ** 2) * np.sqrt(dt) / sigma # P(up movement)\n return dt, u, d, q, b", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['mean' ] = self.mean\n paramDict['sigma'] = self.sigma\n return paramDict", "def calc_parameters(T, N, sigma, r, div):\n dt = T/N\n u = np.exp(sigma*np.sqrt(dt))\n d = 1/u\n b = r-div\n q = 1/2 + 1/2 * (b - 1/2 * sigma**2)*np.sqrt(dt)/sigma # P(up movement)\n return(dt, u, d, q, b)", "def random():\n # only care about the value of second_moment:\n # curve = scale * e**(-second_moment^2 q^2)/q^2\n # scale = 6 pi/100 (contrast/density*absorbed_amount)^2 * Vf/radius\n # the remaining parameters can be randomly generated from zero to\n # twice the default value as done by default in compare.py\n pars = dict(\n scale=1,\n second_moment=10**np.random.uniform(1, 3),\n )\n return pars", "def N2_f(d1,d2,rho):\n import statsmodels.sandbox.distributions.extras as extras\n muStandardNormal=0.0 # mean of a standard normal distribution \n varStandardNormal=1.0 # variance of standard normal distribution \n upper=([d1,d2]) # upper bound for two values\n v=varStandardNormal # simplify our notations\n mu=muStandardNormal # simplify our notations\n covM=([v,rho],[rho,v])\n return extras.mvnormcdf(upper,mu,covM)", "def get_means_and_scales(self):\n return self.optim.parameters[::2], np.exp(self.optim.parameters[1::2])", "def initializeDistribution(self):\n self.convertToDistrDict['Laguerre'] = self.convertLaguerreToGamma\n self.convertToQuadDict ['Laguerre'] = self.convertGammaToLaguerre\n self.measureNormDict ['Laguerre'] = self.stdProbabilityNorm\n if (not self.upperBoundUsed):\n # and (not self.lowerBoundUsed):\n self._distribution = distribution1D.BasicGammaDistribution(self.alpha,1.0/self.beta,self.low)\n #self.lowerBoundUsed = 0.0\n self.upperBound = sys.float_info.max\n self.preferredQuadrature = 'Laguerre'\n self.preferredPolynomials = 'Laguerre'\n else:\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'Legendre'\n if self.lowerBoundUsed == False:\n a = 0.0\n self.lowerBound = a\n else:\n a = self.lowerBound\n if self.upperBoundUsed == False:\n b = sys.float_info.max\n self.upperBound = b\n else:\n b = self.upperBound\n self._distribution = distribution1D.BasicGammaDistribution(self.alpha,1.0/self.beta,self.low,a,b)", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['mean' ] = self.mean\n paramDict['sigma'] = self.sigma\n paramDict['low'] = self.low\n return paramDict", "def parameters(conv, orthogonal):\n nrm = operator_one_norm(conv.weight).detach().cpu().numpy()\n\n if nrm > 15:\n print('Overflow likely, norm={}'.format(nrm))\n\n m = np.arange(1, len(THETA) + 1)\n vals = m * np.ceil(nrm / THETA)\n mstar = min(1 + np.argmin(vals), 56)\n s = int(np.ceil(nrm / THETA[mstar - 1]))\n\n return mstar, s", "def get_initial_parameters(token_segs):\r\n estems = {} # tracks the average probability of each root\r\n esuffix = {} # tracks the average probability of each suffix\r\n etrans = {} 
# tracks the average probability of each (transition, feature) pair\r\n eftrans = {} # tracks the average probability of each feature (interface between stem and suffix)\r\n\r\n # collect the probabilities of each object, to be normalized (divided by their totals) later\r\n for ts_list in token_segs:\r\n avg_prob = 1.0 / len(ts_list)\r\n for ts in ts_list:\r\n root = ts.root\r\n rand_val = 1.0\r\n if root in estems:\r\n estems[root] += rand_val * avg_prob\r\n else: estems[root] = rand_val * avg_prob\r\n\r\n suffix = ts.suffix\r\n if suffix in esuffix:\r\n esuffix[suffix] += rand_val * avg_prob\r\n else: esuffix[suffix] = rand_val * avg_prob\r\n\r\n trans = ts.trans\r\n ftrans = feature(root, suffix)\r\n if (trans, ftrans) in etrans:\r\n etrans[(trans, ftrans)] += rand_val * avg_prob\r\n else: etrans[(trans, ftrans)] = rand_val * avg_prob\r\n\r\n if ftrans in eftrans:\r\n eftrans[ftrans] += rand_val * avg_prob\r\n else: eftrans[ftrans] = rand_val * avg_prob\r\n\r\n # divide by the totals\r\n probstems = estems\r\n probsum = sum(probstems.values())\r\n for stem in probstems:\r\n probstems[stem] /= probsum\r\n\r\n probsuffix = esuffix\r\n probsum = sum(probsuffix.values())\r\n for suffix in probsuffix:\r\n probsuffix[suffix] /= probsum\r\n\r\n probtrans = etrans\r\n for trans, ftrans in probtrans:\r\n probtrans[(trans, ftrans)] /= eftrans[ftrans]\r\n\r\n return probstems, probsuffix, probtrans", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale,self.period))", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale,self.period))", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale,self.period))", "def get_distribution(self):\n\n # If the distributions have been updated before.\n if self.update_number > 0:\n for m in range(0, self.document_number, 1):\n for k in range(0, self.topic_number,1):\n probability = self.document_distribution_over_topic[m][k] / self.update_number\n self.document_distribution_over_topic[m][k] = probability\n for k in range(0, self.topic_number,1):\n for v in range(0, self.term_number,1):\n probability = self.topic_distribution_over_term[k][v] / self.update_number\n self.topic_distribution_over_term[k][v] = probability\n # The distributions have not been updated once.\n else:\n for m in range(0, self.document_number, 1):\n for k in range(0, self.topic_number, 1):\n self.document_distribution_over_topic[m][k] = (\n (self.document_topic_count_matrix[m][k] + self.alpha[k]) / (\n self.sum_document_by_topic_count[m] + self.sum_alpha))\n for k in range(0, self.topic_number, 1):\n for v in range(0, self.term_number, 1):\n self.topic_distribution_over_term[k][v] = (\n (self.topic_term_count_matrix[k][v] + self.beta[v]) / (\n self.sum_topic_by_term_count[k] + self.sum_beta))", "def generate_moments(hyper, params):\n\n k, d = hyper['k'], hyper['d']\n\n p = params # Shorthand, don't judge\n m = {} # Moments\n for x1 in xrange(1,d+1):\n m[(x1,)] = sum( p[(h,x1)] * p[(h,)] for h in xrange(1,k+1) )\n for x2 in xrange(1,d+1):\n m[(x1,x2)] = sum( p[(h,x1)] * p[(h,x2)] * p[(h,)] for h in xrange(1,k+1) )\n for x3 in xrange(1,d+1):\n m[(x1,x2,x3)] = sum( p[(h,x1)] * p[(h,x2)] * p[(h,x3)] * p[(h,)] for h in xrange(1,k+1) )\n return m", "def update_moments_r(self):\n denominator = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2 )\n nominator1 = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) 
/2 ) * self.constellation\n \n nominator2 = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2) * np.power(self.constellation, 2)\n try:\n \n moment1 = nominator1.sum(axis=1) / denominator.sum(axis=1)\n moment2 = nominator2.sum(axis=1) / denominator.sum(axis=1)\n assert np.all(np.logical_not(np.isnan(moment1))) and np.all(np.logical_not(np.isnan(moment2)))\n except:\n print(\"Oops! That was no valid number. Try again...\")\n\n \n self.mu = moment1\n return moment1, moment2", "def var_parameters(jd,mag,err):\n\n mean = np.mean(mag)\n nepochs = float(len(jd))\n\n chi = np.sum( (mag - mean)**2. / err**2. )\n p_chi = chi2.cdf(chi,(nepochs-1))\n\n\n a = (mag-mean)**2\n ex_var = (np.sum(a-err**2)/((nepochs*(mean**2))))\n sd = np.sqrt((1./(nepochs-1))*np.sum(((a-err**2)-ex_var*(mean**2))**2))\n ex_verr = sd/((mean**2)*np.sqrt(nepochs))\n\n\n return p_chi, ex_var, ex_verr", "def priorDistribution(beta):\r\n ### TODO: Write your code here\r\n m_v = np.zeros(2)\r\n print(\"m_v shape: \" ,m_v.shape)\r\n c_v = np.array( [ [ beta , 0 ] , [ 0 , beta ] ] )\r\n print(\"c_v shape: \",c_v.shape)\r\n x_s = []\r\n for i in np.linspace(-1 , 1 , 150):\r\n for j in np.linspace(-1 , 1 , 150):\r\n x_s.append([i,j])\r\n x_s = np.array(x_s)\r\n print(\"x_s shape: \",x_s.shape)\r\n density = util.density_Gaussian(m_v , c_v , x_s)\r\n #print(density)\r\n print(\"length density \",len(density))\r\n X,Y = np.meshgrid( np.linspace(-1,1,150) , np.linspace(-1,1,150) )\r\n plt.contour( X , Y , np.reshape(density , (150, 150 )) )\r\n plt.plot(-0.1 , -0.5 , marker = 'o' , MarkerSize = 10 , label = 'point a')\r\n plt.xlabel('a0 ')\r\n plt.ylabel(' a1 ')\r\n plt.legend()\r\n plt.title('p(a)')\r\n plt.show() \r\n print('-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x')\r\n return", "def getInitParams(self):\n paramDict = Distribution.getInitParams(self)\n paramDict['strategy'] = self.strategy\n paramDict['nPoints'] = self.nPoints\n return paramDict", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['lambda'] = self.lambdaVar # rate parameter\n paramDict['low' ] = self.low # lower domain boundary\n return paramDict", "def PH2From3Moments (moms, prec=1e-14):\n\n m1, m2, m3 = moms\n\n # check moment boounds\n m2l = APH2ndMomentLowerBound(m1, 2) \n m3l = APH3rdMomentLowerBound(m1, m2, 2) \n m3u = APH3rdMomentUpperBound(m1, m2, 2) \n \n if m2<m2l:\n raise Exception(\"The given second moment is not feasible!\") \n if m3<m3l:\n raise Exception(\"The given third moment is not feasible (too small)!\")\n if m3>m3u:\n raise Exception(\"The given third moment is not feasible (too large)!\")\n \n # check if we have an exponential distribution\n if abs(m2/m1/m1-2.0) < prec:\n return (np.matrix([1]), np.matrix([[-1/m1]]))\n \n # calculate parameters\n b = 3.0*m1*m2-m3\n c = 3.0*m2*m2-2.0*m1*m3\n e = -2.0*m1*m1+m2\n a = b*b+6.0*c*e\n if a<0:\n a = 0\n a = math.sqrt(a)\n if c>0:\n lambda1 = (b - a) / c\n lambda2 = (b + a) / c\n p = (-b-6.0*m1*e+a) / (b+a)\n elif c<0:\n lambda1 = (b + a) / c\n lambda2 = (b - a) / c\n p = (b+6.0*m1*e+a) / (-b+a)\n elif c==0:\n lambda1 = 0\n lambda2 = 1.0 / m1\n p = 0\n \n # return the result\n return (np.matrix([p,1.0-p]), np.matrix([[-lambda1, lambda1], [0,-lambda2]]))", "def prop_func_form_params(param1,param2,*arg):\n return np.log(MH.simple_2D_Gauss(param1-param2,arg[0],arg[1]))", "def fdist(param1, param2):\n return(prng.gamma(param1, param2))", "def initializeDistribution(self):\n self.convertToDistrDict['Jacobi'] = 
self.convertJacobiToBeta\n self.convertToQuadDict ['Jacobi'] = self.convertBetaToJacobi\n self.measureNormDict ['Jacobi'] = self.stdProbabilityNorm\n #this \"if\" section can only be called if distribution not generated using readMoreXML\n if (not self.upperBoundUsed) and (not self.lowerBoundUsed):\n self._distribution = distribution1D.BasicBetaDistribution(self.alpha,self.beta,self.high-self.low,self.low)\n else:\n if self.lowerBoundUsed == False:\n a = 0.0\n else:\n a = self.lowerBound\n if self.upperBoundUsed == False:\n b = sys.float_info.max\n else:\n b = self.upperBound\n self._distribution = distribution1D.BasicBetaDistribution(self.alpha,self.beta,self.high-self.low,a,b,self.low)\n self.preferredPolynomials = 'Jacobi'\n self.compatibleQuadrature.append('Jacobi')\n self.compatibleQuadrature.append('ClenshawCurtis')", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['apex' ] = self.apex\n paramDict['min' ] = self.min\n paramDict['max' ] = self.max\n return paramDict", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['mu' ] = self.mu\n return paramDict", "def distribution(self):\n \n #external_distribution serves both the purpose of external setting of distribution and the caching of distribution()\n if self.external_distribution:\n return self.external_distribution_array;\n else:\n energy_vector = []\n superset = self.generate_superset(0) \n \n for i in superset:\n state = self.ket(i)\n \n norm_squared = np.dot(state.T, state)\n \n if norm_squared > 0: #zero is appended at the end\n energy = np.dot(state.T, np.dot( self.epsilon, state))\n interaction = np.dot(state.T, np.dot( self.u, state))/2.0 #divide by two. Otherwise, <l r| U |l r > = U_LR + U_RL = 2U\n #print state, np.dot(self.u, state) \n #print interaction\n energy_vector.append( energy + interaction )\n \n energy_vector.insert(0, 0.0) \n probability = np.exp( np.multiply(-self.beta, energy_vector)) \n probability /= probability.sum() \n return probability", "def initializeDistribution(self):\n self.convertToDistrDict['Hermite'] = self.convertHermiteToNormal\n self.convertToQuadDict ['Hermite'] = self.convertNormalToHermite\n self.measureNormDict ['Hermite'] = self.stdProbabilityNorm\n if (not self.upperBoundUsed) and (not self.lowerBoundUsed):\n self._distribution = distribution1D.BasicNormalDistribution(self.mean,\n self.sigma)\n self.lowerBound = -sys.float_info.max\n self.upperBound = sys.float_info.max\n self.preferredQuadrature = 'Hermite'\n self.preferredPolynomials = 'Hermite'\n else:\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'Legendre'\n if self.lowerBoundUsed == False:\n a = -sys.float_info.max\n self.lowerBound = a\n else:\n a = self.lowerBound\n if self.upperBoundUsed == False:\n b = sys.float_info.max\n self.upperBound = b\n else:\n b = self.upperBound\n self._distribution = distribution1D.BasicNormalDistribution(self.mean,\n self.sigma,\n a,b)", "def prop_dist_form_params(*arg):\n return np.random.multivariate_normal(*arg)", "def _get_prior_params(self):\n # relation transformation matrix\n M_mu = np.eye(self.n_polarities, dtype=\"float32\")\n M_mu[1, :] = [0., 0.3, 0.]\n M_mu = np.tile(M_mu, (self.n_rels, 1)).reshape(\n self.n_rels, self.n_polarities, self.n_polarities\n )\n # for rel, rel_idx in iteritems(self.rel2idx):\n # # swap axes for contrastive relations\n # if check_rel(rel, CONTRASTIVE_RELS):\n # mu_i = M_mu[rel_idx]\n # mu_i[[0, 2]] = mu_i[[2, 0]]\n M_mu = torch.tensor(M_mu)\n M_sigma = 
torch.tensor(\n np.ones((self.n_rels, self.n_polarities, self.n_polarities),\n dtype=\"float32\")\n )\n # beta\n beta_p = 5. * torch.tensor(np.ones((self.n_rels, self.n_polarities),\n dtype=\"float32\"))\n beta_q = 5. * torch.tensor(np.ones((self.n_rels, self.n_polarities),\n dtype=\"float32\"))\n # z_epsilon\n z_epsilon_p = torch.tensor(1.)\n z_epsilon_q = torch.tensor(15.)\n # scale factor\n scale_factor = torch.tensor(34.)\n return {\"M_mu\": M_mu, \"M_sigma\": M_sigma, \"beta_p\": beta_p,\n \"beta_q\": beta_q, \"z_epsilon_p\": z_epsilon_p,\n \"z_epsilon_q\": z_epsilon_q, \"scale_factor\": scale_factor}", "def demo_indef():\n n_dim = 2\n A = np.eye(n_dim)\n A[1, 1] = -1.\n covar = np.eye(n_dim)\n mean = np.zeros(n_dim)\n approx = approx_quad_form(mean, covar, A)\n\n # Sample from true dist\n n_sample = 10000\n x = np.random.multivariate_normal(mean, covar, n_sample)\n q_samples = np.zeros(n_sample)\n for i in range(n_sample):\n q_samples[i] = x[i] @ A @ x[i]\n\n q = np.linspace(-10, 10)\n\n plt.plot(\n q, approx(q), label='Approx.',\n color='tab:blue', linestyle='--')\n bins = np.linspace(-8, 8, 81)\n bins[0] = -np.inf\n bins[-1] = np.inf\n plt.hist(\n q_samples, density=True, histtype='stepfilled',\n bins=bins,\n alpha=0.5, color='black', label='Samples')\n plt.xlabel('q')\n plt.ylabel('pdf(q) [-]')\n plt.legend()\n\n central_moments_sample = scipy.stats.moment(\n q_samples, moment=[0, 1, 2, 3, 4])\n print(central_moments_sample)", "def getCrowDistDict(self):\n retDict = Distribution.getCrowDistDict(self)\n retDict['mu'] = self.mean\n retDict['sigma'] = self.sigma\n return retDict", "def initializeDistribution(self):\n self.raiseAMessage('initialize distribution')\n mu = distribution1D.vectord_cxx(len(self.mu))\n for i in range(len(self.mu)):\n mu[i] = self.mu[i]\n covariance = distribution1D.vectord_cxx(len(self.covariance))\n for i in range(len(self.covariance)):\n covariance[i] = self.covariance[i]\n if self.method == 'spline':\n if self.covarianceType != 'abs':\n self.raiseAnError(IOError,'covariance with type ' + self.covariance + ' is not implemented for ' + self.method + ' method')\n self._distribution = distribution1D.BasicMultivariateNormal(covariance, mu)\n elif self.method == 'pca':\n self._distribution = distribution1D.BasicMultivariateNormal(covariance, mu, str(self.covarianceType), self.rank)\n if self.transformation:\n self.lowerBound = [-sys.float_info.max]*self.rank\n self.upperBound = [sys.float_info.max]*self.rank\n else:\n self.lowerBound = [self.returnLowerBound(dim) for dim in range(self.dimension)]\n self.upperBound = [self.returnUpperBound(dim) for dim in range(self.dimension)]", "def _init_params(self):\n self.W_ems = []\n self.b_ems = []\n if self.rank_n_approx:\n W_em1 = self.init_fn[0](self.n_in,\n self.rank_n_approx,\n self.sparsity[0],\n self.scale[0],\n self.rng)\n W_em2 = self.init_fn[0](self.rank_n_approx,\n self.n_hids[0],\n self.sparsity[0],\n self.scale[0],\n self.rng)\n self.W_em1 = theano.shared(W_em1,\n name='W1_0_%s'%self.name)\n self.W_em2 = theano.shared(W_em2,\n name='W2_0_%s'%self.name)\n self.W_ems = [self.W_em1, self.W_em2]\n\n else:\n W_em = self.init_fn[0](self.n_in,\n self.n_hids[0],\n self.sparsity[0],\n self.scale[0],\n self.rng)\n self.W_em = theano.shared(W_em,\n name='W_0_%s'%self.name)\n self.W_ems = [self.W_em]\n\n self.b_em = theano.shared(\n self.bias_fn[0](self.n_hids[0], self.bias_scale[0],self.rng),\n name='b_0_%s'%self.name)\n self.b_ems = [self.b_em]\n\n for dx in range(1, self.n_layers):\n W_em = 
self.init_fn[dx](self.n_hids[dx-1] / self.pieces[dx],\n self.n_hids[dx],\n self.sparsity[dx],\n self.scale[dx],\n self.rng)\n W_em = theano.shared(W_em, name='W_%d_%s'%(dx,self.name))\n self.W_ems += [W_em]\n\n b_em = theano.shared(\n self.bias_fn[dx](self.n_hids[dx], self.bias_scale[dx],self.rng),\n name='b_%d_%s'%(dx,self.name))\n self.b_ems += [b_em]\n\n self.params = [x for x in self.W_ems]\n\n if self.learn_bias and self.learn_bias!='last':\n self.params = [x for x in self.W_ems] + [x for x in self.b_ems]\n elif self.learn_bias == 'last':\n self.params = [x for x in self.W_ems] + [x for x in\n self.b_ems][:-1]\n self.params_grad_scale = [self._grad_scale for x in self.params]\n if self.weight_noise:\n self.nW_ems = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_ems]\n self.nb_ems = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_ems]\n\n self.noise_params = [x for x in self.nW_ems] + [x for x in self.nb_ems]\n self.noise_params_shape_fn = [constant_shape(x.get_value().shape)\n for x in self.noise_params]", "def Rosenblatt_Transform(dist, x_i): \n if dist.stats(moments = 's') > 1 or dist.stats(moments = 's') < -1:\n \n x_N_mean = dist.median()\n x_N_std = (x_i - x_N_mean)/sst.norm.ppf(dist.cdf(x_i))\n \n return(x_N_mean, x_N_std)\n \n else:\n x_N_std = sst.norm.pdf(sst.norm.ppf(dist.cdf(x_i)))/dist.pdf(x_i)\n x_N_mean = x_i - sst.norm.ppf(dist.cdf(x_i))*x_N_std\n return(x_N_mean, x_N_std)", "def _get_marginal_pdfs( res, nbins=51, verbose=True ):\n\tvparam_names = res.vparam_names\n\tweights = res.weights\n\tsamples = res.samples\n\n\tpdfdict = {}\n\n\tfor param in vparam_names :\n\t\tipar = vparam_names.index( param )\n\t\tparamvals = samples[:,ipar]\n\n\t\tif nbins>1:\n\t\t\tif param in res.bounds :\n\t\t\t\tparvalmin, parvalmax = res.bounds[param]\n\t\t\telse :\n\t\t\t\tparvalmin, parvalmax = 0.99*paramvals.min(), 1.01*paramvals.max()\n\t\t\tparambins = np.linspace( parvalmin, parvalmax, nbins, endpoint=True ).flatten()\n\t\t\tbinindices = np.digitize( paramvals, parambins )\n\n\t\t\t# we estimate the marginalized pdf by summing the weights of all points in the bin,\n\t\t\t# where the weight of each point is the prior volume at that point times the\n\t\t\t# likelihood, divided by the total evidence\n\t\t\tpdf = np.array( [ weights[np.where( binindices==ibin )].sum() for ibin in range(len(parambins)) ] )\n\t\telse :\n\t\t\tparambins = None\n\t\t\tpdf = None\n\n\n\t\tmean = (weights * samples[:,ipar]).sum()\n\t\t#print(samples[:,ipar]-mean)\n\t\t#print(weights)\n\t\tstd = np.sqrt( (weights * (samples[:,ipar]-mean)**2 ).sum() )\n\n\n\t\tpdfdict[param] = (parambins,pdf,mean,std,res.logz)\n\n\t\tif verbose :\n\t\t\tif np.abs(std)>=0.1:\n\t\t\t\tprint( ' <%s> = %.2f +- %.2f'%( param, np.round(mean,2), np.round(std,2)) )\n\t\t\telif np.abs(std)>=0.01:\n\t\t\t\tprint( ' <%s> = %.3f +- %.3f'%( param, np.round(mean,3), np.round(std,3)) )\n\t\t\telif np.abs(std)>=0.001:\n\t\t\t\tprint( ' <%s> = %.4f +- %.4f'%( param, np.round(mean,4), np.round(std,4)) )\n\t\t\telse :\n\t\t\t\tprint( ' <%s> = %.3e +- %.3e'%( param, mean, std) )\n\n\n\t\tif param == 'x0' :\n\t\t\tsalt2 = sncosmo.Model( source='salt2')\n\t\t\tsalt2.source.set_peakmag( 0., 'bessellb', 'ab' )\n\t\t\tx0_AB0 = salt2.get('x0')\n\t\t\tmBmean = -2.5*np.log10( mean / x0_AB0 )\n\t\t\tmBstd = 2.5*np.log10( np.e ) * std / mean\n\t\t\tmBbins = -2.5*np.log10( parambins / x0_AB0 )\n\n\t\t\tpdfdict['mB'] = ( mBbins, pdf, mBmean, mBstd )\n\t\t\tif verbose:\n\t\t\t\tprint( ' <%s> = %.3f +- %.3f'%( 'mB', 
np.round(mBmean,3), np.round(mBstd,3)) )\n\n\treturn( pdfdict )", "def getInitParams(self):\n paramDict = Distribution.getInitParams(self)\n paramDict['mapping'] = self.mapping\n paramDict['values'] = self.values\n return paramDict", "def posteriorDistribution(x,z,beta,sigma2):\r\n ### TODO: Write your code here\r\n\r\n # mean of posterior distribution is the MAP estimate of the weights a\r\n # tau^2(from notes) is beta\r\n\r\n extra_col = np.ones((x.shape[0], 1))\r\n x = np.append(extra_col, x, axis = 1)\r\n\r\n alpha_map = np.linalg.inv((np.transpose(x)@x + (sigma2/beta)*np.eye(2)))@(np.transpose(x)@z)\r\n mu = alpha_map\r\n\r\n Cov = np.linalg.inv((np.transpose(x)@x + (sigma2/beta)*np.eye(2)))*sigma2\r\n\r\n num_x = 100\r\n num_y = 100\r\n\r\n xvalues = np.linspace(-1, 1, num = num_x)\r\n yvalues = np.linspace(-1, 1, num = num_y)\r\n X_grid, Y_grid = np.meshgrid(xvalues, yvalues)\r\n\r\n samples = np.column_stack((X_grid.flatten(), Y_grid.flatten()))\r\n\r\n density = util.density_Gaussian(mu.squeeze(), Cov, samples)\r\n density_grid = np.reshape(density, (num_x, num_y))\r\n\r\n plt.figure(1)\r\n plt.title(\"Posterior Distribution of α Given 5 Data Points\")\r\n plt.xlabel('$α_0$')\r\n plt.ylabel('$α_1$')\r\n plt.scatter(-0.1, -0.5, c='r')\r\n plt.contour(X_grid, Y_grid, density_grid, cmap=plt.cm.winter)\r\n plt.show()\r\n\r\n return (mu,Cov)", "def get_prob_for_distributions(p):\n w1 = p[0]\n mu1 = p[1]\n sigma1 = p[2]\n w2 = p[3]\n mu2 = p[4]\n sigma2 = p[5]\n w3 = p[6]\n mu3 = p[7]\n sigma3 = p[8]\n dist_range = (0, 4.330310991999920844e+01)\n x = np.linspace(dist_range[0], dist_range[1], 1000)\n A1 = np.array(w1 * mlab.normpdf(x, mu1, sigma1)).sum()\n A2 = np.array(w2 * mlab.normpdf(x, mu2, sigma2)).sum()\n A3 = np.array(w3 * mlab.normpdf(x, mu3, sigma3)).sum()\n p1 = A1 / (A1 + A2 + A3)\n p2 = A2 / (A1 + A2 + A3)\n p3 = A3 / (A1 + A2 + A3)\n return p1, p2, p3", "def Get_params(numparams, dt, D):\n # bounds from table 1 Kowalek et al 2020\n Nmin, Nmax = 30, 600\n Bmin, Bmax = 1, 6\n Rmin, Rmax = 1, 17\n alphamin, alphamax = 0.3, 0.7\n Qmin, Qmax = 1, 9\n\n # Gen parameters\n Q = np.random.uniform(Qmin, Qmax, size=numparams)\n Q1, Q2 = Q, Q\n\n NsND = np.random.randint(Nmin, Nmax + 1, size=numparams)\n NsAD = np.random.randint(Nmin, Nmax + 1, size=numparams)\n NsCD = np.random.randint(Nmin, Nmax + 1, size=numparams)\n NsDM = np.random.randint(Nmin, Nmax + 1, size=numparams)\n TDM = NsDM * dt\n\n B = np.random.uniform(Bmin, Bmax, size=numparams)\n r_c = np.sqrt(D * NsCD * dt / B) # solving for r_c in eq. 8 Kowalek\n\n R = np.random.uniform(Rmin, Rmax, size=numparams)\n v = np.sqrt(R * 4 * D / TDM) # solving for v in eq. 7 Kowalek\n\n alpha = np.random.uniform(alphamin, alphamax, size=numparams)\n\n # Compute sigma for ND, AD, CD from eq. 12 Kowalek\n sigmaND = np.sqrt(D * dt) / Q1\n sigmaAD = np.sqrt(D * dt) / Q1\n sigmaCD = np.sqrt(D * dt) / Q1\n\n # Compute sigma for DM from eq. 
12 Kowalek\n sigmaDM = np.sqrt(D * dt + v ** 2 * dt ** 2) / Q2\n\n return np.array(\n [\n NsND,\n NsAD,\n NsCD,\n NsDM,\n D * np.ones(numparams),\n dt * np.ones(numparams),\n r_c,\n v,\n alpha,\n sigmaND,\n sigmaAD,\n sigmaCD,\n sigmaDM,\n ]\n ).T", "def _calpara(self):\n self.up = math.exp(self.sigma*math.sqrt(self.deltatime))\n self.down = math.exp(-self.sigma*math.sqrt(self.deltatime))\n self.upprob = (math.exp((self.r-self.d)*self.deltatime)-self.down)/(self.up-self.down)", "def APH2ndMomentLowerBound (m1, n):\n\n return float(m1)*m1*(n+1) / n", "def _construct_mom_stuff(self):\n a = self.mom_mix_rate\n dist_mean = self.GN.dist_mean\n dist_cov = self.GN.dist_cov\n # Get the generated sample observations for this batch, transformed\n # linearly into the desired space for moment matching...\n X_b = T.dot(self.GN.output, self.mom_match_proj)\n # Get their mean\n batch_mean = T.mean(X_b, axis=0)\n # Get the updated generator distribution mean\n new_mean = ((1.0 - a[0]) * self.GN.dist_mean) + (a[0] * batch_mean)\n # Use the mean to get the updated generator distribution covariance\n X_b_minus_mean = X_b - new_mean\n # Whelp, I guess this line needs the cast... for some reason...\n batch_cov = T.dot(X_b_minus_mean.T, X_b_minus_mean) / T.cast(X_b.shape[0], 'floatX')\n new_cov = ((1.0 - a[0]) * self.GN.dist_cov) + (a[0] * batch_cov)\n # Get the cost for deviation from the target distribution's moments\n mean_err = new_mean - self.target_mean\n cov_err = (new_cov - self.target_cov)\n mm_cost = self.mom_match_weight[0] * \\\n (T.sum(mean_err**2.0) + T.sum(cov_err**2.0))\n # Construct the updates for the running estimates of the generator\n # distribution's first and second-order moments.\n mom_updates = OrderedDict()\n mom_updates[self.GN.dist_mean] = new_mean\n mom_updates[self.GN.dist_cov] = new_cov\n return [mm_cost, mom_updates]", "def aicpdf(xvals, distribution, params):\n if distribution == 'pareto':\n pvals = (params['xmin'] * params['mu'] ** params['xmin']) / (xvals ** (params['xmin'] + 1))\n return pvals\n \n elif distribution == 'lognormal':\n #import pdb; pdb.set_trace()\n pvals = np.exp(-(np.log(xvals) - params['mu'])**2 / (2 * params['sigma']**2)) / (xvals * params['sigma'] * np.sqrt(2*np.pi))\n return pvals\n \n elif distribution == 'normal':\n pvals = np.exp(-(xvals - params['mu'])**2 / (2 * params['sigma']**2)) / (params['sigma'] * np.sqrt(2*np.pi))\n return pvals\n \n elif distribution == 'exponential':\n pvals = params['lambda'] * np.exp(-params['lambda'] * xvals)\n return pvals \n \n elif distribution == 'boundedpl':\n #pvals = (params['mu'] * (params['mu'] ** params['xmax'] - params['xmin'] ** params['xmax'])) / (xvals ** (params['mu'] + 1))\n #mu * (xmax ^ mu - xmin ^ mu) / x ^ (mu+1)\n pvals = (params['mu'] * (params['xmax'] ** params['mu'] - params['xmin'] ** params['mu'])) / (xvals ** (params['mu'] + 1))\n return pvals", "def moment(self, n, mu, sigma):\n return scipy_norm.moment(n, mu, sigma)", "def _get_params(self):\r\n return np.hstack((self.variance, self.lengthscale))", "def _get_params(self):\r\n return np.hstack((self.variance, self.lengthscale))", "def computeMoments(x):\n return (abs(stats.skew(x)),abs(stats.kurtosis(x,None,True)))", "def initializeDistribution(self):\n self.minVal = min(math.exp(self.upperBound),math.exp(self.lowerBound))\n self.maxVal = max(math.exp(self.upperBound),math.exp(self.lowerBound))", "def initializeDistribution(self):\n self.convertToDistrDict['Legendre'] = self.convertLegendreToUniform\n self.convertToQuadDict ['Legendre'] = 
self.convertUniformToLegendre\n self.measureNormDict ['Legendre'] = self.stdProbabilityNorm\n self.convertToDistrDict['ClenshawCurtis'] = self.convertLegendreToUniform\n self.convertToQuadDict ['ClenshawCurtis'] = self.convertUniformToLegendre\n self.measureNormDict ['ClenshawCurtis'] = self.stdProbabilityNorm\n self._distribution = distribution1D.BasicUniformDistribution(self.lowerBound,self.lowerBound+self.range)", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale))", "def __bivariate_gaussian(self, sig1, sig2, mu1, mu2, x1, x2, rho):\n Z1 = ((x1 - mu1) / sig1) ** 2\n Z2 = ((x2 - mu2) / sig2) ** 2\n Z3 = 2 * rho * (x1 - mu1) * (x2 - mu2) / (sig1 * sig2)\n Z = Z1 + Z2 - Z3\n\n pi_const = torch.tensor([np.pi]).to(self.__device)\n\n return torch.exp(-Z / (2 * (1 - rho ** 2))).to(self.__device) / (\n 2 * pi_const * sig1 * sig2 * torch.sqrt(1 - rho ** 2)\n )", "def getCrowDistDict(self):\n retDict = Distribution.getCrowDistDict(self)\n retDict['mu'] = self.mean\n retDict['sigma'] = self.sigma\n retDict['low'] = self.low\n return retDict", "def __bivariate_gaussian(self, sig1, sig2, mu1, mu2, x1, x2, rho):\n Z1 = ((x1 - mu1) / sig1) ** 2\n Z2 = ((x2 - mu2) / sig2) ** 2\n Z3 = 2 * rho * (x1 - mu1) * (x2 - mu2) / (sig1 * sig2)\n\n Z = Z1 + Z2 - Z3\n\n pi_const = torch.tensor([np.pi]).to(self.__device)\n\n return torch.exp(-Z / (2 * (1 - rho ** 2))).to(self.__device) / (\n 2 * pi_const * sig1 * sig2 * torch.sqrt(1 - rho ** 2)\n )", "def estimate_gaussian_params(X):\n mu = X.mean(axis=0)\n var = X.std(axis=0)**2.0\n return mu,var", "def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu\")\n self.modelBuilder.factory_('expr::vbfH_s_func(\"@0-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"1-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::vbfH_sbi_func(\"sqrt(@0)\", mu)')", "def like_one(theta,dt,dmag,sigma):\n\n gamma, A = theta\n aux=(1/np.sqrt(2*np.pi*Veff2(dt,sigma,A,gamma)))*np.exp(-1.0*(dmag**2)/(2.0*Veff2(dt,sigma,A,gamma)))\n\n return aux", "def update_params(x, prior, posterior):\r\n mu0, kappa0, alpha0, beta0 = prior\r\n mu_t, kappa_t, alpha_t, beta_t = posterior\r\n return np.r_[mu0, (kappa_t*mu_t + x)/(kappa_t + 1)], \\\r\n np.r_[kappa0, kappa_t + 1], \\\r\n np.r_[alpha0, alpha_t + 0.5], \\\r\n np.r_[beta0, beta_t + 0.5*kappa_t*(x - mu_t)**2/(kappa_t + 1)]", "def prior_params_tree(self):\n id = {name:i for i, name in enumerate(list(self.tree.keys()))}\n n_nodes = len(id)\n dist_mx = np.zeros((n_nodes, n_nodes))\n\n for node1, edges in self.tree.items():\n for node2, dist in edges.dist:\n dist_mx[id[node1], id[node2]] = dist\n dist_mx[id[node2], id[node1]] = dist\n\n # while np.count_nonzero(dist_mx) < (n_nodes ** 2 - n_nodes):\n for _ in range(20):\n for i, j in combinations(range(n_nodes), 2):\n if dist_mx[i,j] > 0:\n continue\n row_i = dist_mx[i]\n row_j = dist_mx[j]\n value = (row_i + row_j) * (row_i > 0) * (row_j > 0)\n dist_mx[i, j] = dist_mx[j, i] = - max(np.unique(value))\n dist_mx = np.abs(dist_mx)\n\n evolve_rate = []\n for node1, node2 in combinations(self.m_cov.keys(), 2):\n mx_cov_dist = np.abs(self.m_cov[node1] - self.m_cov[node2])\n elements = mx_cov_dist[np.triu_indices(len(mx_cov_dist))]\n norm_elements = elements / dist_mx[id[node2], id[node1]]\n evolve_rate += list(norm_elements)\n\n\n\n df = np.mean([p.shape[0] for _, p in self.m_profiles.items()])\n p_theta_alpha = df/2\n # 
p_theta_alpha = 4\n p_theta_beta = np.percentile(evolve_rate, 75) * (p_theta_alpha - 1)\n # print(p_theta_alpha, p_theta_beta)\n return p_theta_alpha, p_theta_beta", "def __init__(self, \n param_epsilon, \n param_tau,\n param_u, \n param_gamma_left,\n param_gamma_right,\n param_beta):\n self.epsilon = param_epsilon\n self.tau = param_tau\n self.u = param_u\n self.gamma_left = param_gamma_left\n self.gamma_right = param_gamma_right\n \n self.sigma_retarded = 1j * (self.gamma_left + self.gamma_right) / 2.0\n self.sigma_advanced = - self.sigma_retarded;\n \n self.dim = len(self.u)\n self.rho = np.zeros((2**self.dim))\n \n self.beta = param_beta\n \n self.cutoff_chance = 0.0001\n self.external_distribution = False\n self.external_distribution_array = self.distribution()\n self.external_distribution = True", "def _get_parameters(n, j, domain, g, ncap):\n alphas, betas = rc.recurrenceCoefficients(n - 2, lb=domain[0], rb=domain[1],\n j=j, g=g, ncap=ncap)\n omegas = g * np.array(alphas)\n ts = g * np.sqrt(np.array(betas)[1::])\n c0 = np.sqrt(betas[0])\n return omegas, ts, c0", "def aicmle(timeSeries, distribution):\n mlevals = {} \n if distribution == 'pareto':\n mlevals['xmin'] = np.min(timeSeries)\n mlevals['mu'] = 1 - timeSeries.shape[0] / (timeSeries.shape[0] * np.log(mlevals['xmin']) - np.sum(np.log(timeSeries)))\n \n elif distribution == 'lognormal':\n mlevals['mu'] = np.sum(np.log(timeSeries)) / timeSeries.shape[0]\n mlevals['sigma'] = np.sqrt(np.sum( (np.log(timeSeries) - mlevals['mu'])**2) / timeSeries.shape[0])\n \n elif distribution == 'normal':\n mlevals['mu'] = np.mean(timeSeries)\n mlevals['sigma'] = np.sqrt(sum((timeSeries - np.mean(timeSeries))**2) / timeSeries.shape[0])\n \n elif distribution == 'exponential':\n mlevals['lambda'] = 1.0 / np.mean(timeSeries)\n \n elif distribution == 'boundedpl':\n mlevals['xmin'] = np.min(timeSeries)\n mlevals['xmax'] = np.max(timeSeries)\n minmuEstimate = 1.1\n mlevals['mu'] = fmin(lambda mu: -len(timeSeries) * np.log( (mu - 1) / (np.min(timeSeries)**(1 - mu) - np.max(timeSeries)**(1 - mu))) + mu * np.sum(np.log(timeSeries)), minmuEstimate, disp=0)[0]\n\n return mlevals", "def __init__(self, mean=0.0, sigma=1.0):\n super().__init__()\n self.mean = mean\n self.sigma = sigma\n self.hasInfiniteBound = True\n self.type = 'Normal'\n self.distType = 'Continuous'\n self.compatibleQuadrature.append('Hermite')\n self.compatibleQuadrature.append('CDF')\n #THESE get set in initializeDistribution, since it depends on truncation\n #self.preferredQuadrature = 'Hermite'\n #self.preferredPolynomials = 'Hermite'", "def compute_t_params(mu, kappa, alpha, beta):\r\n mu_, sigma2_, dof_ = mu, beta*(kappa + 1)/(alpha*kappa), 2*alpha\r\n return mu_, sigma2_, dof_", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['n' ] = self.n\n paramDict['p' ] = self.p\n return paramDict", "def generate_stat(sample_size, sparsity = 0, amplitude = 0, sigma = 1):\n var = generate_variable(sample_size, sparsity, amplitude, sigma)\n y_obs = var[0]\n \n \"\"\" \n f is equal to -X(t,theta) and we will minimize f (max. 
X)\n \"\"\"\n def f(x):\n \"\"\" \n f(x)=-X(t,theta) where x[0]=t and x[1]=theta\n \"\"\"\n res = np.real(np.exp(-1j*x[1])*\\\n sum(y_obs[k+sample_size]*np.exp(1j*k*x[0]) \\\n for k in range(-sample_size,sample_size+1))) \n res = -res/np.sqrt(2*sample_size+1) \n return res\n \n def grad_f(x):\n \"\"\" \n gradient of f\n \"\"\"\n res1 = np.real(np.exp(-1j*x[1])*\\\n sum(1j*k*y_obs[k+sample_size]*np.exp(1j*k*x[0]) \\\n for k in range(-sample_size,sample_size+1)))\n res1 = -res1/np.sqrt(2*sample_size+1)\n \n res2 = np.real(np.exp(-1j*x[1])*\\\n sum(-1j*y_obs[k+sample_size]*np.exp(1j*k*x[0]) \\\n for k in range(-sample_size,sample_size+1)))\n res2 = -res2/np.sqrt(2*sample_size+1)\n return np.array([res1, res2])\n \n #% Minimizing f\n \n \"\"\" \n we minimize on [0, 2pi]^2\n \"\"\"\n bnds = ((0, 2*np.pi), (0, 2*np.pi))\n \n \"\"\" \n We begin by a greedy search of the initialization point over a grid of size 126^2\n the initialization point is init\n \"\"\"\n x = y = np.arange(0, 2*np.pi, 0.05)\n steps = 126\n X, Y = np.meshgrid(x, y)\n val = np.array([f([x,y]) for x,y in zip(np.ravel(X), np.ravel(Y))])\n init = np.argmin(val)\n x1 = init%steps\n x2 = (init-x1)/steps\n init = [x1*0.05, x2*0.05]\n \n \"\"\" \n we minimize f...\n \"\"\"\n result = sco.minimize(f, init, method=\"L-BFGS-B\",\\\n jac=grad_f, bounds=bnds, tol=1e-15)\n \n \"\"\" \n (t1,theta1) is the argmax of X(t, theta) and l1=$\\lambda_1$\n \"\"\"\n t1 = result.x[0]\n theta1 = result.x[1]\n l1 = -f([t1,theta1])\n \n \n \"\"\" \n Function g(x) is equal to (X(t1,theta1)-X(x))/(1-rho((t1,theta1)-x))\n \"\"\"\n def g(x):\n a0 = x[0]-t1\n a1 = x[1]-theta1\n N = 2*sample_size+1\n \n vec = np.array([a0,a1])\n r = np.linalg.norm(vec)\n \"\"\" \n the value for r=0 is set to l1 (note that r=0 corresponds to x=(t1,theta1))\n \"\"\" \n res = l1 \n \n if (0<r) & (r<0.00001):\n \"\"\" \n we look a values near (t1,theta1) for which an indetermination occurs\n \"\"\" \n alpha= np.arccos(np.clip(a0/np.sqrt(a0**2+a1**2), -1.0, 1.0))\n u0 = np.cos(alpha)\n u1 = np.sin(alpha)\n \"\"\" \n u0,u1 defines the direction (unit vector)\n \"\"\"\n denom = sum((k*np.cos(alpha)-np.sin(alpha))**2*\\\n (np.sinc((r*(k*np.cos(alpha)-np.sin(alpha)))/(2*np.pi)))**2\\\n for k in range(-sample_size,sample_size+1))/N\n \"\"\" \n denom computes the denominator\n \"\"\"\n \n# \"\"\" \n# We use simpson rule for the numerator\n# \"\"\"\n# h = np.linspace(0,1,500)\n# \n# b0 = t1 + h*a0\n# b1 = theta1 + h*a1\n# \n# value = (1-h)*(u0**2*\\\n# np.real(np.exp(-1j*b1)*sum(-k**2*y_obs[k+sample_size]*np.exp(1j*k*b0) \\\n# for k in range(-sample_size,sample_size+1)))\\\n# +2*u0*u1*\\\n# np.real(np.exp(-1j*b1)*sum(k*y_obs[k+sample_size]*np.exp(1j*k*b0) \\\n# for k in range(-sample_size,sample_size+1)))\\\n# +u1**2*\\\n# np.real(np.exp(-1j*b1)*sum((-1)*y_obs[k+sample_size]*np.exp(1j*k*b0) \\\n# for k in range(-sample_size,sample_size+1)))) \n# value = value/np.sqrt(N)\n# \n# num = sci.simps(value, h)\n \n \"\"\" \n we use a quadrature for the numerator\n \"\"\" \n fun_int = lambda w: (1-w)*(u0**2*\\\n np.real(np.exp(-1j*(theta1+w*a1))*\\\n sum(-k**2*y_obs[k+sample_size]*np.exp(1j*k*(t1+w*a0)) \\\n for k in range(-sample_size,sample_size+1)))\\\n +2*u0*u1*\\\n np.real(np.exp(-1j*(theta1+w*a1))*\\\n sum(k*y_obs[k+sample_size]*np.exp(1j*k*(t1+w*a0)) \\\n for k in range(-sample_size,sample_size+1)))\\\n +u1**2*\\\n np.real(np.exp(-1j*(theta1+w*a1))*\\\n sum((-1)*y_obs[k+sample_size]*np.exp(1j*k*(t1+w*a0)) \\\n for k in range(-sample_size,sample_size+1)))) \n \n num = 
np.mean(sci.quad(fun_int, 0, 1, epsabs=1e-15, epsrel=1e-15, limit=1000))\n \n res = -num/denom\n \n if (r>=0.00001):\n \"\"\" \n we look a values far (t1,theta1) for which there is no indetermination\n \"\"\" \n res = (l1+f(x))/(1-(np.cos(a1)*dirichlet(a0,N)/N))\n \n return res \n \"\"\" \n we minimize g on [0, 2pi]^2 an dwe llok for the initialization point\n \"\"\"\n val2 = np.array([g([x,y]) for x,y in zip(np.ravel(X), np.ravel(Y))])\n init2 = np.argmin(val2)\n x1 = init2%steps\n x2 = (init2-x1)/steps\n init2 = [x1*0.05, x2*0.05] \n result2 = sco.minimize(g, init2, method=\"L-BFGS-B\", bounds=bnds, tol=1e-15) \n \"\"\" \n argmin of g\n \"\"\"\n t2 = result2.x[0]\n theta2 = result2.x[1] \n \"\"\" \n value of lambda_2\n \"\"\"\n l21 = l1-result2.fun \n a0 = t2-t1\n a1 = theta2-theta1\n N = 2*sample_size+1\n l22 = l1-(l1+f([t2,theta2]))/(1-(np.cos(a1)*dirichlet(a0,N)/N))\n l2 = max(l21,l22)\n \"\"\" \n we compute the statistic\n \"\"\"\n alpha1 = (1/3)*sample_size*(sample_size+1)\n alpha2 = (1/np.sqrt(N))*\\\n sum((k**2-alpha1)*\\\n np.real(y_obs[k+sample_size]*np.exp(1j*(k*t1-theta1))) \\\n for k in range(-sample_size,sample_size+1))\n alpha3 = (1/np.sqrt(N))*sum(k*np.real(y_obs[k+sample_size]*\\\n np.exp(1j*(k*t1-theta1))) for k in range(-sample_size,sample_size+1)) \n stat = (sigma*(alpha1*l1+alpha2)*scs.norm.pdf(l1/sigma)+\\\n (alpha1*sigma**2-alpha3**2)*(1-scs.norm.cdf(l1/sigma)))/\\\n (sigma*(alpha1*l2+alpha2)*scs.norm.pdf(l2/sigma)+\\\n (alpha1*sigma**2-alpha3**2)*(1-scs.norm.cdf(l2/sigma))) \n \n return stat", "def moments2nd(data):\n nrow,ncol=data.shape\n Isum = data.sum()\n Icol = data.sum(axis=0) # sum over all rows\n Irow = data.sum(axis=1) # sum over all cols\n colgrid = np.arange(ncol)\n rowgrid = np.arange(nrow)\n rowmean=np.sum(rowgrid*Irow)/Isum\n colmean=np.sum(colgrid*Icol)/Isum\n ROW,COL=np.indices((nrow,ncol)) \n rowgrid = rowgrid - rowmean # centered\n colgrid = colgrid - colmean\n Mrr = np.sum(rowgrid**2*data)/Isum\n Mcc = np.sum(colgrid**2*data)/Isum\n Mrc = np.sum(np.outer(rowgrid,colgrid)*data)/Isum \n return Mcc, Mrr, Mrc", "def weights_treatment_parameters(init_dict, GRID):\n GRID = np.linspace(0.01, 0.99, num=99, endpoint=True)\n\n coeffs_untreated = init_dict[\"UNTREATED\"][\"params\"]\n coeffs_treated = init_dict[\"TREATED\"][\"params\"]\n cov = construct_covariance_matrix(init_dict)\n x = simulate_covariates(init_dict)\n\n # We take the specified distribution for the cost shifters from the paper.\n cost_mean, cost_sd = -0.0026, np.sqrt(0.270)\n v_mean, v_sd = 0.00, np.sqrt(cov[2, 2])\n\n eval_points = norm.ppf(GRID, loc=v_mean, scale=v_sd)\n\n ate_weights = np.tile(1.0, 99)\n tut_weights = norm.cdf(eval_points, loc=cost_mean, scale=cost_sd)\n\n tt_weights = 1 - tut_weights\n\n def tut_integrand(point):\n eval_point = norm.ppf(point, loc=v_mean, scale=v_sd)\n return norm.cdf(eval_point, loc=cost_mean, scale=cost_sd)\n\n def tt_integrand(point):\n eval_point = norm.ppf(point, loc=v_mean, scale=v_sd)\n return norm.cdf(eval_point, loc=cost_mean, scale=cost_sd)\n\n # Scaling so that the weights integrate to one.\n tut_scaling = quad(tut_integrand, 0.01, 0.99)[0]\n tut_weights /= tut_scaling\n\n tt_scaling = quad(tt_integrand, 0.01, 0.99)[0]\n tt_weights /= tt_scaling\n\n mte = mte_information(coeffs_treated, coeffs_untreated, cov, GRID, x, init_dict)\n\n return ate_weights, tt_weights, tut_weights, mte", "def _second_moment(R, sig_l, sig_m, lum, mass, Mbh, beta, tensor,\n sigmaPsf, normPsf, step, nrad, surf_l, pixSize):\n if (max(sigmaPsf) > 0) and (pixSize > 
0): # PSF convolution\n\n # Kernel step is 1/4 of largest value between sigma(min) and 1/2 pixel side.\n # Kernel half size is the sum of 3*sigma(max) and 1/2 pixel diagonal.\n #\n if step == 0:\n step = max(pixSize/2., np.min(sigmaPsf))/4.\n mx = 3*np.max(sigmaPsf) + pixSize/np.sqrt(2)\n\n # Make grid linear in log of radius RR\n #\n rmax = np.max(R) + mx # Radius of circle containing all data + convolution\n logRad = np.linspace(np.log(step), np.log(rmax), nrad) # Linear grid in log(RR)\n rr = np.exp(logRad)\n\n # The model Vrms computation is only performed on the radial grid\n # which is then used to interpolate the values at any other location\n #\n wm2Pol = np.empty_like(rr)\n mgePol = np.empty_like(rr)\n rup = 3*np.max(sig_l)\n for j in range(rr.size): # Integration of equation (50)\n wm2Pol[j] = quadva(_integrand, [rr[j], rup],\n args=(sig_l, sig_m, lum, mass, Mbh, rr[j], beta, tensor))[0]\n mgePol[j] = np.sum(surf_l * np.exp(-0.5*(rr[j]/sig_l)**2))\n\n nx = np.ceil(rmax/step)\n x1 = np.linspace(-nx, nx, 2*nx)*step\n xCar, yCar = np.meshgrid(x1, x1) # Cartesian grid for convolution\n\n # Interpolate MGE model and Vrms over cartesian grid\n #\n r1 = 0.5*np.log(xCar**2 + yCar**2) # Log radius of cartesian grid\n wm2Car = np.interp(r1, logRad, wm2Pol)\n mgeCar = np.interp(r1, logRad, mgePol)\n\n nk = np.ceil(mx/step)\n kgrid = np.linspace(-nk, nk, 2*nk)*step\n xgrid, ygrid = np.meshgrid(kgrid, kgrid) # Kernel is square\n\n # Compute kernel with equation (A6) of Cappellari (2008).\n # Normalization is irrelevant here as it cancels out.\n #\n kernel = np.zeros_like(xgrid)\n dx = pixSize/2\n sp = np.sqrt(2)*sigmaPsf\n for j in range(len(sigmaPsf)):\n kernel += normPsf[j] \\\n * (special.erf((dx-xgrid)/sp[j]) + special.erf((dx+xgrid)/sp[j])) \\\n * (special.erf((dx-ygrid)/sp[j]) + special.erf((dx+ygrid)/sp[j]))\n kernel /= np.sum(kernel)\n\n # Seeing and aperture convolution with equation (A3)\n #\n muCar = np.sqrt(signal.fftconvolve(wm2Car, kernel, mode='same')\n / signal.fftconvolve(mgeCar, kernel, mode='same'))\n\n # Interpolate convolved image at observed apertures.\n # Aperture integration was already included in the kernel.\n #\n mu = bilinear_interpolate(x1, x1, muCar, R/np.sqrt(2), R/np.sqrt(2))\n\n else: # No PSF convolution: just compute values\n\n mu = np.empty_like(R)\n rmax = 3*np.max(sig_l)\n for j in range(R.size):\n wm2Pol = quadva(_integrand, [R[j], rmax],\n args=(sig_l, sig_m, lum, mass, Mbh, R[j], beta, tensor))[0]\n mgePol = np.sum( surf_l * np.exp(-0.5*(R[j]/sig_l)**2) )\n mu[j] = np.sqrt(wm2Pol/mgePol)\n\n return mu", "def get_thrust_and_moment(self):\n\n f1 = self.k_f * self.omega_1 ** 2\n f2 = self.k_f * self.omega_2 ** 2\n \n # c is often used to indicate \"collective\" thrust\n c = f1 + f2\n \n M_x = (f1 - f2) * self.l\n return c, M_x", "def d2(self):\r\n return self.d1() - self.sigma*self.t**0.5", "def d2(self):\n d1 = self.d1()\n return d1 - self.sigma * (self.t **(0.5))", "def get_dists_2():\n d1 = Distribution(['0', '1'], [1 / 2, 1 / 2])\n d2 = Distribution(['0', '1'], [1 / 3, 2 / 3])\n d3 = Distribution(['0', '1'], [2 / 5, 3 / 5])\n return d1, d2, d3", "def get_hyperparameter_bounds():\n minf = float(\"-inf\")\n inf = float(\"inf\")\n params = dict(mu=(minf,inf), nu=(0.0 ,inf), r=(0.0, inf), s=(0.0, inf))\n return params", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['low' ] = self.low\n paramDict['high' ] = self.high\n paramDict['alpha'] = self.alpha\n paramDict['beta' ] = self.beta\n return paramDict", "def 
getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['range'] = self.range\n return paramDict\n # no other additional parameters required", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['lambda'] = self.lambdaVar\n paramDict['k' ] = self.k\n paramDict['low' ] = self.low\n return paramDict", "def doParametersOfInterest(self):\n ''' ref : physicsmodel -> rvf\n self.modelBuilder.out.var(\"MH\").setRange(float(self.mHRange[0]),float(self.mHRange[1]))\n self.modelBuilder.out.var(\"MH\").setConstant(False)\n '''\n\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doVar(\"Fvbf[0,0,1]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu,Fvbf\")\n self.modelBuilder.doVar(\"\")\n self.modelBuilder.factory_('expr::ggH_s_func(\"(@0-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_b_func(\"(1-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_sbi_func(\"sqrt(@0)*(1.-@1)\", mu,Fvbf)')\n\n self.modelBuilder.factory_('expr::vbfH_s_func(\"(@0-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"(1-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_sbi_func(\"sqrt(@0)*(@1)\", mu,Fvbf)')", "def MyBaseMoments(p,q,img,gauss_sigma,gauss_centroid=None, gauss_g1=0., gauss_g2=0.):\n weight = galsim.Image(np.zeros_like(img.array))\n gauss = galsim.Gaussian(sigma=gauss_sigma*pixel_scale).shear(g1=gauss_g1,g2=gauss_g2)\n if gauss_centroid is None:\n gauss_centroid = img.true_center\n weight = gauss.drawImage(image=weight, scale=pixel_scale, method='no_pixel', use_true_center=True, offset=(gauss_centroid-img.true_center)*(1))\n x = np.linspace(img.xmin-img.center.x*0-gauss_centroid.x*1, img.xmax-img.center.x*0-gauss_centroid.x*1, img.xmax-img.xmin+1)+0.*0.5\n y = np.linspace(img.ymin-img.center.y*0-gauss_centroid.y*1, img.ymax-img.center.y*0-gauss_centroid.y*1, img.ymax-img.ymin+1)+0.*0.5\n X, Y = np.meshgrid(x,y)\n\n Q00 = np.sum(weight.array*img.array)\n Q10 = gauss_centroid.x + np.sum(X*weight.array*img.array)/Q00\n Q01 = gauss_centroid.y + np.sum(Y*weight.array*img.array)/Q00\n Q20 = np.sum((X**2)*weight.array*img.array)\n Q02 = np.sum((Y**2)*weight.array*img.array)\n\n monomial = 1.\n for pp in xrange(p):\n monomial *= X\n for qq in xrange(q):\n monomial *= Y\n Qpq = np.sum(monomial*weight.array*img.array) #/Q00\n\n return Qpq", "def params(self):\n return {'cfg': self.cfg,\n 'momentum': self.momentum,\n 'center': self.center,\n 'scale': self.scale,\n 'epsilon': self.epsilon,\n 'act_fn': self.act_fn}", "def initialize(self):\n\t\tmu = 0\n\t\tsigma = np.sqrt(2 / self.dataset[\"d\"])\n\n\t\tself.F1 = np.random.normal(mu, sigma, self.F1.shape)\n\t\tself.F2 = np.random.normal(mu, sigma, self.F2.shape)\n\t\tself.W = np.random.normal(mu, sigma, self.W.shape)\n\n\t\tself.F1_momentum = np.zeros(self.F1.shape)\n\t\tself.F2_momentum = np.zeros(self.F2.shape)\n\t\tself.W_momentum = np.zeros(self.W.shape)", "def init_hyperparameters():\n alpha = .8\n alpha2 = 1\n\n return alpha, alpha2", "def _get_params(self):\r\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", "def _get_params(self):\r\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", "def logdprior(parameters, hyperparameters):\n sigma_w_part = parameters[0] + 
invgamma_logpdf(parameters[0],\n hyperparameters[\"sigma_w_shape\"], hyperparameters[\"sigma_w_scale\"])\n sigma_v_part = parameters[1] + invgamma_logpdf(parameters[1], hyperparameters[\"sigma_v_shape\"], hyperparameters[\"sigma_v_scale\"])\n return sigma_w_part + sigma_v_part", "def initializeDistribution(self):\n if self.functionType == 'CDF':\n self._distribution = distribution1D.BasicMultiDimensionalInverseWeight(str(self.dataFilename), self.p,True)\n else:\n self._distribution = distribution1D.BasicMultiDimensionalInverseWeight(str(self.dataFilename), self.p,False)\n self.dimensionality = self._distribution.returnDimensionality()\n self.lowerBound = [self.returnLowerBound(dim) for dim in range(self.dimensionality)]\n self.upperBound = [self.returnUpperBound(dim) for dim in range(self.dimensionality)]", "def get_initial_params(self, x, y, yerr):\n# p0 = zeros(self.max_phonons + 1)\n p0 = zeros(2)\n p0[0] = 100\n p0[1] = .1\n return p0", "def gaussian_parameters(h, dim=-1):\n m, h = torch.split(h, h.size(dim) // 2, dim=dim)\n v = F.softplus(h) + 1e-8\n return m, v", "def moments(values):\n\n meanValue = numpy.mean(values)\n return (meanValue,\n numpy.sqrt(moment(values, meanValue, 2)),\n moment(values, meanValue, 3),\n moment(values, meanValue, 4))" ]
[ "0.6455378", "0.6434524", "0.6434524", "0.6220392", "0.6185545", "0.6109156", "0.60708535", "0.60512894", "0.60178155", "0.5966822", "0.59502286", "0.58735156", "0.5850575", "0.58171284", "0.5816514", "0.57661724", "0.5720821", "0.57173246", "0.57122564", "0.5709464", "0.57005703", "0.56566393", "0.56566393", "0.56566393", "0.5650613", "0.5647901", "0.56205666", "0.56183773", "0.5581819", "0.5580499", "0.5579431", "0.5574771", "0.55595124", "0.5549218", "0.5547417", "0.55450284", "0.55362755", "0.5533073", "0.55328864", "0.55292463", "0.55152035", "0.55004686", "0.54993844", "0.54803777", "0.547777", "0.54711723", "0.54631597", "0.5454735", "0.54525715", "0.5436794", "0.54347324", "0.54311454", "0.54289407", "0.5415726", "0.5407236", "0.540343", "0.5398714", "0.5398714", "0.53965294", "0.5371645", "0.5370123", "0.5353437", "0.5351195", "0.5346446", "0.5343545", "0.53380007", "0.53346354", "0.5332762", "0.53268677", "0.53158087", "0.53155935", "0.5310139", "0.5309718", "0.5300877", "0.5296502", "0.5292304", "0.5292062", "0.528314", "0.5280636", "0.5273658", "0.52717936", "0.5267175", "0.5266302", "0.5263959", "0.525519", "0.52547693", "0.5243865", "0.52428687", "0.5240332", "0.52383465", "0.52359915", "0.5235011", "0.5232048", "0.5220906", "0.5220906", "0.5220359", "0.521662", "0.5211572", "0.5206511", "0.52055675" ]
0.6106636
6
This function computes the distribution internal parameters from its two first moments.
def _compute_internals(self, moments):

    [mean, stdv] = moments
    internals = {}
    internals['k'] = mean ** 2. / stdv ** 2.
    internals['LAMBDA'] = mean / stdv ** 2.

    return internals
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['mu'] = mean\n internals['sigma'] = stdv\n\n return internals", "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['a'] = mean - np.sqrt(3) * stdv\n internals['b'] = mean + np.sqrt(3) * stdv\n\n return internals", "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['a'] = mean - np.sqrt(3) * stdv\n internals['b'] = mean + np.sqrt(3) * stdv\n\n return internals", "def calc_moments(distribution):\n x = torch.linspace(2, 22, 31)\n d_mean = torch.sum(x * distribution)\n d_var = torch.sum(distribution * (x - d_mean) ** 2) \n \n return d_mean, torch.sqrt(d_var)", "def posteriorDistribution(x,z,beta,sigma2):\r\n ### TODO: Write your code here\r\n mu = 0\r\n Cov = 0\r\n\r\n x_s = []\r\n for i in np.linspace(-1 , 1 , 150):\r\n for j in np.linspace(-1 , 1 , 150):\r\n x_s.append([i,j])\r\n x_s = np.array(x_s)\r\n\r\n X = []\r\n for i in x:\r\n j = [1,i[0]]\r\n X.append(j)\r\n X = np.array(X)\r\n\r\n common = np.matmul( X.T , X) + np.identity(2) * sigma2/beta\r\n common = np.linalg.inv(common)\r\n Cov = common * sigma2\r\n mu = np.matmul(common , np.matmul (X.T , z) )\r\n mu = mu.flatten()\r\n print(\"X.shape: \" , X.shape)\r\n print(\"z.shape: \",z.shape)\r\n print(\"Cov.shape\" , Cov.shape)\r\n print(\"mu.shape: \",mu.shape)\r\n density = util.density_Gaussian(mu , Cov , x_s).reshape(150 , 150 ).T\r\n print(\"density.shape\",density.shape)\r\n X,Y = np.meshgrid( np.linspace(-1,1,150) , np.linspace(-1,1,150) )\r\n\r\n \r\n\r\n plt.contour( X , Y , np.reshape(density , (150, 150 )))\r\n plt.plot(-0.1 , -0.5 , marker = 'o' , MarkerSize = 10 , label = 'point a')\r\n plt.xlabel('a0 ')\r\n plt.ylabel(' a1 ')\r\n plt.legend()\r\n plt.xlim = (-1,1)\r\n plt.ylim = (-1,1)\r\n plt.title('p(a|x1,z1....xn,zn) for '+ str(len(x)) +' samples')\r\n plt.show() \r\n print('-x-x-x-x-x-x-x-x-x')\r\n\r\n return (mu,Cov)", "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n cov = stdv / mean\n zeta = np.sqrt(np.log(1. 
+ cov ** 2.))\n LAMBDA = np.log(mean) - 0.5 * zeta ** 2.\n internals = {}\n internals['LAMBDA'] = LAMBDA\n internals['zeta'] = zeta\n\n return internals", "def moments(self):", "def parameters(self):\n\n m = self.__m\n s = linalg.cholesky(self.__prod).transpose()\n w = self.__weight\n\n # Compute the parameters of the posterior distribution.\n return linalg.solve(s[:m, :m], s[:m, m:]), \\\n np.dot(s[:m, :m].transpose(), s[:m, :m]), \\\n np.dot(s[m:, m:].transpose(), s[m:, m:]) / w, \\\n w", "def parameters_to_marginal_moments(prob, distmu, distsigma):\n good = np.isfinite(prob) & np.isfinite(distmu) & np.isfinite(distsigma)\n prob = prob[good]\n distmu = distmu[good]\n distsigma = distsigma[good]\n distmean, diststd, _ = parameters_to_moments(distmu, distsigma)\n rbar = (prob * distmean).sum()\n r2bar = (prob * (np.square(diststd) + np.square(distmean))).sum()\n return rbar, np.sqrt(r2bar - np.square(rbar))", "def _get_distribution_variables(self, R):\n domain, Domain = self.domain_Domain\n phase_name = self.phase_name\n\n R_typ = self.phase_param.R_typ # [m]\n # Particle-size distribution (area-weighted)\n f_a_dist = self.phase_param.f_a_dist(R) # [m-1]\n\n # Ensure the distribution is normalised, irrespective of discretisation\n # or user input\n f_a_dist = f_a_dist / pybamm.Integral(f_a_dist, R) # [m-1]\n\n # Volume-weighted particle-size distribution\n f_v_dist = R * f_a_dist / pybamm.Integral(R * f_a_dist, R) # [m-1]\n\n # Number-based particle-size distribution\n f_num_dist = (f_a_dist / R**2) / pybamm.Integral(\n f_a_dist / R**2, R\n ) # [m-1]\n\n # True mean radii and standard deviations, calculated from the f_a_dist that\n # was given, all have units [m]\n R_num_mean = pybamm.Integral(R * f_num_dist, R)\n R_a_mean = pybamm.Integral(R * f_a_dist, R)\n R_v_mean = pybamm.Integral(R * f_v_dist, R)\n sd_num = pybamm.sqrt(pybamm.Integral((R - R_num_mean) ** 2 * f_num_dist, R))\n sd_a = pybamm.sqrt(pybamm.Integral((R - R_a_mean) ** 2 * f_a_dist, R))\n sd_v = pybamm.sqrt(pybamm.Integral((R - R_v_mean) ** 2 * f_v_dist, R))\n\n # X-average the means and standard deviations to give scalars\n # (to remove the \"electrode\" domain, if present)\n R_num_mean = pybamm.x_average(R_num_mean)\n R_a_mean = pybamm.x_average(R_a_mean)\n R_v_mean = pybamm.x_average(R_v_mean)\n sd_num = pybamm.x_average(sd_num)\n sd_a = pybamm.x_average(sd_a)\n sd_v = pybamm.x_average(sd_v)\n\n # X-averaged distributions, or broadcast\n if R.domains[\"secondary\"] == [f\"{domain} electrode\"]:\n f_a_dist_xav = pybamm.x_average(f_a_dist)\n f_v_dist_xav = pybamm.x_average(f_v_dist)\n f_num_dist_xav = pybamm.x_average(f_num_dist)\n else:\n f_a_dist_xav = f_a_dist\n f_v_dist_xav = f_v_dist\n f_num_dist_xav = f_num_dist\n\n # broadcast\n f_a_dist = pybamm.SecondaryBroadcast(f_a_dist_xav, [f\"{domain} electrode\"])\n f_v_dist = pybamm.SecondaryBroadcast(f_v_dist_xav, [f\"{domain} electrode\"])\n f_num_dist = pybamm.SecondaryBroadcast(\n f_num_dist_xav, [f\"{domain} electrode\"]\n )\n\n variables = {\n f\"{Domain} {phase_name}particle sizes\": R / R_typ,\n f\"{Domain} {phase_name}particle sizes [m]\": R,\n f\"{Domain} area-weighted {phase_name}particle-size\"\n \" distribution [m-1]\": f_a_dist,\n f\"{Domain} volume-weighted {phase_name}particle-size\"\n \" distribution [m-1]\": f_v_dist,\n f\"{Domain} number-based {phase_name}particle-size\"\n \" distribution [m-1]\": f_num_dist,\n f\"{Domain} area-weighted mean particle radius [m]\": R_a_mean,\n f\"{Domain} volume-weighted mean particle radius [m]\": R_v_mean,\n f\"{Domain} 
number-based mean particle radius [m]\": R_num_mean,\n f\"{Domain} area-weighted {phase_name}particle-size\"\n \" standard deviation [m]\": sd_a,\n f\"{Domain} volume-weighted {phase_name}particle-size\"\n \" standard deviation [m]\": sd_v,\n f\"{Domain} number-based {phase_name}particle-size\"\n \" standard deviation [m]\": sd_num,\n # X-averaged sizes and distributions\n f\"X-averaged {domain} {phase_name}particle sizes [m]\": pybamm.x_average(R),\n f\"X-averaged {domain} area-weighted {phase_name}particle-size \"\n \"distribution [m-1]\": f_a_dist_xav,\n f\"X-averaged {domain} volume-weighted {phase_name}particle-size \"\n \"distribution [m-1]\": f_v_dist_xav,\n f\"X-averaged {domain} number-based {phase_name}particle-size \"\n \"distribution [m-1]\": f_num_dist_xav,\n }\n\n return variables", "def grd_posterior_gaussian(self, ) -> Tuple[np.ndarray, np.ndarray]:\n xmin, xmax = self.x_range\n ymin, ymax = self.y_range\n\n mu = np.array([0, 0])\n sigma = np.zeros((2, 2))\n\n _sample = self._sample\n _prior = self.prior\n\n def mean_x(x: float, y: float):\n return x * _sample(x, y) * _prior.eval(x, y)\n\n def mean_y(x: float, y: float):\n return y * _sample(x, y) * _prior.eval(x, y)\n\n def var_x(x: float, y: float):\n return x * mean_x(x, y)\n\n def var_y(x: float, y: float):\n return y * mean_y(x, y)\n\n # def var_xy(x: float, y: float):\n # return x * mean_y(x, y)\n\n # First moment\n (mu[0], mu[1]) = (integrate.dblquad(mean_x, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],\n integrate.dblquad(mean_y, xmin, xmax, lambda x: ymin, lambda x: ymax)[0])\n (sigma[0, 0], sigma[1, 1]) = \\\n (integrate.dblquad(var_x, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],\n integrate.dblquad(var_y, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],)\n # integrate.dblquad(var_xy, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],)\n return mu, sigma", "def calc_parameters(T, N, sigma, r, div):\n dt = T / N\n u = np.exp(sigma * np.sqrt(dt))\n d = 1 / u\n b = r - div\n q = 1 / 2 + 1 / 2 * (b - 1 / 2 * sigma ** 2) * np.sqrt(dt) / sigma # P(up movement)\n return dt, u, d, q, b", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['mean' ] = self.mean\n paramDict['sigma'] = self.sigma\n return paramDict", "def calc_parameters(T, N, sigma, r, div):\n dt = T/N\n u = np.exp(sigma*np.sqrt(dt))\n d = 1/u\n b = r-div\n q = 1/2 + 1/2 * (b - 1/2 * sigma**2)*np.sqrt(dt)/sigma # P(up movement)\n return(dt, u, d, q, b)", "def random():\n # only care about the value of second_moment:\n # curve = scale * e**(-second_moment^2 q^2)/q^2\n # scale = 6 pi/100 (contrast/density*absorbed_amount)^2 * Vf/radius\n # the remaining parameters can be randomly generated from zero to\n # twice the default value as done by default in compare.py\n pars = dict(\n scale=1,\n second_moment=10**np.random.uniform(1, 3),\n )\n return pars", "def N2_f(d1,d2,rho):\n import statsmodels.sandbox.distributions.extras as extras\n muStandardNormal=0.0 # mean of a standard normal distribution \n varStandardNormal=1.0 # variance of standard normal distribution \n upper=([d1,d2]) # upper bound for two values\n v=varStandardNormal # simplify our notations\n mu=muStandardNormal # simplify our notations\n covM=([v,rho],[rho,v])\n return extras.mvnormcdf(upper,mu,covM)", "def get_means_and_scales(self):\n return self.optim.parameters[::2], np.exp(self.optim.parameters[1::2])", "def initializeDistribution(self):\n self.convertToDistrDict['Laguerre'] = self.convertLaguerreToGamma\n self.convertToQuadDict ['Laguerre'] = 
self.convertGammaToLaguerre\n self.measureNormDict ['Laguerre'] = self.stdProbabilityNorm\n if (not self.upperBoundUsed):\n # and (not self.lowerBoundUsed):\n self._distribution = distribution1D.BasicGammaDistribution(self.alpha,1.0/self.beta,self.low)\n #self.lowerBoundUsed = 0.0\n self.upperBound = sys.float_info.max\n self.preferredQuadrature = 'Laguerre'\n self.preferredPolynomials = 'Laguerre'\n else:\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'Legendre'\n if self.lowerBoundUsed == False:\n a = 0.0\n self.lowerBound = a\n else:\n a = self.lowerBound\n if self.upperBoundUsed == False:\n b = sys.float_info.max\n self.upperBound = b\n else:\n b = self.upperBound\n self._distribution = distribution1D.BasicGammaDistribution(self.alpha,1.0/self.beta,self.low,a,b)", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['mean' ] = self.mean\n paramDict['sigma'] = self.sigma\n paramDict['low'] = self.low\n return paramDict", "def parameters(conv, orthogonal):\n nrm = operator_one_norm(conv.weight).detach().cpu().numpy()\n\n if nrm > 15:\n print('Overflow likely, norm={}'.format(nrm))\n\n m = np.arange(1, len(THETA) + 1)\n vals = m * np.ceil(nrm / THETA)\n mstar = min(1 + np.argmin(vals), 56)\n s = int(np.ceil(nrm / THETA[mstar - 1]))\n\n return mstar, s", "def get_initial_parameters(token_segs):\r\n estems = {} # tracks the average probability of each root\r\n esuffix = {} # tracks the average probability of each suffix\r\n etrans = {} # tracks the average probability of each (transition, feature) pair\r\n eftrans = {} # tracks the average probability of each feature (interface between stem and suffix)\r\n\r\n # collect the probabilities of each object, to be normalized (divided by their totals) later\r\n for ts_list in token_segs:\r\n avg_prob = 1.0 / len(ts_list)\r\n for ts in ts_list:\r\n root = ts.root\r\n rand_val = 1.0\r\n if root in estems:\r\n estems[root] += rand_val * avg_prob\r\n else: estems[root] = rand_val * avg_prob\r\n\r\n suffix = ts.suffix\r\n if suffix in esuffix:\r\n esuffix[suffix] += rand_val * avg_prob\r\n else: esuffix[suffix] = rand_val * avg_prob\r\n\r\n trans = ts.trans\r\n ftrans = feature(root, suffix)\r\n if (trans, ftrans) in etrans:\r\n etrans[(trans, ftrans)] += rand_val * avg_prob\r\n else: etrans[(trans, ftrans)] = rand_val * avg_prob\r\n\r\n if ftrans in eftrans:\r\n eftrans[ftrans] += rand_val * avg_prob\r\n else: eftrans[ftrans] = rand_val * avg_prob\r\n\r\n # divide by the totals\r\n probstems = estems\r\n probsum = sum(probstems.values())\r\n for stem in probstems:\r\n probstems[stem] /= probsum\r\n\r\n probsuffix = esuffix\r\n probsum = sum(probsuffix.values())\r\n for suffix in probsuffix:\r\n probsuffix[suffix] /= probsum\r\n\r\n probtrans = etrans\r\n for trans, ftrans in probtrans:\r\n probtrans[(trans, ftrans)] /= eftrans[ftrans]\r\n\r\n return probstems, probsuffix, probtrans", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale,self.period))", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale,self.period))", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale,self.period))", "def get_distribution(self):\n\n # If the distributions have been updated before.\n if self.update_number > 0:\n for m in range(0, self.document_number, 1):\n for k in range(0, self.topic_number,1):\n probability = self.document_distribution_over_topic[m][k] / self.update_number\n self.document_distribution_over_topic[m][k] = probability\n for 
k in range(0, self.topic_number,1):\n for v in range(0, self.term_number,1):\n probability = self.topic_distribution_over_term[k][v] / self.update_number\n self.topic_distribution_over_term[k][v] = probability\n # The distributions have not been updated once.\n else:\n for m in range(0, self.document_number, 1):\n for k in range(0, self.topic_number, 1):\n self.document_distribution_over_topic[m][k] = (\n (self.document_topic_count_matrix[m][k] + self.alpha[k]) / (\n self.sum_document_by_topic_count[m] + self.sum_alpha))\n for k in range(0, self.topic_number, 1):\n for v in range(0, self.term_number, 1):\n self.topic_distribution_over_term[k][v] = (\n (self.topic_term_count_matrix[k][v] + self.beta[v]) / (\n self.sum_topic_by_term_count[k] + self.sum_beta))", "def generate_moments(hyper, params):\n\n k, d = hyper['k'], hyper['d']\n\n p = params # Shorthand, don't judge\n m = {} # Moments\n for x1 in xrange(1,d+1):\n m[(x1,)] = sum( p[(h,x1)] * p[(h,)] for h in xrange(1,k+1) )\n for x2 in xrange(1,d+1):\n m[(x1,x2)] = sum( p[(h,x1)] * p[(h,x2)] * p[(h,)] for h in xrange(1,k+1) )\n for x3 in xrange(1,d+1):\n m[(x1,x2,x3)] = sum( p[(h,x1)] * p[(h,x2)] * p[(h,x3)] * p[(h,)] for h in xrange(1,k+1) )\n return m", "def update_moments_r(self):\n denominator = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2 )\n nominator1 = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2 ) * self.constellation\n \n nominator2 = np.exp(self.gamma_r[:, None] * self.constellation\n - self.Sigma_r[:, None] * np.power(self.constellation, 2) /2) * np.power(self.constellation, 2)\n try:\n \n moment1 = nominator1.sum(axis=1) / denominator.sum(axis=1)\n moment2 = nominator2.sum(axis=1) / denominator.sum(axis=1)\n assert np.all(np.logical_not(np.isnan(moment1))) and np.all(np.logical_not(np.isnan(moment2)))\n except:\n print(\"Oops! That was no valid number. Try again...\")\n\n \n self.mu = moment1\n return moment1, moment2", "def var_parameters(jd,mag,err):\n\n mean = np.mean(mag)\n nepochs = float(len(jd))\n\n chi = np.sum( (mag - mean)**2. / err**2. 
)\n p_chi = chi2.cdf(chi,(nepochs-1))\n\n\n a = (mag-mean)**2\n ex_var = (np.sum(a-err**2)/((nepochs*(mean**2))))\n sd = np.sqrt((1./(nepochs-1))*np.sum(((a-err**2)-ex_var*(mean**2))**2))\n ex_verr = sd/((mean**2)*np.sqrt(nepochs))\n\n\n return p_chi, ex_var, ex_verr", "def priorDistribution(beta):\r\n ### TODO: Write your code here\r\n m_v = np.zeros(2)\r\n print(\"m_v shape: \" ,m_v.shape)\r\n c_v = np.array( [ [ beta , 0 ] , [ 0 , beta ] ] )\r\n print(\"c_v shape: \",c_v.shape)\r\n x_s = []\r\n for i in np.linspace(-1 , 1 , 150):\r\n for j in np.linspace(-1 , 1 , 150):\r\n x_s.append([i,j])\r\n x_s = np.array(x_s)\r\n print(\"x_s shape: \",x_s.shape)\r\n density = util.density_Gaussian(m_v , c_v , x_s)\r\n #print(density)\r\n print(\"length density \",len(density))\r\n X,Y = np.meshgrid( np.linspace(-1,1,150) , np.linspace(-1,1,150) )\r\n plt.contour( X , Y , np.reshape(density , (150, 150 )) )\r\n plt.plot(-0.1 , -0.5 , marker = 'o' , MarkerSize = 10 , label = 'point a')\r\n plt.xlabel('a0 ')\r\n plt.ylabel(' a1 ')\r\n plt.legend()\r\n plt.title('p(a)')\r\n plt.show() \r\n print('-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x')\r\n return", "def getInitParams(self):\n paramDict = Distribution.getInitParams(self)\n paramDict['strategy'] = self.strategy\n paramDict['nPoints'] = self.nPoints\n return paramDict", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['lambda'] = self.lambdaVar # rate parameter\n paramDict['low' ] = self.low # lower domain boundary\n return paramDict", "def PH2From3Moments (moms, prec=1e-14):\n\n m1, m2, m3 = moms\n\n # check moment boounds\n m2l = APH2ndMomentLowerBound(m1, 2) \n m3l = APH3rdMomentLowerBound(m1, m2, 2) \n m3u = APH3rdMomentUpperBound(m1, m2, 2) \n \n if m2<m2l:\n raise Exception(\"The given second moment is not feasible!\") \n if m3<m3l:\n raise Exception(\"The given third moment is not feasible (too small)!\")\n if m3>m3u:\n raise Exception(\"The given third moment is not feasible (too large)!\")\n \n # check if we have an exponential distribution\n if abs(m2/m1/m1-2.0) < prec:\n return (np.matrix([1]), np.matrix([[-1/m1]]))\n \n # calculate parameters\n b = 3.0*m1*m2-m3\n c = 3.0*m2*m2-2.0*m1*m3\n e = -2.0*m1*m1+m2\n a = b*b+6.0*c*e\n if a<0:\n a = 0\n a = math.sqrt(a)\n if c>0:\n lambda1 = (b - a) / c\n lambda2 = (b + a) / c\n p = (-b-6.0*m1*e+a) / (b+a)\n elif c<0:\n lambda1 = (b + a) / c\n lambda2 = (b - a) / c\n p = (b+6.0*m1*e+a) / (-b+a)\n elif c==0:\n lambda1 = 0\n lambda2 = 1.0 / m1\n p = 0\n \n # return the result\n return (np.matrix([p,1.0-p]), np.matrix([[-lambda1, lambda1], [0,-lambda2]]))", "def prop_func_form_params(param1,param2,*arg):\n return np.log(MH.simple_2D_Gauss(param1-param2,arg[0],arg[1]))", "def fdist(param1, param2):\n return(prng.gamma(param1, param2))", "def initializeDistribution(self):\n self.convertToDistrDict['Jacobi'] = self.convertJacobiToBeta\n self.convertToQuadDict ['Jacobi'] = self.convertBetaToJacobi\n self.measureNormDict ['Jacobi'] = self.stdProbabilityNorm\n #this \"if\" section can only be called if distribution not generated using readMoreXML\n if (not self.upperBoundUsed) and (not self.lowerBoundUsed):\n self._distribution = distribution1D.BasicBetaDistribution(self.alpha,self.beta,self.high-self.low,self.low)\n else:\n if self.lowerBoundUsed == False:\n a = 0.0\n else:\n a = self.lowerBound\n if self.upperBoundUsed == False:\n b = sys.float_info.max\n else:\n b = self.upperBound\n self._distribution = 
distribution1D.BasicBetaDistribution(self.alpha,self.beta,self.high-self.low,a,b,self.low)\n self.preferredPolynomials = 'Jacobi'\n self.compatibleQuadrature.append('Jacobi')\n self.compatibleQuadrature.append('ClenshawCurtis')", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['apex' ] = self.apex\n paramDict['min' ] = self.min\n paramDict['max' ] = self.max\n return paramDict", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['mu' ] = self.mu\n return paramDict", "def distribution(self):\n \n #external_distribution serves both the purpose of external setting of distribution and the caching of distribution()\n if self.external_distribution:\n return self.external_distribution_array;\n else:\n energy_vector = []\n superset = self.generate_superset(0) \n \n for i in superset:\n state = self.ket(i)\n \n norm_squared = np.dot(state.T, state)\n \n if norm_squared > 0: #zero is appended at the end\n energy = np.dot(state.T, np.dot( self.epsilon, state))\n interaction = np.dot(state.T, np.dot( self.u, state))/2.0 #divide by two. Otherwise, <l r| U |l r > = U_LR + U_RL = 2U\n #print state, np.dot(self.u, state) \n #print interaction\n energy_vector.append( energy + interaction )\n \n energy_vector.insert(0, 0.0) \n probability = np.exp( np.multiply(-self.beta, energy_vector)) \n probability /= probability.sum() \n return probability", "def initializeDistribution(self):\n self.convertToDistrDict['Hermite'] = self.convertHermiteToNormal\n self.convertToQuadDict ['Hermite'] = self.convertNormalToHermite\n self.measureNormDict ['Hermite'] = self.stdProbabilityNorm\n if (not self.upperBoundUsed) and (not self.lowerBoundUsed):\n self._distribution = distribution1D.BasicNormalDistribution(self.mean,\n self.sigma)\n self.lowerBound = -sys.float_info.max\n self.upperBound = sys.float_info.max\n self.preferredQuadrature = 'Hermite'\n self.preferredPolynomials = 'Hermite'\n else:\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'Legendre'\n if self.lowerBoundUsed == False:\n a = -sys.float_info.max\n self.lowerBound = a\n else:\n a = self.lowerBound\n if self.upperBoundUsed == False:\n b = sys.float_info.max\n self.upperBound = b\n else:\n b = self.upperBound\n self._distribution = distribution1D.BasicNormalDistribution(self.mean,\n self.sigma,\n a,b)", "def prop_dist_form_params(*arg):\n return np.random.multivariate_normal(*arg)", "def _get_prior_params(self):\n # relation transformation matrix\n M_mu = np.eye(self.n_polarities, dtype=\"float32\")\n M_mu[1, :] = [0., 0.3, 0.]\n M_mu = np.tile(M_mu, (self.n_rels, 1)).reshape(\n self.n_rels, self.n_polarities, self.n_polarities\n )\n # for rel, rel_idx in iteritems(self.rel2idx):\n # # swap axes for contrastive relations\n # if check_rel(rel, CONTRASTIVE_RELS):\n # mu_i = M_mu[rel_idx]\n # mu_i[[0, 2]] = mu_i[[2, 0]]\n M_mu = torch.tensor(M_mu)\n M_sigma = torch.tensor(\n np.ones((self.n_rels, self.n_polarities, self.n_polarities),\n dtype=\"float32\")\n )\n # beta\n beta_p = 5. * torch.tensor(np.ones((self.n_rels, self.n_polarities),\n dtype=\"float32\"))\n beta_q = 5. 
* torch.tensor(np.ones((self.n_rels, self.n_polarities),\n dtype=\"float32\"))\n # z_epsilon\n z_epsilon_p = torch.tensor(1.)\n z_epsilon_q = torch.tensor(15.)\n # scale factor\n scale_factor = torch.tensor(34.)\n return {\"M_mu\": M_mu, \"M_sigma\": M_sigma, \"beta_p\": beta_p,\n \"beta_q\": beta_q, \"z_epsilon_p\": z_epsilon_p,\n \"z_epsilon_q\": z_epsilon_q, \"scale_factor\": scale_factor}", "def demo_indef():\n n_dim = 2\n A = np.eye(n_dim)\n A[1, 1] = -1.\n covar = np.eye(n_dim)\n mean = np.zeros(n_dim)\n approx = approx_quad_form(mean, covar, A)\n\n # Sample from true dist\n n_sample = 10000\n x = np.random.multivariate_normal(mean, covar, n_sample)\n q_samples = np.zeros(n_sample)\n for i in range(n_sample):\n q_samples[i] = x[i] @ A @ x[i]\n\n q = np.linspace(-10, 10)\n\n plt.plot(\n q, approx(q), label='Approx.',\n color='tab:blue', linestyle='--')\n bins = np.linspace(-8, 8, 81)\n bins[0] = -np.inf\n bins[-1] = np.inf\n plt.hist(\n q_samples, density=True, histtype='stepfilled',\n bins=bins,\n alpha=0.5, color='black', label='Samples')\n plt.xlabel('q')\n plt.ylabel('pdf(q) [-]')\n plt.legend()\n\n central_moments_sample = scipy.stats.moment(\n q_samples, moment=[0, 1, 2, 3, 4])\n print(central_moments_sample)", "def getCrowDistDict(self):\n retDict = Distribution.getCrowDistDict(self)\n retDict['mu'] = self.mean\n retDict['sigma'] = self.sigma\n return retDict", "def initializeDistribution(self):\n self.raiseAMessage('initialize distribution')\n mu = distribution1D.vectord_cxx(len(self.mu))\n for i in range(len(self.mu)):\n mu[i] = self.mu[i]\n covariance = distribution1D.vectord_cxx(len(self.covariance))\n for i in range(len(self.covariance)):\n covariance[i] = self.covariance[i]\n if self.method == 'spline':\n if self.covarianceType != 'abs':\n self.raiseAnError(IOError,'covariance with type ' + self.covariance + ' is not implemented for ' + self.method + ' method')\n self._distribution = distribution1D.BasicMultivariateNormal(covariance, mu)\n elif self.method == 'pca':\n self._distribution = distribution1D.BasicMultivariateNormal(covariance, mu, str(self.covarianceType), self.rank)\n if self.transformation:\n self.lowerBound = [-sys.float_info.max]*self.rank\n self.upperBound = [sys.float_info.max]*self.rank\n else:\n self.lowerBound = [self.returnLowerBound(dim) for dim in range(self.dimension)]\n self.upperBound = [self.returnUpperBound(dim) for dim in range(self.dimension)]", "def _init_params(self):\n self.W_ems = []\n self.b_ems = []\n if self.rank_n_approx:\n W_em1 = self.init_fn[0](self.n_in,\n self.rank_n_approx,\n self.sparsity[0],\n self.scale[0],\n self.rng)\n W_em2 = self.init_fn[0](self.rank_n_approx,\n self.n_hids[0],\n self.sparsity[0],\n self.scale[0],\n self.rng)\n self.W_em1 = theano.shared(W_em1,\n name='W1_0_%s'%self.name)\n self.W_em2 = theano.shared(W_em2,\n name='W2_0_%s'%self.name)\n self.W_ems = [self.W_em1, self.W_em2]\n\n else:\n W_em = self.init_fn[0](self.n_in,\n self.n_hids[0],\n self.sparsity[0],\n self.scale[0],\n self.rng)\n self.W_em = theano.shared(W_em,\n name='W_0_%s'%self.name)\n self.W_ems = [self.W_em]\n\n self.b_em = theano.shared(\n self.bias_fn[0](self.n_hids[0], self.bias_scale[0],self.rng),\n name='b_0_%s'%self.name)\n self.b_ems = [self.b_em]\n\n for dx in range(1, self.n_layers):\n W_em = self.init_fn[dx](self.n_hids[dx-1] / self.pieces[dx],\n self.n_hids[dx],\n self.sparsity[dx],\n self.scale[dx],\n self.rng)\n W_em = theano.shared(W_em, name='W_%d_%s'%(dx,self.name))\n self.W_ems += [W_em]\n\n b_em = theano.shared(\n 
self.bias_fn[dx](self.n_hids[dx], self.bias_scale[dx],self.rng),\n name='b_%d_%s'%(dx,self.name))\n self.b_ems += [b_em]\n\n self.params = [x for x in self.W_ems]\n\n if self.learn_bias and self.learn_bias!='last':\n self.params = [x for x in self.W_ems] + [x for x in self.b_ems]\n elif self.learn_bias == 'last':\n self.params = [x for x in self.W_ems] + [x for x in\n self.b_ems][:-1]\n self.params_grad_scale = [self._grad_scale for x in self.params]\n if self.weight_noise:\n self.nW_ems = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_ems]\n self.nb_ems = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_ems]\n\n self.noise_params = [x for x in self.nW_ems] + [x for x in self.nb_ems]\n self.noise_params_shape_fn = [constant_shape(x.get_value().shape)\n for x in self.noise_params]", "def Rosenblatt_Transform(dist, x_i): \n if dist.stats(moments = 's') > 1 or dist.stats(moments = 's') < -1:\n \n x_N_mean = dist.median()\n x_N_std = (x_i - x_N_mean)/sst.norm.ppf(dist.cdf(x_i))\n \n return(x_N_mean, x_N_std)\n \n else:\n x_N_std = sst.norm.pdf(sst.norm.ppf(dist.cdf(x_i)))/dist.pdf(x_i)\n x_N_mean = x_i - sst.norm.ppf(dist.cdf(x_i))*x_N_std\n return(x_N_mean, x_N_std)", "def _get_marginal_pdfs( res, nbins=51, verbose=True ):\n\tvparam_names = res.vparam_names\n\tweights = res.weights\n\tsamples = res.samples\n\n\tpdfdict = {}\n\n\tfor param in vparam_names :\n\t\tipar = vparam_names.index( param )\n\t\tparamvals = samples[:,ipar]\n\n\t\tif nbins>1:\n\t\t\tif param in res.bounds :\n\t\t\t\tparvalmin, parvalmax = res.bounds[param]\n\t\t\telse :\n\t\t\t\tparvalmin, parvalmax = 0.99*paramvals.min(), 1.01*paramvals.max()\n\t\t\tparambins = np.linspace( parvalmin, parvalmax, nbins, endpoint=True ).flatten()\n\t\t\tbinindices = np.digitize( paramvals, parambins )\n\n\t\t\t# we estimate the marginalized pdf by summing the weights of all points in the bin,\n\t\t\t# where the weight of each point is the prior volume at that point times the\n\t\t\t# likelihood, divided by the total evidence\n\t\t\tpdf = np.array( [ weights[np.where( binindices==ibin )].sum() for ibin in range(len(parambins)) ] )\n\t\telse :\n\t\t\tparambins = None\n\t\t\tpdf = None\n\n\n\t\tmean = (weights * samples[:,ipar]).sum()\n\t\t#print(samples[:,ipar]-mean)\n\t\t#print(weights)\n\t\tstd = np.sqrt( (weights * (samples[:,ipar]-mean)**2 ).sum() )\n\n\n\t\tpdfdict[param] = (parambins,pdf,mean,std,res.logz)\n\n\t\tif verbose :\n\t\t\tif np.abs(std)>=0.1:\n\t\t\t\tprint( ' <%s> = %.2f +- %.2f'%( param, np.round(mean,2), np.round(std,2)) )\n\t\t\telif np.abs(std)>=0.01:\n\t\t\t\tprint( ' <%s> = %.3f +- %.3f'%( param, np.round(mean,3), np.round(std,3)) )\n\t\t\telif np.abs(std)>=0.001:\n\t\t\t\tprint( ' <%s> = %.4f +- %.4f'%( param, np.round(mean,4), np.round(std,4)) )\n\t\t\telse :\n\t\t\t\tprint( ' <%s> = %.3e +- %.3e'%( param, mean, std) )\n\n\n\t\tif param == 'x0' :\n\t\t\tsalt2 = sncosmo.Model( source='salt2')\n\t\t\tsalt2.source.set_peakmag( 0., 'bessellb', 'ab' )\n\t\t\tx0_AB0 = salt2.get('x0')\n\t\t\tmBmean = -2.5*np.log10( mean / x0_AB0 )\n\t\t\tmBstd = 2.5*np.log10( np.e ) * std / mean\n\t\t\tmBbins = -2.5*np.log10( parambins / x0_AB0 )\n\n\t\t\tpdfdict['mB'] = ( mBbins, pdf, mBmean, mBstd )\n\t\t\tif verbose:\n\t\t\t\tprint( ' <%s> = %.3f +- %.3f'%( 'mB', np.round(mBmean,3), np.round(mBstd,3)) )\n\n\treturn( pdfdict )", "def getInitParams(self):\n paramDict = Distribution.getInitParams(self)\n paramDict['mapping'] = self.mapping\n paramDict['values'] = self.values\n return paramDict", 
"def posteriorDistribution(x,z,beta,sigma2):\r\n ### TODO: Write your code here\r\n\r\n # mean of posterior distribution is the MAP estimate of the weights a\r\n # tau^2(from notes) is beta\r\n\r\n extra_col = np.ones((x.shape[0], 1))\r\n x = np.append(extra_col, x, axis = 1)\r\n\r\n alpha_map = np.linalg.inv((np.transpose(x)@x + (sigma2/beta)*np.eye(2)))@(np.transpose(x)@z)\r\n mu = alpha_map\r\n\r\n Cov = np.linalg.inv((np.transpose(x)@x + (sigma2/beta)*np.eye(2)))*sigma2\r\n\r\n num_x = 100\r\n num_y = 100\r\n\r\n xvalues = np.linspace(-1, 1, num = num_x)\r\n yvalues = np.linspace(-1, 1, num = num_y)\r\n X_grid, Y_grid = np.meshgrid(xvalues, yvalues)\r\n\r\n samples = np.column_stack((X_grid.flatten(), Y_grid.flatten()))\r\n\r\n density = util.density_Gaussian(mu.squeeze(), Cov, samples)\r\n density_grid = np.reshape(density, (num_x, num_y))\r\n\r\n plt.figure(1)\r\n plt.title(\"Posterior Distribution of α Given 5 Data Points\")\r\n plt.xlabel('$α_0$')\r\n plt.ylabel('$α_1$')\r\n plt.scatter(-0.1, -0.5, c='r')\r\n plt.contour(X_grid, Y_grid, density_grid, cmap=plt.cm.winter)\r\n plt.show()\r\n\r\n return (mu,Cov)", "def get_prob_for_distributions(p):\n w1 = p[0]\n mu1 = p[1]\n sigma1 = p[2]\n w2 = p[3]\n mu2 = p[4]\n sigma2 = p[5]\n w3 = p[6]\n mu3 = p[7]\n sigma3 = p[8]\n dist_range = (0, 4.330310991999920844e+01)\n x = np.linspace(dist_range[0], dist_range[1], 1000)\n A1 = np.array(w1 * mlab.normpdf(x, mu1, sigma1)).sum()\n A2 = np.array(w2 * mlab.normpdf(x, mu2, sigma2)).sum()\n A3 = np.array(w3 * mlab.normpdf(x, mu3, sigma3)).sum()\n p1 = A1 / (A1 + A2 + A3)\n p2 = A2 / (A1 + A2 + A3)\n p3 = A3 / (A1 + A2 + A3)\n return p1, p2, p3", "def Get_params(numparams, dt, D):\n # bounds from table 1 Kowalek et al 2020\n Nmin, Nmax = 30, 600\n Bmin, Bmax = 1, 6\n Rmin, Rmax = 1, 17\n alphamin, alphamax = 0.3, 0.7\n Qmin, Qmax = 1, 9\n\n # Gen parameters\n Q = np.random.uniform(Qmin, Qmax, size=numparams)\n Q1, Q2 = Q, Q\n\n NsND = np.random.randint(Nmin, Nmax + 1, size=numparams)\n NsAD = np.random.randint(Nmin, Nmax + 1, size=numparams)\n NsCD = np.random.randint(Nmin, Nmax + 1, size=numparams)\n NsDM = np.random.randint(Nmin, Nmax + 1, size=numparams)\n TDM = NsDM * dt\n\n B = np.random.uniform(Bmin, Bmax, size=numparams)\n r_c = np.sqrt(D * NsCD * dt / B) # solving for r_c in eq. 8 Kowalek\n\n R = np.random.uniform(Rmin, Rmax, size=numparams)\n v = np.sqrt(R * 4 * D / TDM) # solving for v in eq. 7 Kowalek\n\n alpha = np.random.uniform(alphamin, alphamax, size=numparams)\n\n # Compute sigma for ND, AD, CD from eq. 12 Kowalek\n sigmaND = np.sqrt(D * dt) / Q1\n sigmaAD = np.sqrt(D * dt) / Q1\n sigmaCD = np.sqrt(D * dt) / Q1\n\n # Compute sigma for DM from eq. 
12 Kowalek\n sigmaDM = np.sqrt(D * dt + v ** 2 * dt ** 2) / Q2\n\n return np.array(\n [\n NsND,\n NsAD,\n NsCD,\n NsDM,\n D * np.ones(numparams),\n dt * np.ones(numparams),\n r_c,\n v,\n alpha,\n sigmaND,\n sigmaAD,\n sigmaCD,\n sigmaDM,\n ]\n ).T", "def _calpara(self):\n self.up = math.exp(self.sigma*math.sqrt(self.deltatime))\n self.down = math.exp(-self.sigma*math.sqrt(self.deltatime))\n self.upprob = (math.exp((self.r-self.d)*self.deltatime)-self.down)/(self.up-self.down)", "def APH2ndMomentLowerBound (m1, n):\n\n return float(m1)*m1*(n+1) / n", "def _construct_mom_stuff(self):\n a = self.mom_mix_rate\n dist_mean = self.GN.dist_mean\n dist_cov = self.GN.dist_cov\n # Get the generated sample observations for this batch, transformed\n # linearly into the desired space for moment matching...\n X_b = T.dot(self.GN.output, self.mom_match_proj)\n # Get their mean\n batch_mean = T.mean(X_b, axis=0)\n # Get the updated generator distribution mean\n new_mean = ((1.0 - a[0]) * self.GN.dist_mean) + (a[0] * batch_mean)\n # Use the mean to get the updated generator distribution covariance\n X_b_minus_mean = X_b - new_mean\n # Whelp, I guess this line needs the cast... for some reason...\n batch_cov = T.dot(X_b_minus_mean.T, X_b_minus_mean) / T.cast(X_b.shape[0], 'floatX')\n new_cov = ((1.0 - a[0]) * self.GN.dist_cov) + (a[0] * batch_cov)\n # Get the cost for deviation from the target distribution's moments\n mean_err = new_mean - self.target_mean\n cov_err = (new_cov - self.target_cov)\n mm_cost = self.mom_match_weight[0] * \\\n (T.sum(mean_err**2.0) + T.sum(cov_err**2.0))\n # Construct the updates for the running estimates of the generator\n # distribution's first and second-order moments.\n mom_updates = OrderedDict()\n mom_updates[self.GN.dist_mean] = new_mean\n mom_updates[self.GN.dist_cov] = new_cov\n return [mm_cost, mom_updates]", "def aicpdf(xvals, distribution, params):\n if distribution == 'pareto':\n pvals = (params['xmin'] * params['mu'] ** params['xmin']) / (xvals ** (params['xmin'] + 1))\n return pvals\n \n elif distribution == 'lognormal':\n #import pdb; pdb.set_trace()\n pvals = np.exp(-(np.log(xvals) - params['mu'])**2 / (2 * params['sigma']**2)) / (xvals * params['sigma'] * np.sqrt(2*np.pi))\n return pvals\n \n elif distribution == 'normal':\n pvals = np.exp(-(xvals - params['mu'])**2 / (2 * params['sigma']**2)) / (params['sigma'] * np.sqrt(2*np.pi))\n return pvals\n \n elif distribution == 'exponential':\n pvals = params['lambda'] * np.exp(-params['lambda'] * xvals)\n return pvals \n \n elif distribution == 'boundedpl':\n #pvals = (params['mu'] * (params['mu'] ** params['xmax'] - params['xmin'] ** params['xmax'])) / (xvals ** (params['mu'] + 1))\n #mu * (xmax ^ mu - xmin ^ mu) / x ^ (mu+1)\n pvals = (params['mu'] * (params['xmax'] ** params['mu'] - params['xmin'] ** params['mu'])) / (xvals ** (params['mu'] + 1))\n return pvals", "def moment(self, n, mu, sigma):\n return scipy_norm.moment(n, mu, sigma)", "def _get_params(self):\r\n return np.hstack((self.variance, self.lengthscale))", "def _get_params(self):\r\n return np.hstack((self.variance, self.lengthscale))", "def computeMoments(x):\n return (abs(stats.skew(x)),abs(stats.kurtosis(x,None,True)))", "def initializeDistribution(self):\n self.minVal = min(math.exp(self.upperBound),math.exp(self.lowerBound))\n self.maxVal = max(math.exp(self.upperBound),math.exp(self.lowerBound))", "def initializeDistribution(self):\n self.convertToDistrDict['Legendre'] = self.convertLegendreToUniform\n self.convertToQuadDict ['Legendre'] = 
self.convertUniformToLegendre\n self.measureNormDict ['Legendre'] = self.stdProbabilityNorm\n self.convertToDistrDict['ClenshawCurtis'] = self.convertLegendreToUniform\n self.convertToQuadDict ['ClenshawCurtis'] = self.convertUniformToLegendre\n self.measureNormDict ['ClenshawCurtis'] = self.stdProbabilityNorm\n self._distribution = distribution1D.BasicUniformDistribution(self.lowerBound,self.lowerBound+self.range)", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale))", "def __bivariate_gaussian(self, sig1, sig2, mu1, mu2, x1, x2, rho):\n Z1 = ((x1 - mu1) / sig1) ** 2\n Z2 = ((x2 - mu2) / sig2) ** 2\n Z3 = 2 * rho * (x1 - mu1) * (x2 - mu2) / (sig1 * sig2)\n Z = Z1 + Z2 - Z3\n\n pi_const = torch.tensor([np.pi]).to(self.__device)\n\n return torch.exp(-Z / (2 * (1 - rho ** 2))).to(self.__device) / (\n 2 * pi_const * sig1 * sig2 * torch.sqrt(1 - rho ** 2)\n )", "def getCrowDistDict(self):\n retDict = Distribution.getCrowDistDict(self)\n retDict['mu'] = self.mean\n retDict['sigma'] = self.sigma\n retDict['low'] = self.low\n return retDict", "def __bivariate_gaussian(self, sig1, sig2, mu1, mu2, x1, x2, rho):\n Z1 = ((x1 - mu1) / sig1) ** 2\n Z2 = ((x2 - mu2) / sig2) ** 2\n Z3 = 2 * rho * (x1 - mu1) * (x2 - mu2) / (sig1 * sig2)\n\n Z = Z1 + Z2 - Z3\n\n pi_const = torch.tensor([np.pi]).to(self.__device)\n\n return torch.exp(-Z / (2 * (1 - rho ** 2))).to(self.__device) / (\n 2 * pi_const * sig1 * sig2 * torch.sqrt(1 - rho ** 2)\n )", "def estimate_gaussian_params(X):\n mu = X.mean(axis=0)\n var = X.std(axis=0)**2.0\n return mu,var", "def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu\")\n self.modelBuilder.factory_('expr::vbfH_s_func(\"@0-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"1-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::vbfH_sbi_func(\"sqrt(@0)\", mu)')", "def like_one(theta,dt,dmag,sigma):\n\n gamma, A = theta\n aux=(1/np.sqrt(2*np.pi*Veff2(dt,sigma,A,gamma)))*np.exp(-1.0*(dmag**2)/(2.0*Veff2(dt,sigma,A,gamma)))\n\n return aux", "def update_params(x, prior, posterior):\r\n mu0, kappa0, alpha0, beta0 = prior\r\n mu_t, kappa_t, alpha_t, beta_t = posterior\r\n return np.r_[mu0, (kappa_t*mu_t + x)/(kappa_t + 1)], \\\r\n np.r_[kappa0, kappa_t + 1], \\\r\n np.r_[alpha0, alpha_t + 0.5], \\\r\n np.r_[beta0, beta_t + 0.5*kappa_t*(x - mu_t)**2/(kappa_t + 1)]", "def prior_params_tree(self):\n id = {name:i for i, name in enumerate(list(self.tree.keys()))}\n n_nodes = len(id)\n dist_mx = np.zeros((n_nodes, n_nodes))\n\n for node1, edges in self.tree.items():\n for node2, dist in edges.dist:\n dist_mx[id[node1], id[node2]] = dist\n dist_mx[id[node2], id[node1]] = dist\n\n # while np.count_nonzero(dist_mx) < (n_nodes ** 2 - n_nodes):\n for _ in range(20):\n for i, j in combinations(range(n_nodes), 2):\n if dist_mx[i,j] > 0:\n continue\n row_i = dist_mx[i]\n row_j = dist_mx[j]\n value = (row_i + row_j) * (row_i > 0) * (row_j > 0)\n dist_mx[i, j] = dist_mx[j, i] = - max(np.unique(value))\n dist_mx = np.abs(dist_mx)\n\n evolve_rate = []\n for node1, node2 in combinations(self.m_cov.keys(), 2):\n mx_cov_dist = np.abs(self.m_cov[node1] - self.m_cov[node2])\n elements = mx_cov_dist[np.triu_indices(len(mx_cov_dist))]\n norm_elements = elements / dist_mx[id[node2], id[node1]]\n evolve_rate += list(norm_elements)\n\n\n\n df = np.mean([p.shape[0] for _, p in self.m_profiles.items()])\n p_theta_alpha = df/2\n # 
p_theta_alpha = 4\n p_theta_beta = np.percentile(evolve_rate, 75) * (p_theta_alpha - 1)\n # print(p_theta_alpha, p_theta_beta)\n return p_theta_alpha, p_theta_beta", "def __init__(self, \n param_epsilon, \n param_tau,\n param_u, \n param_gamma_left,\n param_gamma_right,\n param_beta):\n self.epsilon = param_epsilon\n self.tau = param_tau\n self.u = param_u\n self.gamma_left = param_gamma_left\n self.gamma_right = param_gamma_right\n \n self.sigma_retarded = 1j * (self.gamma_left + self.gamma_right) / 2.0\n self.sigma_advanced = - self.sigma_retarded;\n \n self.dim = len(self.u)\n self.rho = np.zeros((2**self.dim))\n \n self.beta = param_beta\n \n self.cutoff_chance = 0.0001\n self.external_distribution = False\n self.external_distribution_array = self.distribution()\n self.external_distribution = True", "def _get_parameters(n, j, domain, g, ncap):\n alphas, betas = rc.recurrenceCoefficients(n - 2, lb=domain[0], rb=domain[1],\n j=j, g=g, ncap=ncap)\n omegas = g * np.array(alphas)\n ts = g * np.sqrt(np.array(betas)[1::])\n c0 = np.sqrt(betas[0])\n return omegas, ts, c0", "def aicmle(timeSeries, distribution):\n mlevals = {} \n if distribution == 'pareto':\n mlevals['xmin'] = np.min(timeSeries)\n mlevals['mu'] = 1 - timeSeries.shape[0] / (timeSeries.shape[0] * np.log(mlevals['xmin']) - np.sum(np.log(timeSeries)))\n \n elif distribution == 'lognormal':\n mlevals['mu'] = np.sum(np.log(timeSeries)) / timeSeries.shape[0]\n mlevals['sigma'] = np.sqrt(np.sum( (np.log(timeSeries) - mlevals['mu'])**2) / timeSeries.shape[0])\n \n elif distribution == 'normal':\n mlevals['mu'] = np.mean(timeSeries)\n mlevals['sigma'] = np.sqrt(sum((timeSeries - np.mean(timeSeries))**2) / timeSeries.shape[0])\n \n elif distribution == 'exponential':\n mlevals['lambda'] = 1.0 / np.mean(timeSeries)\n \n elif distribution == 'boundedpl':\n mlevals['xmin'] = np.min(timeSeries)\n mlevals['xmax'] = np.max(timeSeries)\n minmuEstimate = 1.1\n mlevals['mu'] = fmin(lambda mu: -len(timeSeries) * np.log( (mu - 1) / (np.min(timeSeries)**(1 - mu) - np.max(timeSeries)**(1 - mu))) + mu * np.sum(np.log(timeSeries)), minmuEstimate, disp=0)[0]\n\n return mlevals", "def __init__(self, mean=0.0, sigma=1.0):\n super().__init__()\n self.mean = mean\n self.sigma = sigma\n self.hasInfiniteBound = True\n self.type = 'Normal'\n self.distType = 'Continuous'\n self.compatibleQuadrature.append('Hermite')\n self.compatibleQuadrature.append('CDF')\n #THESE get set in initializeDistribution, since it depends on truncation\n #self.preferredQuadrature = 'Hermite'\n #self.preferredPolynomials = 'Hermite'", "def compute_t_params(mu, kappa, alpha, beta):\r\n mu_, sigma2_, dof_ = mu, beta*(kappa + 1)/(alpha*kappa), 2*alpha\r\n return mu_, sigma2_, dof_", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['n' ] = self.n\n paramDict['p' ] = self.p\n return paramDict", "def generate_stat(sample_size, sparsity = 0, amplitude = 0, sigma = 1):\n var = generate_variable(sample_size, sparsity, amplitude, sigma)\n y_obs = var[0]\n \n \"\"\" \n f is equal to -X(t,theta) and we will minimize f (max. 
X)\n \"\"\"\n def f(x):\n \"\"\" \n f(x)=-X(t,theta) where x[0]=t and x[1]=theta\n \"\"\"\n res = np.real(np.exp(-1j*x[1])*\\\n sum(y_obs[k+sample_size]*np.exp(1j*k*x[0]) \\\n for k in range(-sample_size,sample_size+1))) \n res = -res/np.sqrt(2*sample_size+1) \n return res\n \n def grad_f(x):\n \"\"\" \n gradient of f\n \"\"\"\n res1 = np.real(np.exp(-1j*x[1])*\\\n sum(1j*k*y_obs[k+sample_size]*np.exp(1j*k*x[0]) \\\n for k in range(-sample_size,sample_size+1)))\n res1 = -res1/np.sqrt(2*sample_size+1)\n \n res2 = np.real(np.exp(-1j*x[1])*\\\n sum(-1j*y_obs[k+sample_size]*np.exp(1j*k*x[0]) \\\n for k in range(-sample_size,sample_size+1)))\n res2 = -res2/np.sqrt(2*sample_size+1)\n return np.array([res1, res2])\n \n #% Minimizing f\n \n \"\"\" \n we minimize on [0, 2pi]^2\n \"\"\"\n bnds = ((0, 2*np.pi), (0, 2*np.pi))\n \n \"\"\" \n We begin by a greedy search of the initialization point over a grid of size 126^2\n the initialization point is init\n \"\"\"\n x = y = np.arange(0, 2*np.pi, 0.05)\n steps = 126\n X, Y = np.meshgrid(x, y)\n val = np.array([f([x,y]) for x,y in zip(np.ravel(X), np.ravel(Y))])\n init = np.argmin(val)\n x1 = init%steps\n x2 = (init-x1)/steps\n init = [x1*0.05, x2*0.05]\n \n \"\"\" \n we minimize f...\n \"\"\"\n result = sco.minimize(f, init, method=\"L-BFGS-B\",\\\n jac=grad_f, bounds=bnds, tol=1e-15)\n \n \"\"\" \n (t1,theta1) is the argmax of X(t, theta) and l1=$\\lambda_1$\n \"\"\"\n t1 = result.x[0]\n theta1 = result.x[1]\n l1 = -f([t1,theta1])\n \n \n \"\"\" \n Function g(x) is equal to (X(t1,theta1)-X(x))/(1-rho((t1,theta1)-x))\n \"\"\"\n def g(x):\n a0 = x[0]-t1\n a1 = x[1]-theta1\n N = 2*sample_size+1\n \n vec = np.array([a0,a1])\n r = np.linalg.norm(vec)\n \"\"\" \n the value for r=0 is set to l1 (note that r=0 corresponds to x=(t1,theta1))\n \"\"\" \n res = l1 \n \n if (0<r) & (r<0.00001):\n \"\"\" \n we look a values near (t1,theta1) for which an indetermination occurs\n \"\"\" \n alpha= np.arccos(np.clip(a0/np.sqrt(a0**2+a1**2), -1.0, 1.0))\n u0 = np.cos(alpha)\n u1 = np.sin(alpha)\n \"\"\" \n u0,u1 defines the direction (unit vector)\n \"\"\"\n denom = sum((k*np.cos(alpha)-np.sin(alpha))**2*\\\n (np.sinc((r*(k*np.cos(alpha)-np.sin(alpha)))/(2*np.pi)))**2\\\n for k in range(-sample_size,sample_size+1))/N\n \"\"\" \n denom computes the denominator\n \"\"\"\n \n# \"\"\" \n# We use simpson rule for the numerator\n# \"\"\"\n# h = np.linspace(0,1,500)\n# \n# b0 = t1 + h*a0\n# b1 = theta1 + h*a1\n# \n# value = (1-h)*(u0**2*\\\n# np.real(np.exp(-1j*b1)*sum(-k**2*y_obs[k+sample_size]*np.exp(1j*k*b0) \\\n# for k in range(-sample_size,sample_size+1)))\\\n# +2*u0*u1*\\\n# np.real(np.exp(-1j*b1)*sum(k*y_obs[k+sample_size]*np.exp(1j*k*b0) \\\n# for k in range(-sample_size,sample_size+1)))\\\n# +u1**2*\\\n# np.real(np.exp(-1j*b1)*sum((-1)*y_obs[k+sample_size]*np.exp(1j*k*b0) \\\n# for k in range(-sample_size,sample_size+1)))) \n# value = value/np.sqrt(N)\n# \n# num = sci.simps(value, h)\n \n \"\"\" \n we use a quadrature for the numerator\n \"\"\" \n fun_int = lambda w: (1-w)*(u0**2*\\\n np.real(np.exp(-1j*(theta1+w*a1))*\\\n sum(-k**2*y_obs[k+sample_size]*np.exp(1j*k*(t1+w*a0)) \\\n for k in range(-sample_size,sample_size+1)))\\\n +2*u0*u1*\\\n np.real(np.exp(-1j*(theta1+w*a1))*\\\n sum(k*y_obs[k+sample_size]*np.exp(1j*k*(t1+w*a0)) \\\n for k in range(-sample_size,sample_size+1)))\\\n +u1**2*\\\n np.real(np.exp(-1j*(theta1+w*a1))*\\\n sum((-1)*y_obs[k+sample_size]*np.exp(1j*k*(t1+w*a0)) \\\n for k in range(-sample_size,sample_size+1)))) \n \n num = 
np.mean(sci.quad(fun_int, 0, 1, epsabs=1e-15, epsrel=1e-15, limit=1000))\n \n res = -num/denom\n \n if (r>=0.00001):\n \"\"\" \n we look a values far (t1,theta1) for which there is no indetermination\n \"\"\" \n res = (l1+f(x))/(1-(np.cos(a1)*dirichlet(a0,N)/N))\n \n return res \n \"\"\" \n we minimize g on [0, 2pi]^2 an dwe llok for the initialization point\n \"\"\"\n val2 = np.array([g([x,y]) for x,y in zip(np.ravel(X), np.ravel(Y))])\n init2 = np.argmin(val2)\n x1 = init2%steps\n x2 = (init2-x1)/steps\n init2 = [x1*0.05, x2*0.05] \n result2 = sco.minimize(g, init2, method=\"L-BFGS-B\", bounds=bnds, tol=1e-15) \n \"\"\" \n argmin of g\n \"\"\"\n t2 = result2.x[0]\n theta2 = result2.x[1] \n \"\"\" \n value of lambda_2\n \"\"\"\n l21 = l1-result2.fun \n a0 = t2-t1\n a1 = theta2-theta1\n N = 2*sample_size+1\n l22 = l1-(l1+f([t2,theta2]))/(1-(np.cos(a1)*dirichlet(a0,N)/N))\n l2 = max(l21,l22)\n \"\"\" \n we compute the statistic\n \"\"\"\n alpha1 = (1/3)*sample_size*(sample_size+1)\n alpha2 = (1/np.sqrt(N))*\\\n sum((k**2-alpha1)*\\\n np.real(y_obs[k+sample_size]*np.exp(1j*(k*t1-theta1))) \\\n for k in range(-sample_size,sample_size+1))\n alpha3 = (1/np.sqrt(N))*sum(k*np.real(y_obs[k+sample_size]*\\\n np.exp(1j*(k*t1-theta1))) for k in range(-sample_size,sample_size+1)) \n stat = (sigma*(alpha1*l1+alpha2)*scs.norm.pdf(l1/sigma)+\\\n (alpha1*sigma**2-alpha3**2)*(1-scs.norm.cdf(l1/sigma)))/\\\n (sigma*(alpha1*l2+alpha2)*scs.norm.pdf(l2/sigma)+\\\n (alpha1*sigma**2-alpha3**2)*(1-scs.norm.cdf(l2/sigma))) \n \n return stat", "def moments2nd(data):\n nrow,ncol=data.shape\n Isum = data.sum()\n Icol = data.sum(axis=0) # sum over all rows\n Irow = data.sum(axis=1) # sum over all cols\n colgrid = np.arange(ncol)\n rowgrid = np.arange(nrow)\n rowmean=np.sum(rowgrid*Irow)/Isum\n colmean=np.sum(colgrid*Icol)/Isum\n ROW,COL=np.indices((nrow,ncol)) \n rowgrid = rowgrid - rowmean # centered\n colgrid = colgrid - colmean\n Mrr = np.sum(rowgrid**2*data)/Isum\n Mcc = np.sum(colgrid**2*data)/Isum\n Mrc = np.sum(np.outer(rowgrid,colgrid)*data)/Isum \n return Mcc, Mrr, Mrc", "def weights_treatment_parameters(init_dict, GRID):\n GRID = np.linspace(0.01, 0.99, num=99, endpoint=True)\n\n coeffs_untreated = init_dict[\"UNTREATED\"][\"params\"]\n coeffs_treated = init_dict[\"TREATED\"][\"params\"]\n cov = construct_covariance_matrix(init_dict)\n x = simulate_covariates(init_dict)\n\n # We take the specified distribution for the cost shifters from the paper.\n cost_mean, cost_sd = -0.0026, np.sqrt(0.270)\n v_mean, v_sd = 0.00, np.sqrt(cov[2, 2])\n\n eval_points = norm.ppf(GRID, loc=v_mean, scale=v_sd)\n\n ate_weights = np.tile(1.0, 99)\n tut_weights = norm.cdf(eval_points, loc=cost_mean, scale=cost_sd)\n\n tt_weights = 1 - tut_weights\n\n def tut_integrand(point):\n eval_point = norm.ppf(point, loc=v_mean, scale=v_sd)\n return norm.cdf(eval_point, loc=cost_mean, scale=cost_sd)\n\n def tt_integrand(point):\n eval_point = norm.ppf(point, loc=v_mean, scale=v_sd)\n return norm.cdf(eval_point, loc=cost_mean, scale=cost_sd)\n\n # Scaling so that the weights integrate to one.\n tut_scaling = quad(tut_integrand, 0.01, 0.99)[0]\n tut_weights /= tut_scaling\n\n tt_scaling = quad(tt_integrand, 0.01, 0.99)[0]\n tt_weights /= tt_scaling\n\n mte = mte_information(coeffs_treated, coeffs_untreated, cov, GRID, x, init_dict)\n\n return ate_weights, tt_weights, tut_weights, mte", "def _second_moment(R, sig_l, sig_m, lum, mass, Mbh, beta, tensor,\n sigmaPsf, normPsf, step, nrad, surf_l, pixSize):\n if (max(sigmaPsf) > 0) and (pixSize > 
0): # PSF convolution\n\n # Kernel step is 1/4 of largest value between sigma(min) and 1/2 pixel side.\n # Kernel half size is the sum of 3*sigma(max) and 1/2 pixel diagonal.\n #\n if step == 0:\n step = max(pixSize/2., np.min(sigmaPsf))/4.\n mx = 3*np.max(sigmaPsf) + pixSize/np.sqrt(2)\n\n # Make grid linear in log of radius RR\n #\n rmax = np.max(R) + mx # Radius of circle containing all data + convolution\n logRad = np.linspace(np.log(step), np.log(rmax), nrad) # Linear grid in log(RR)\n rr = np.exp(logRad)\n\n # The model Vrms computation is only performed on the radial grid\n # which is then used to interpolate the values at any other location\n #\n wm2Pol = np.empty_like(rr)\n mgePol = np.empty_like(rr)\n rup = 3*np.max(sig_l)\n for j in range(rr.size): # Integration of equation (50)\n wm2Pol[j] = quadva(_integrand, [rr[j], rup],\n args=(sig_l, sig_m, lum, mass, Mbh, rr[j], beta, tensor))[0]\n mgePol[j] = np.sum(surf_l * np.exp(-0.5*(rr[j]/sig_l)**2))\n\n nx = np.ceil(rmax/step)\n x1 = np.linspace(-nx, nx, 2*nx)*step\n xCar, yCar = np.meshgrid(x1, x1) # Cartesian grid for convolution\n\n # Interpolate MGE model and Vrms over cartesian grid\n #\n r1 = 0.5*np.log(xCar**2 + yCar**2) # Log radius of cartesian grid\n wm2Car = np.interp(r1, logRad, wm2Pol)\n mgeCar = np.interp(r1, logRad, mgePol)\n\n nk = np.ceil(mx/step)\n kgrid = np.linspace(-nk, nk, 2*nk)*step\n xgrid, ygrid = np.meshgrid(kgrid, kgrid) # Kernel is square\n\n # Compute kernel with equation (A6) of Cappellari (2008).\n # Normalization is irrelevant here as it cancels out.\n #\n kernel = np.zeros_like(xgrid)\n dx = pixSize/2\n sp = np.sqrt(2)*sigmaPsf\n for j in range(len(sigmaPsf)):\n kernel += normPsf[j] \\\n * (special.erf((dx-xgrid)/sp[j]) + special.erf((dx+xgrid)/sp[j])) \\\n * (special.erf((dx-ygrid)/sp[j]) + special.erf((dx+ygrid)/sp[j]))\n kernel /= np.sum(kernel)\n\n # Seeing and aperture convolution with equation (A3)\n #\n muCar = np.sqrt(signal.fftconvolve(wm2Car, kernel, mode='same')\n / signal.fftconvolve(mgeCar, kernel, mode='same'))\n\n # Interpolate convolved image at observed apertures.\n # Aperture integration was already included in the kernel.\n #\n mu = bilinear_interpolate(x1, x1, muCar, R/np.sqrt(2), R/np.sqrt(2))\n\n else: # No PSF convolution: just compute values\n\n mu = np.empty_like(R)\n rmax = 3*np.max(sig_l)\n for j in range(R.size):\n wm2Pol = quadva(_integrand, [R[j], rmax],\n args=(sig_l, sig_m, lum, mass, Mbh, R[j], beta, tensor))[0]\n mgePol = np.sum( surf_l * np.exp(-0.5*(R[j]/sig_l)**2) )\n mu[j] = np.sqrt(wm2Pol/mgePol)\n\n return mu", "def get_thrust_and_moment(self):\n\n f1 = self.k_f * self.omega_1 ** 2\n f2 = self.k_f * self.omega_2 ** 2\n \n # c is often used to indicate \"collective\" thrust\n c = f1 + f2\n \n M_x = (f1 - f2) * self.l\n return c, M_x", "def d2(self):\r\n return self.d1() - self.sigma*self.t**0.5", "def d2(self):\n d1 = self.d1()\n return d1 - self.sigma * (self.t **(0.5))", "def get_dists_2():\n d1 = Distribution(['0', '1'], [1 / 2, 1 / 2])\n d2 = Distribution(['0', '1'], [1 / 3, 2 / 3])\n d3 = Distribution(['0', '1'], [2 / 5, 3 / 5])\n return d1, d2, d3", "def get_hyperparameter_bounds():\n minf = float(\"-inf\")\n inf = float(\"inf\")\n params = dict(mu=(minf,inf), nu=(0.0 ,inf), r=(0.0, inf), s=(0.0, inf))\n return params", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['low' ] = self.low\n paramDict['high' ] = self.high\n paramDict['alpha'] = self.alpha\n paramDict['beta' ] = self.beta\n return paramDict", "def 
getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['range'] = self.range\n return paramDict\n # no other additional parameters required", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['lambda'] = self.lambdaVar\n paramDict['k' ] = self.k\n paramDict['low' ] = self.low\n return paramDict", "def doParametersOfInterest(self):\n ''' ref : physicsmodel -> rvf\n self.modelBuilder.out.var(\"MH\").setRange(float(self.mHRange[0]),float(self.mHRange[1]))\n self.modelBuilder.out.var(\"MH\").setConstant(False)\n '''\n\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doVar(\"Fvbf[0,0,1]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu,Fvbf\")\n self.modelBuilder.doVar(\"\")\n self.modelBuilder.factory_('expr::ggH_s_func(\"(@0-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_b_func(\"(1-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_sbi_func(\"sqrt(@0)*(1.-@1)\", mu,Fvbf)')\n\n self.modelBuilder.factory_('expr::vbfH_s_func(\"(@0-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"(1-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_sbi_func(\"sqrt(@0)*(@1)\", mu,Fvbf)')", "def MyBaseMoments(p,q,img,gauss_sigma,gauss_centroid=None, gauss_g1=0., gauss_g2=0.):\n weight = galsim.Image(np.zeros_like(img.array))\n gauss = galsim.Gaussian(sigma=gauss_sigma*pixel_scale).shear(g1=gauss_g1,g2=gauss_g2)\n if gauss_centroid is None:\n gauss_centroid = img.true_center\n weight = gauss.drawImage(image=weight, scale=pixel_scale, method='no_pixel', use_true_center=True, offset=(gauss_centroid-img.true_center)*(1))\n x = np.linspace(img.xmin-img.center.x*0-gauss_centroid.x*1, img.xmax-img.center.x*0-gauss_centroid.x*1, img.xmax-img.xmin+1)+0.*0.5\n y = np.linspace(img.ymin-img.center.y*0-gauss_centroid.y*1, img.ymax-img.center.y*0-gauss_centroid.y*1, img.ymax-img.ymin+1)+0.*0.5\n X, Y = np.meshgrid(x,y)\n\n Q00 = np.sum(weight.array*img.array)\n Q10 = gauss_centroid.x + np.sum(X*weight.array*img.array)/Q00\n Q01 = gauss_centroid.y + np.sum(Y*weight.array*img.array)/Q00\n Q20 = np.sum((X**2)*weight.array*img.array)\n Q02 = np.sum((Y**2)*weight.array*img.array)\n\n monomial = 1.\n for pp in xrange(p):\n monomial *= X\n for qq in xrange(q):\n monomial *= Y\n Qpq = np.sum(monomial*weight.array*img.array) #/Q00\n\n return Qpq", "def params(self):\n return {'cfg': self.cfg,\n 'momentum': self.momentum,\n 'center': self.center,\n 'scale': self.scale,\n 'epsilon': self.epsilon,\n 'act_fn': self.act_fn}", "def initialize(self):\n\t\tmu = 0\n\t\tsigma = np.sqrt(2 / self.dataset[\"d\"])\n\n\t\tself.F1 = np.random.normal(mu, sigma, self.F1.shape)\n\t\tself.F2 = np.random.normal(mu, sigma, self.F2.shape)\n\t\tself.W = np.random.normal(mu, sigma, self.W.shape)\n\n\t\tself.F1_momentum = np.zeros(self.F1.shape)\n\t\tself.F2_momentum = np.zeros(self.F2.shape)\n\t\tself.W_momentum = np.zeros(self.W.shape)", "def init_hyperparameters():\n alpha = .8\n alpha2 = 1\n\n return alpha, alpha2", "def _get_params(self):\r\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", "def _get_params(self):\r\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", "def logdprior(parameters, hyperparameters):\n sigma_w_part = parameters[0] + 
invgamma_logpdf(parameters[0],\n hyperparameters[\"sigma_w_shape\"], hyperparameters[\"sigma_w_scale\"])\n sigma_v_part = parameters[1] + invgamma_logpdf(parameters[1], hyperparameters[\"sigma_v_shape\"], hyperparameters[\"sigma_v_scale\"])\n return sigma_w_part + sigma_v_part", "def initializeDistribution(self):\n if self.functionType == 'CDF':\n self._distribution = distribution1D.BasicMultiDimensionalInverseWeight(str(self.dataFilename), self.p,True)\n else:\n self._distribution = distribution1D.BasicMultiDimensionalInverseWeight(str(self.dataFilename), self.p,False)\n self.dimensionality = self._distribution.returnDimensionality()\n self.lowerBound = [self.returnLowerBound(dim) for dim in range(self.dimensionality)]\n self.upperBound = [self.returnUpperBound(dim) for dim in range(self.dimensionality)]", "def get_initial_params(self, x, y, yerr):\n# p0 = zeros(self.max_phonons + 1)\n p0 = zeros(2)\n p0[0] = 100\n p0[1] = .1\n return p0", "def gaussian_parameters(h, dim=-1):\n m, h = torch.split(h, h.size(dim) // 2, dim=dim)\n v = F.softplus(h) + 1e-8\n return m, v", "def moments(values):\n\n meanValue = numpy.mean(values)\n return (meanValue,\n numpy.sqrt(moment(values, meanValue, 2)),\n moment(values, meanValue, 3),\n moment(values, meanValue, 4))" ]
[ "0.6455378", "0.6434524", "0.6434524", "0.6185545", "0.6109156", "0.6106636", "0.60708535", "0.60512894", "0.60178155", "0.5966822", "0.59502286", "0.58735156", "0.5850575", "0.58171284", "0.5816514", "0.57661724", "0.5720821", "0.57173246", "0.57122564", "0.5709464", "0.57005703", "0.56566393", "0.56566393", "0.56566393", "0.5650613", "0.5647901", "0.56205666", "0.56183773", "0.5581819", "0.5580499", "0.5579431", "0.5574771", "0.55595124", "0.5549218", "0.5547417", "0.55450284", "0.55362755", "0.5533073", "0.55328864", "0.55292463", "0.55152035", "0.55004686", "0.54993844", "0.54803777", "0.547777", "0.54711723", "0.54631597", "0.5454735", "0.54525715", "0.5436794", "0.54347324", "0.54311454", "0.54289407", "0.5415726", "0.5407236", "0.540343", "0.5398714", "0.5398714", "0.53965294", "0.5371645", "0.5370123", "0.5353437", "0.5351195", "0.5346446", "0.5343545", "0.53380007", "0.53346354", "0.5332762", "0.53268677", "0.53158087", "0.53155935", "0.5310139", "0.5309718", "0.5300877", "0.5296502", "0.5292304", "0.5292062", "0.528314", "0.5280636", "0.5273658", "0.52717936", "0.5267175", "0.5266302", "0.5263959", "0.525519", "0.52547693", "0.5243865", "0.52428687", "0.5240332", "0.52383465", "0.52359915", "0.5235011", "0.5232048", "0.5220906", "0.5220906", "0.5220359", "0.521662", "0.5211572", "0.5206511", "0.52055675" ]
0.6220392
3
Date the activity was created.
def creation_date(self) -> str: return pulumi.get(self, "creation_date")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def date_created(self) -> datetime:\n return self._date_created", "def created_date(self):\n return self._created_date", "def created_date(self):\n return self._created_date", "def date_created(self):\n return self._date_created", "def date_created(self):\n return self._date_created", "def date_created(self):\n return self._date_created", "def date(self):\n return DateTime(self.created)", "def date_created(self) -> str:\n return pulumi.get(self, \"date_created\")", "def get_creation_time(self):\n return self.get_attr('date_created')", "def created_date_time(self) -> str:\n return pulumi.get(self, \"created_date_time\")", "def created_on(self):\n return self.get_time(\"created_on\")", "def date(self):\n return self.status.created_at", "def getCreationDate(self):\n return self._creationDate", "def created_time(self) -> datetime.datetime:\n return self.__created_time", "def created_time(self) -> datetime.datetime:\n return self.__created_time", "def GetDateCreated(self):\n return str(self.datecreated)", "def getCreatedDate(self):\n return _libsbml.ModelHistory_getCreatedDate(self)", "def creation_date(self) -> datetime:\n date_string = self.get_main_information()['MainDicomTags']['InstanceCreationDate']\n time_string = self.get_main_information()['MainDicomTags']['InstanceCreationTime']\n\n return util.make_datetime_from_dicom_date(date_string, time_string)", "def get_creation_time(self):\n return self.creation_time", "def date_active(self):\n return datetime.datetime.fromtimestamp(self.fields['activityDate'])", "def creation_date_time(self) -> Optional[str]:\n return pulumi.get(self, \"creation_date_time\")", "def creation_time(self) -> str:\n return pulumi.get(self, \"creation_time\")", "def creation_time(self) -> str:\n return pulumi.get(self, \"creation_time\")", "def creation_time(self) -> str:\n return pulumi.get(self, \"creation_time\")", "def get_account_created_date(self):\n return self.account_created_date", "def created_at(self) -> datetime.datetime:\n return self._created_at", "def creationTime(self):\n \n if not self.logMessage is None :\n return self.logMessage[\"date\"]", "def created_on(self):\n return self._created_on", "def created_on(self):\n return self._created_on", "def created_on(self):\n return self._created_on", "def created_on(self):\n return self._created_on", "def get_created_at(self, instance):\n return instance.created_at.strftime(\"%B %d, %Y\")", "def get_created_at(self, instance):\n return instance.created_at.strftime(\"%B %d, %Y\")", "def get_created_at(self, instance):\n return instance.created_at.strftime(\"%B %d, %Y\")", "def create_time(self):\n return self._create_time", "def create_time(self):\n return self._create_time", "def create_time(self):\n return self._create_time", "def created(self) -> datetime.datetime:\n # REMARK: On Unix systems getctime() returns the time of most recent\n # metadata change, but not the creation.\n # https://stackoverflow.com/questions/237079/how-do-i-get-file-creation-and-modification-date-times\n # https://docs.python.org/3/library/os.html#os.stat_result\n if platform.system() == \"Windows\":\n timestamp = os.path.getctime(self._manifest_path)\n\n else:\n stat = os.stat(self._manifest_path)\n try:\n timestamp = stat.st_birthtime\n except AttributeError:\n timestamp = stat.st_mtime\n\n return datetime.datetime.fromtimestamp(timestamp)", "def created_at(self):\n return self._domain.created_at", "def created(self) -> datetime:\n return datetime.strptime(self.data['created_at'],\n '%Y-%m-%dT%H:%M:%S.%fZ')", "def 
created_at(self):\n created_at = self.joined_at\n if created_at is None:\n created_at = DISCORD_EPOCH_START\n \n return created_at", "def creation_timestamp(self) -> str:\n return pulumi.get(self, \"creation_timestamp\")", "def creation_timestamp(self) -> str:\n return pulumi.get(self, \"creation_timestamp\")", "def creation_timestamp(self) -> str:\n return pulumi.get(self, \"creation_timestamp\")", "def time_created(self):\n return self._time_created", "def time_created(self):\n return self._time_created", "def time_created(self):\n return self._time_created", "def time_created(self):\n return self._time_created", "def created_at(self):\n return self._created_at", "def created_at(self):\n return self._created_at", "def created_at(self):\n return self._created_at", "def created_at(self):\n return self._created_at", "def create_at(self):\n return self._create_at", "def created_at(self) -> str:\n return pulumi.get(self, \"created_at\")", "def created_at(self) -> str:\n return pulumi.get(self, \"created_at\")", "def created_at(self) -> str:\n return pulumi.get(self, \"created_at\")", "def created(self):\n return datetime.utcfromtimestamp(self.create_ts)", "def created(self) -> str:\n return pulumi.get(self, \"created\")", "def created(self) -> str:\n return pulumi.get(self, \"created\")", "def created(self) -> str:\n return pulumi.get(self, \"created\")", "def created_at(self):", "def ship_date(self):\n return self.created.date()", "def CreatedAt(self):\n return self._created_at", "def created_at(self):\n return self.viztrail.created_at", "def creation_datetime(self) -> datetime:\n return utc_to_local(self._db_data.creation_datetime)", "def created_on(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"created_on\")", "def Created(self):\n return self._get_attr('Created')", "def created_timestamp(self):\n return self._created_timestamp", "def prepare_actor_created_date(self, object):\n if object.actor_created is not None:\n return object.actor_created.date()\n else:\n return ''", "def created_at(self) -> \"datetime\":\n return self._attrs.get(\"createdAt\")", "def created_at(self) -> \"datetime\":\n return self._attrs.get(\"createdAt\")", "def created_at(self) -> \"datetime\":\n return self._attrs.get(\"createdAt\")", "def create(self):\n self.created_date = timezone.now()\n self.save()", "def created_at(self) -> datetime:\n return util.to_datetime(self.doc.get('createdAt'))", "def created_at(self):\n return self.getattr('created_at')", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def 
create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")", "def created_at(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"created_at\")", "def created_at(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"created_at\")", "def created_at(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"created_at\")", "def created_at(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"created_at\")" ]
[ "0.8107826", "0.7986173", "0.7986173", "0.797429", "0.797429", "0.797429", "0.7832342", "0.78072244", "0.7796537", "0.7792625", "0.76402485", "0.75940555", "0.7574318", "0.75237525", "0.75237525", "0.75183827", "0.7475857", "0.74230283", "0.73979384", "0.7372514", "0.7362803", "0.73345935", "0.73345935", "0.73345935", "0.7326675", "0.73040885", "0.72915304", "0.7272864", "0.7272864", "0.7272864", "0.7272864", "0.7268665", "0.7268665", "0.7268665", "0.7264335", "0.7264335", "0.7264335", "0.7228239", "0.720925", "0.71971023", "0.7189651", "0.7188786", "0.7188786", "0.7188786", "0.71818775", "0.71818775", "0.71818775", "0.71818775", "0.7177616", "0.7177616", "0.7177616", "0.7177616", "0.7175804", "0.7172468", "0.7172468", "0.7172468", "0.7164514", "0.71526116", "0.71526116", "0.71526116", "0.7146112", "0.71457726", "0.7144979", "0.70822793", "0.70509106", "0.7047033", "0.70460594", "0.7041155", "0.7039157", "0.70338184", "0.70338184", "0.70338184", "0.700291", "0.6991079", "0.6985807", "0.6963726", "0.6963726", "0.6963726", "0.6963726", "0.6963726", "0.6963726", "0.6963726", "0.6963726", "0.6963726", "0.6963726", "0.6963726", "0.6963726", "0.6963726", "0.6963726", "0.6963726", "0.6963726", "0.6963726", "0.6963726", "0.6963726", "0.69629943", "0.69629943", "0.69629943", "0.69629943" ]
0.8029383
3
The provider-assigned unique ID for this managed resource.
def id(self) -> str: return pulumi.get(self, "id")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def provider_id(self):\n return self.get('_id')", "def provider_id(self):\n raise NotImplementedError", "def id(self):\n return self.raw_resource.uuid", "def healthcare_provider_id(self):\n return self._healthcare_provider_id", "def unique_identifier(self) -> str:\n return pulumi.get(self, \"unique_identifier\")", "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "def unique_id(self) -> str:\n return pulumi.get(self, \"unique_id\")", "def unique_id(self):\r\n return f\"{DOMAIN}_{self.charge_point_id}_{self.connector_id}\"", "def unique_id(self) -> str:\n return self._unique_id", "def unique_id(self) -> str:\n return self._unique_id", "def unique_id(self) -> str:\n return self._unique_id", "def unique_id(self) -> str:\n return self._unique_id", "def unique_id(self) -> str:\n return self._unique_id", "def unique_id(self) -> str:\n return self._unique_id", "def unique_id(self) -> str:\n return self._unique_id", "def unique_id(self) -> str:\n return self._unique_id", "def unique_id(self):\n return self._uuid", "def unique_id(self):\n return self._uuid", "def unique_id(self) -> str:\n return self._uid", "def unique_id(self):\n return f\"{self.device.id}-{self.key}\"", "def unique_id(self):\n return self.properties.get(\"UniqueId\", None)", "def internal_id(self) -> str:\n return pulumi.get(self, \"internal_id\")", "def unique_id(self) -> str:\n return f\"{self._host}_{self._name}_{self._unique_id}\"", "def custom_id(self) -> str:\n return self._underlying.custom_id", "def unique_id(self):\n return self._id", "def unique_id(self):\n return self._id", "def unique_id(self):\n return (\n \"a80f3d5b-df3d-4e38-bbb7-1025276830cd\"\n )", "def get_objectID(self):\n return self.resource.uuid", "def unique_id(self):\n return self._uid", "def unique_id(self):\n return self._uid", "def unique_id(self):\n return self._uid", "def unique_id(self):\n id = \"{}{}{}\".format(\n DOMAIN, self._account, self.sensorName.lower().replace(\" \", \"\")\n )\n return id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self) -> str:\n return self.get_unique_id(wallet=self.wallet_id, nonce=self.nonce)", "def unique_id(self):\n return self.device_id", "def get_id(self):\n 
\"\"\"Requires use of Python 3\"\"\"\n return str(self.id)", "def resourceid(self):", "def unique_id(self) -> str:\n return '{0}_{1}'.format(self._mac.replace(':', ''), self.entity_id)", "def unique_id(self):\n return self._device_id", "def getid(self):\n if self.Id is None:\n return self.internalid\n else:\n return self.Id", "def getid(self):\n if self.Id is None:\n return self.internalid\n else:\n return self.Id", "def getid(self):\n if self.Id is None:\n return self.internalid\n else:\n return self.Id", "def getid(self):\n if self.Id is None:\n return self.internalid\n else:\n return self.Id", "def getid(self):\n if self.Id is None:\n return self.internalid\n else:\n return self.Id", "def getid(self):\n if self.Id is None:\n return self.internalid\n else:\n return self.Id", "def get_id(self):\n return str(self._id)", "def get_id(self):\n return str(self._id)", "def identity(self) -> str:\n return self.requester.uuid", "def get_id(self) -> str:\n return self._register_id", "def get_unique_id(self):\n if not self.unique_id:\n self.unique_id = uuid.uuid4().hex\n return self.unique_id", "def unique_id(self):\n return f\"{self.config_entry.entry_id}_{self.hub_name}_{self.sensor_name}\"", "def getID(self):\n return str(self._storage_id)", "def id(self):\n return self.raw_resource[\"id\"]", "def get_id(self): # real signature unknown; restored from __doc__\n return \"\"", "def id(self) -> str:\r\n return self._id", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n pass", "def identifier(self):\n return self.__id", "def get_id(self):\n return self.uid", "def resource_id(self) -> Optional[str]:\n return pulumi.get(self, \"resource_id\")", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id" ]
[ "0.8193402", "0.7851373", "0.77124894", "0.7604287", "0.7477648", "0.7476093", "0.7476093", "0.7476093", "0.7425807", "0.7380237", "0.7371964", "0.7371964", "0.7371964", "0.7371964", "0.7371964", "0.7371964", "0.7371964", "0.7371964", "0.735787", "0.735787", "0.73477197", "0.7291611", "0.72812176", "0.72517675", "0.7251651", "0.7218092", "0.7211636", "0.7211636", "0.7201574", "0.7181422", "0.7166036", "0.7166036", "0.7166036", "0.7138984", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.7133902", "0.7126198", "0.7119549", "0.71155995", "0.70892346", "0.7068222", "0.7059289", "0.7059289", "0.7059289", "0.7059289", "0.7059289", "0.7059289", "0.70582974", "0.70582974", "0.7053728", "0.70350826", "0.70212394", "0.7020135", "0.7014936", "0.7014571", "0.70135075", "0.7007213", "0.69911283", "0.69911283", "0.69911283", "0.69911283", "0.6989271", "0.69725364", "0.69559777", "0.6937233", "0.6937233", "0.6937233", "0.6937233", "0.6937233", "0.6937233", "0.6937233", "0.6937233", "0.6937233", "0.6937233", "0.6937233", "0.6937233", "0.6937233" ]
0.0
-1
Provides a Step Functions Activity data source

Example Usage

```python
import pulumi
import pulumi_aws as aws

sfn_activity = aws.sfn.get_activity(name="my-activity")
```
def get_activity(arn: Optional[str] = None, name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetActivityResult: __args__ = dict() __args__['arn'] = arn __args__['name'] = name opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts) __ret__ = pulumi.runtime.invoke('aws:sfn/getActivity:getActivity', __args__, opts=opts, typ=GetActivityResult).value return AwaitableGetActivityResult( arn=pulumi.get(__ret__, 'arn'), creation_date=pulumi.get(__ret__, 'creation_date'), id=pulumi.get(__ret__, 'id'), name=pulumi.get(__ret__, 'name'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_activities():\n pass", "def get_activity_output(arn: Optional[pulumi.Input[Optional[str]]] = None,\n name: Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetActivityResult]:\n ...", "def construct_strava_activity_data(activity):\n # if the timestamp has been saved then use this over converting the other one\n # issues with server tz so better to use the timestamp at the point the activity record was created\n if activity.iso_timestamp:\n local_time = activity.iso_timestamp\n else:\n local_time = activity.local_timestamp.isoformat()\n\n data = {'name': activity.title,\n 'type': STRAVA_ACTIVITIES_LOOKUP[activity.type],\n 'start_date_local': local_time,\n 'elapsed_time': activity.duration * 60, # need to convert to seconds, stored in db as minutes\n 'description': activity.description}\n\n if activity.distance is not None and activity.distance > 0:\n data['distance'] = activity.distance * 1000 # Strava API requires distance in m, stored in db as km\n\n return data", "def _read_activity(session_path: Path):\n # Read activity file\n df_act = pd.read_csv(\n session_path / ACTIVITY_FILE,\n names=ACTIVITY_FILE_COLUMNS,\n usecols=[\n \"subject\",\n \"session_number\",\n \"start_time\",\n \"end_time\",\n \"gesture_scenario\",\n \"task_id\",\n ],\n header=None,\n engine=\"c\",\n )\n # Timestamps as additional datetime columns\n df_act[\"start_time_dt\"] = pd.to_datetime(df_act[\"start_time\"], unit=\"ms\")\n df_act[\"end_time_dt\"] = pd.to_datetime(df_act[\"end_time\"], unit=\"ms\")\n\n return df_act", "def get_activity(variable):\n project = variable['project']\n try:\n exp = variable['exp']\n if isinstance(exp, list):\n return [CMOR_TABLES[project].activities[value][0] for value in exp]\n return CMOR_TABLES[project].activities[exp][0]\n except (KeyError, AttributeError):\n return None", "def get_activities(ts_activity, access_token):\n params = {'after': ts_activity, 'access_token': access_token}\n url = \"https://www.strava.com/api/v3/activities\"\n response = return_json(url, \"GET\", parameters=params)\n return response", "def activity():\n return {\n \"type\": \"class\",\n \"base\": None,\n \"is_abstract\": True,\n \"is_document\": True,\n \"pstr\": (\"{}\", (\"canonical_name\",)),\n \"properties\": [\n (\n \"alternative_names\",\n \"str\",\n \"0.N\",\n \"List of names by which the activity is also known.\",\n ),\n (\n \"canonical_name\",\n \"str\",\n \"0.1\",\n \"Community defined identifier or name.\",\n ),\n (\n \"citations\",\n \"linked_to(shared.citation)\",\n \"0.N\",\n \"Set of pertinent citations.\",\n ),\n (\n \"description\",\n \"str\",\n \"0.1\",\n \"Description of what is to be done (or was done).\",\n ),\n (\n \"duration\",\n \"time.time_period\",\n \"0.1\",\n \"Time the activity was (or will be) active.\",\n ),\n (\n \"internal_name\",\n \"str\",\n \"0.1\",\n \"A name used for internal purposes.\",\n ),\n (\"keywords\", \"str\", \"0.1\", \"User defined keywords.\"),\n (\"long_name\", \"str\", \"0.1\", \"Longer version of activity name.\"),\n (\"name\", \"str\", \"1.1\", \"Short name or abbreviation.\"),\n (\n \"responsible_parties\",\n \"shared.responsibility\",\n \"0.N\",\n \"People or organisations responsible for activity.\",\n ),\n (\n \"previously_known_as\",\n \"str\",\n \"0.N\",\n \"List of names by which the activity was formerly known.\",\n ),\n (\n \"rationale\",\n \"str\",\n \"0.1\",\n \"Explanation of why this activity was carried out and/or what \"\n \"it was intended to achieve.\",\n ),\n ],\n 
}", "def get_activity_object(activity_name, settings, logger, conn, token, activity_task):\n full_path = \"activity.\" + activity_name + \".\" + activity_name\n f = eval(full_path)\n # Create the object\n activity_object = f(settings, logger, conn, token, activity_task)\n return activity_object", "def activity(self, activity_id):\r\n return resources.Activity(self, activity_id)", "def _request_activity_data(self, athlete, filename):\n response = self._get_request(self._activity_endpoint(athlete, filename)).json()\n\n activity = pd.DataFrame(response['RIDE']['SAMPLES'])\n activity = activity.rename(columns=ACTIVITY_COLUMN_TRANSLATION)\n\n activity.index = pd.to_timedelta(activity.time, unit='s')\n activity.drop('time', axis=1, inplace=True)\n\n return activity[[i for i in ACTIVITY_COLUMN_ORDER if i in activity.columns]]", "def test_get_activity(self):\n pass", "def test_get_activity(self):\n pass", "def get_activity():\n try:\n activity = Activity.objects.filter(active=1).latest('id')\n except Activity.DoesNotExist:\n activity = None\n return activity", "def get_activity(self, filename):\n return self._request_activity_data(self.athlete, filename)", "def get_continuous_activity(self):\n from .continuousactivity import DSSContinuousActivity\n return DSSContinuousActivity(self.client, self.project_key, self.recipe_name)", "def get_activity_stream(token, activity, types, series_type='time', resolution='high'):\n types = ','.join(types)\n params = {'access_token': token}\n url = f'https://www.strava.com/api/v3/activities/{activity}/streams/{types}&series_type={series_type}&resolution={resolution}&key_by_type='\n response = return_json(url, \"GET\", parameters=params, timeout=10)\n return response", "def test_get_activities(self):\n pass", "def activity(self, activity_id):\r\n return activities.Activity(self, activity_id)", "def getactivity(self) -> Optional[ba.Activity]:\n stats = self._stats()\n if stats is not None:\n return stats.getactivity()\n return None", "def by_activity(cls,site_id=0,activity=None):\n return meta.DBSession.query(Activity).filter_by(site_id=site_id,activity=activity).all()", "def get_current_activity(client):\n func = client.get_current_activity()\n activity_id = run_in_loop_now('get_current_activity', func)\n label = activities_by_id[str(activity_id)]\n return label", "def getUserActivities(context, request):\n mmdb = MADMaxDB(context.db)\n query = {}\n query['actor.username'] = request.actor['username']\n query['verb'] = 'post'\n chash = request.params.get('context', None)\n if chash:\n query['contexts.hash'] = chash\n\n is_head = request.method == 'HEAD'\n activities = mmdb.activity.search(query, sort=\"_id\", keep_private_fields=False, flatten=1, count=is_head, **searchParams(request))\n\n handler = JSONResourceRoot(activities, stats=is_head)\n return handler.buildResponse()", "def activity(self):\n return self._activity", "def fetch(self, activity):\n return None, None", "def get_activity_name(activityType):\n return \"activity_\" + activityType", "def get_activities(self, user_id=None, group_id=None, app_id=None,\n activity_id=None, start_index=0, count=0):\n raise NotImplementedError()", "def manipulate_activity():\n pass", "def get_activity_data(self, rid):\n raise NotImplementedError", "def fetch_github_activity(gen, metadata):\n\n if \"GITHUB_ACTIVITY_FEED\" in gen.settings.keys():\n gen.context[\"github_activity\"] = gen.plugin_instance.fetch()", "def convert_activity(ast):\n\n if len(ast.args) > 1:\n logger.error(f\"Activity should not have more than 1 
argument {ast.to_string()}\")\n\n p_arg = ast.args[0] # protein argument\n print(\"p_arg\", p_arg)\n ma_arg = Function(\"ma\", version=version)\n ma_arg.add_argument(StrArg(ast.name, ma_arg))\n p_arg.change_parent_fn(ma_arg)\n ast = Function(\"activity\", version=version)\n p_arg.change_parent_fn(ast)\n ast.add_argument(p_arg)\n ast.add_argument(ma_arg)\n\n return ast", "def getActivity(self):\n return self.activity", "def all_activity(self):\n\t\tself.db = DB()\n\t\tactivity_all = self.db.select_all_from(\"activity\")\n\t\ttmpl = lookup.get_template(\"activity.html\")\n\t\treturn (tmpl.render(activity=activity_all))", "def activities(self, start=None, limit=None, done=None, exclude=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/activities'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json", "def activities(self, start=None, limit=None, done=None, exclude=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/activities'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json", "def activities(self, start=None, limit=None, done=None, exclude=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/activities'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json", "def activities(self, start=None, limit=None, done=None, exclude=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/activities'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json", "def start_activity(self,param={},ignore_error_handle = False):\n message = {};\n step = 'start activity by app package \\'' + param.get('package') + '\\' and activity name \\'' + param.get('activity') + '\\'';\n package = param.get('package');\n activity = param.get('activity');\n try:\n self.driver.start_activity(package,activity);\n message = self.feedback.feedback_action_ok(step);\n except BaseException,e:\n message = self.feedback.feedback_action_fail(step,str(e),ignore_error_handle);\n finally:\n return message;", "def getactivity(self) -> Optional[ba.Activity]:\n if self._activity is None:\n return None\n return self._activity()", "def activities(self):\r\n return resources.Activities(self)", "def action(self):\n return self.rowTime.activity", "def unserialise_activity_json(pathName):\n to_return = []\n\n try:\n with open(pathName) as json_file:\n json_data = json.load(json_file)\n\n for activity in json_data[\"data\"]:\n to_return.append(\n Activity(\n activity[\"ComInsee\"],activity[\"ComLib\"],\n activity[\"EquipementId\"],activity[\"EquNbEquIdentique\"],\n activity[\"ActCode\"],\n activity[\"ActLib\"],activity[\"EquActivitePraticable\"],\n activity[\"EquActivitePratique\"],\n activity[\"EquActiviteSalleSpe\"],\n activity[\"ActNivLib\"]\n )\n )\n except FileNotFoundError:\n print(\"bad path of activity json file\")\n except KeyError:\n print(\"bad json file, see documentation of activity for see how is construct this object\")\n return to_return", "def test_get_activity(self):\n activity = self.client.get_activity(96089609)\n self.assertEquals('El Dorado County, CA, USA', activity.location_city)\n\n self.assertIsInstance(activity.start_latlng, attributes.LatLon)\n self.assertAlmostEquals(-120.4357631, activity.start_latlng.lon, places=2)\n self.assertAlmostEquals(38.74263759999999, activity.start_latlng.lat, places=2)\n\n self.assertIsInstance(activity.map, model.Map)\n\n self.assertIsInstance(activity.athlete, model.Athlete)\n self.assertEquals(1513, 
activity.athlete.id)\n\n #self.assertAlmostEqual(first, second, places, msg, delta)\n # Ensure that iw as read in with correct units\n self.assertEquals(22.5308, float(uh.kilometers(activity.distance)))", "def get_input(activity_task):\n try:\n input = json.loads(activity_task[\"input\"])\n except KeyError:\n input = None\n return input", "def return_activity(end):\n if end == \"Seattle\":\n return Seattle_Activities\n if end == \"San Francisco\":\n return SanFrancisco_Activities\n if end == \"Los Angeles\":\n return LosAngeles_Activities\n if end == \"Las Vegas\":\n return LasVegas_Activities\n if end == \"Portland\":\n return Portland_Activities\n else:\n return SanDiego_Activities", "def load_vocal_activity(fhandle: TextIO) -> annotations.EventData:\n begs = [] # timestamps of vocal-instrument activity beginnings\n ends = [] # timestamps of vocal-instrument activity endings\n events = [] # vocal-instrument activity labels\n\n reader = csv.reader(fhandle, delimiter=\"\\t\")\n raw_data = []\n for line in reader:\n if line[0] != \"Piece No.\":\n raw_data.append(line)\n\n for i in range(len(raw_data)):\n # Parsing vocal-instrument activity as intervals (beg, end, event)\n if raw_data[i] != raw_data[-1]:\n begs.append(float(raw_data[i][0]))\n ends.append(float(raw_data[i + 1][0]))\n events.append(raw_data[i][1])\n\n return annotations.EventData(np.array([begs, ends]).T, \"s\", events, \"open\")", "def activities(self):\r\n return v3.Activities(self)", "def test_get_activity_occurrence_details(self):\n pass", "def update_activity():\n pass", "def _activity(\n run, baseline_activity=0., baseline_sigma=3.0,\n trace_type='deconvolved'):\n if trace_type != 'deconvolved':\n raise ValueError(\n 'Temporal classifier only implemented for deconvolved data.')\n\n if run.run_type == 'spontaneous' and 'sated' in run.tags:\n runs = run.parent.runs(run_types=['spontaneous'], tags=['sated'])\n spontaneous = True\n elif run.run_type == 'spontaneous' and 'hungry' in run.tags:\n runs = run.parent.runs(run_types=['spontaneous'], tags=['hungry'])\n spontaneous = True\n elif run.run_type == 'training':\n runs = run.parent.runs(run_types=['training'])\n spontaneous = False\n else:\n raise ValueError(\n 'Unknown run_type and tags, not sure how to calculate activity.')\n\n baseline, variance, outliers = None, None, None\n if spontaneous:\n popact, outliers = [], []\n for r in runs:\n t2p = r.trace2p()\n pact = t2p.trace('deconvolved')\n fmin = t2p.lastonset()\n mask = t2p.inactivity()\n mask[:fmin] = False\n\n if len(popact):\n popact = np.concatenate([popact, pact[:, mask]], axis=1)\n else:\n popact = pact[:, mask]\n\n trs = t2p.trace('deconvolved')[:, fmin:]\n cellact = np.nanmean(trs, axis=1)\n outs = cellact > np.nanmedian(cellact) + 2*np.std(cellact)\n\n if len(outliers) == 0:\n outliers = outs\n else:\n outliers = np.bitwise_or(outliers, outs)\n\n if len(popact):\n popact = np.nanmean(popact[np.invert(outliers), :], axis=0)\n\n baseline = np.median(popact)\n variance = np.std(popact)\n outliers = outliers\n else:\n popact = []\n for r in runs:\n t2p = r.trace2p()\n ncells = t2p.ncells\n pact = np.nanmean(t2p.trace('deconvolved'), axis=0)\n skipframes = int(t2p.framerate*4)\n\n for cs in ['plus*', 'neutral*', 'minus*', 'pavlovian*']:\n onsets = t2p.csonsets(cs)\n for ons in onsets:\n pact[ons:ons+skipframes] = np.nan\n popact = np.concatenate([popact, pact[np.isfinite(pact)]])\n\n if len(popact):\n # baseline = np.median(popact)\n\n # Exclude extremes\n percent = 2.0\n popact = np.sort(popact)\n trim = 
int(percent*popact.size/100.)\n popact = popact[trim:-trim]\n\n baseline = np.median(popact) # Moved to after extreme exclusion on 190326\n variance = np.std(popact)\n outliers = np.zeros(ncells, dtype=bool)\n\n if baseline is None:\n baseline, variance = 0.01, 0.08*baseline_sigma\n else:\n baseline *= baseline_activity\n variance *= baseline_sigma\n\n return baseline, variance, outliers", "def activity(name):\n\n name_split = name.split(\",\")\n if \"Irrigation\" in name and \"gal\" not in name_split[1]:\n n = name_split[0] + \",\" + name_split[1]\n else:\n n = name_split[0]\n\n if \" to \" in n:\n activity = n.split(\" to \")\n name = split_name(activity[0])\n produced = name[0]\n consumed = capitalize_first_letter(activity[1])\n elif \" from \" in n:\n if \")\" in n:\n open_paren_split = n.split(\"(\")\n capitalized_string = capitalize_first_letter(open_paren_split[0])\n close_paren_split = open_paren_split[1].split(\")\")\n produced_split = close_paren_split[1].split(\" from \")\n produced = capitalize_first_letter(produced_split[1].strip())\n consumed = capitalized_string.strip() + \" \" + close_paren_split[0].strip()\n else:\n activity = n.split(\" from \")\n name = split_name(activity[0])\n produced = capitalize_first_letter(activity[1])\n consumed = name[0].strip()\n elif \"consumptive\" in n:\n if \")\" in n:\n open_paren_split = n.split(\"(\")\n capitalized_string = capitalize_first_letter(open_paren_split[0])\n close_paren_split = open_paren_split[1].split(\")\")\n produced = capitalized_string.strip() + \" \" + close_paren_split[0].strip()\n consumed = None\n else:\n split_case = split_name(n)\n consumed = None\n produced = capitalize_first_letter(split_case[0])\n elif \")\" in n:\n produced = None\n open_paren_split = n.split(\"(\")\n capitalized_string = capitalize_first_letter(open_paren_split[0])\n close_paren_split = open_paren_split[1].split(\")\")\n consumed = capitalized_string.strip() + \" \" + close_paren_split[0].strip()\n elif \"total deliveries\" in n:\n split_case = split_name(n)\n consumed = None\n produced = capitalize_first_letter(split_case[0])\n elif \"Self-supplied\" in n:\n split_case = split_name(n)\n produced = None\n consumed = capitalize_first_letter(split_case[1])\n else:\n split_case = split_name(n)\n produced = None\n consumed = capitalize_first_letter(split_case[0])\n return pd.Series([produced, consumed])", "def show_activities(self): \n database = Database('data/database.db')\n activities = database.read_activities()\n view = Template(filename=\"view/template.html\", lookup=lookup)\n \n \n return view.render(\n rows = [[item.number, item.name] for item in activities],\n pageTitle = \"Activités\",\n tableTitle = \"Liste de toutes les activités\",\n ths = [\"Numéro\", \"Nom\"]\n )", "def get_activities(self, type=None):\n return flattrclient._get_query_dict(type=type)", "def create_activity(request: Request, activity_type: str, msg_context: dict, object_id: UUID, user: User):\n dbsession = Session.object_session(user)\n\n stream = Stream.get_or_create_user_stream(user)\n\n a = Activity()\n a.object_id = object_id\n a.activity_type = activity_type\n a.msg_context = msg_context\n\n stream.activities.append(a)\n dbsession.flush()\n\n return a", "def create_activity(activity_id):\n\n # get the activity\n activity = Activity.query.filter_by(id=activity_id).first()\n\n if activity:\n # now get a valid token for the associated user\n access_token = refresh_access_token(user_id=activity.user_id)\n if access_token is None:\n # an error must have occurred\n 
current_app.logger.error('Cannot save activity {} to Strava as unable to refresh token'.format(activity_id))\n # let the app continue on as error has been logged\n return 200\n\n url = 'https://www.strava.com/api/v3/activities'\n headers = {'Authorization': 'Bearer {}'.format(access_token)}\n\n data = construct_strava_activity_data(activity)\n response = requests.post(url, headers=headers, data=data)\n strava_athlete = StravaAthlete.query.filter_by(user_id=activity.user_id).first()\n log_strava_event(strava_athlete.athlete_id, \"Activity\")\n\n # check the response, if there has been an error then need to log this\n if response.status_code != 200:\n current_app.logger.error('Strava Status code: {}'.format(response.status_code))\n current_app.logger.error('Strava Response: {}'.format(response.json))\n return response.status_code\n # log an error if the activity doesn't exist but allow app to continue on\n current_app.logger.error('Activity {} does not exist'.format(activity_id))\n return 200", "def test_days_weeks_activity():\n assert analytics.activity('daily', yoga_trackings(), 1) == 17\n assert analytics.activity('weekly', run_trackings(), 1) == 4\n assert analytics.activity('daily', read_trackings(), 1) == 18\n assert analytics.activity('daily', meditation_trackings(), 1) == 15\n assert analytics.activity('weekly', french_trackings(), 1) == 5", "def get_activities_by_session_id(self, session_id):\n return self._db.get_all(\"\"\"\n SELECT * FROM activity_log\n WHERE session_id = ?\"\"\", (session_id, ))", "def getOLAPSource():", "def get_activity_name(activity):\n for key in activity.attrib.keys():\n if key.endswith(\"name\"):\n return activity.attrib[key]", "def get_activitytosector_mapping(source, fbsconfigpath=None):\n from flowsa.settings import crosswalkpath\n # identify mapping file name\n mapfn = f'NAICS_Crosswalk_{source}'\n\n # if FBS method file loaded from outside the flowsa directory, check if\n # there is also a crosswalk\n external_mappingpath = f\"{fbsconfigpath}activitytosectormapping/\"\n if os.path.exists(external_mappingpath):\n activity_mapping_source_name = get_flowsa_base_name(\n external_mappingpath, mapfn, 'csv')\n if os.path.isfile(f\"{external_mappingpath}\"\n f\"{activity_mapping_source_name}.csv\"):\n log.info(f\"Loading {activity_mapping_source_name}.csv \"\n f\"from {external_mappingpath}\")\n crosswalkpath = external_mappingpath\n activity_mapping_source_name = get_flowsa_base_name(\n crosswalkpath, mapfn, 'csv')\n mapping = pd.read_csv(f'{crosswalkpath}{activity_mapping_source_name}.csv',\n dtype={'Activity': 'str', 'Sector': 'str'})\n # some mapping tables will have data for multiple sources, while other\n # mapping tables are used for multiple sources (like EPA_NEI or BEA\n # mentioned above) so if find the exact source name in the\n # ActivitySourceName column use those rows if the mapping file returns\n # empty, use the original mapping file subset df to keep rows where\n # ActivitySourceName matches source name\n mapping2 = mapping[mapping['ActivitySourceName'] == source].reset_index(\n drop=True)\n if len(mapping2) > 0:\n return mapping2\n else:\n return mapping", "def activity(self, activity):\n if activity is None:\n raise ValueError(\"Invalid value for `activity`, must not be `None`\") # noqa: E501\n\n self._activity = activity", "def load_exported_activities() -> List[DiscoveredActivities]:\n activities = []\n activities.extend(discover_actions(\"chaosgcp.gke.nodepool.actions\"))\n 
activities.extend(discover_probes(\"chaosgcp.gke.nodepool.probes\"))\n activities.extend(discover_actions(\"chaosgcp.sql.actions\"))\n activities.extend(discover_probes(\"chaosgcp.sql.probes\"))\n activities.extend(discover_probes(\"chaosgcp.storage.probes\"))\n activities.extend(discover_actions(\"chaosgcp.cloudbuild.actions\"))\n activities.extend(discover_probes(\"chaosgcp.cloudbuild.probes\"))\n activities.extend(discover_actions(\"chaosgcp.cloudrun.actions\"))\n activities.extend(discover_probes(\"chaosgcp.cloudrun.probes\"))\n activities.extend(discover_probes(\"chaosgcp.monitoring.probes\"))\n activities.extend(discover_probes(\"chaosgcp.cloudlogging.probes\"))\n activities.extend(discover_probes(\"chaosgcp.artifact.probes\"))\n activities.extend(discover_actions(\"chaosgcp.lb.actions\"))\n return activities", "def get_activity(self, type=False,user=False,date_min=False,date_max=False):\n\n if type:\n return self.execute(TABELLE['activity']['select']['by_type'],(type,))\n elif user:\n return self.execute(TABELLE['activity']['select']['by_user'],(user,))\n elif date_min:\n return self.execute(TABELLE['activity']['select']['by_date_min'],(date_min,))\n elif date_max:\n return self.execute(TABELLE['activity']['select']['by_date_max'],(date_max,))\n # se le chiavi sono tutte false allora prendo tutte le activity\n elif not type and not user and not date_max and not date_min:\n return self.execute(TABELLE['activity']['select']['all'])\n else:\n return False", "def transform_data_for_user_activities(df: DataFrame) -> Tuple[DataFrame, str]:\n user_activities_df = df[['user_id', 'time_stamp', 'url_level1', 'url_level2', 'url_level3', 'activity']]\n return user_activities_df, USER_ACTIVITIES_OUTPUT_FILENAME", "def _activity_endpoint(self, athlete, filename):\n return '{host}{athlete}/activity/{filename}'.format(\n host=self.host,\n athlete=quote_plus(athlete),\n filename=filename\n )", "def add_strava_data_to_activities(self):\n \n try:\n logging.info(\"Parsing Strava data and getting it ready for analysis.\")\n\n strava_activities = self.strava_fetcher.fetch_strava_activities()\n if strava_activities == None:\n logging.info(\"No Strava data to add to all activities\")\n return\n\n strava_data = json.dumps(strava_activities)\n \n # load strava data straight up from json, not doing any json normalization\n strava_df = pd.read_json(strava_data)\n strava_df = strava_df[['distance', \n 'elapsed_time', \n 'start_date_local', \n 'location_city', \n 'average_speed', \n 'max_speed', \n 'type']]\n\n # set up 5 key metrics\n # note we're using the enum value\n strava_df['activity_type'] = strava_df['type'].apply(lambda x: self.convert_strava_activity_type(x).value)\n strava_df['source'] = ActivitySource.STRAVA.value\n strava_df['start_timestamp'] = strava_df['start_date_local'].apply(lambda x: parse(x, tzinfos={\"America/Vancouver\"}))\n # strava distances are in meters\n strava_df['distance_in_km'] = strava_df['distance'].apply(lambda x: x / 1000)\n strava_df['duration_in_min'] = strava_df['elapsed_time'].apply(lambda x: x / 60)\n\n # filter out extraneous columns\n strava_df = strava_df.filter(self.data_frame_columns)\n\n # add to activities\n self.all_activities = self.all_activities.append(strava_df, sort=True)\n\n logging.info(\"Done parsing Strava data.\")\n except Exception:\n logging.exception(\"Could not parse Strava data\")", "def get_activity_feed(context, term):\n if not term:\n raise ValueError('You have to provide a search term!')\n url = '{}{}'.format(context.test_url, term)\n response 
= requests.get(url, timeout=context.request_timeout)\n context.response = response\n logging.debug('Request URL: %s', response.request.url)\n logging.debug('Request headers:\\n%s', pformat(response.request.headers))\n logging.debug('Response headers:\\n%s', pformat(response.headers))\n logging.debug('Response content:\\n%s', pformat(response.json()))", "def separate_activity_types(self):\n # Read in the CSV file and make a DataFrame.\n try :\n all_actsDF = pd.read_csv('strava-activities.csv', index_col=\"id\", parse_dates=[\"start_date\", \"start_date_local\"])\n except FileNotFoundError :\n print(\"separate_activity_types couldn't find strava-activities.csv.\")\n else :\n # We need to make sure that all_actsDF has all of the columns that are referenced\n # in the loop below. Otherwise, the code might throw a key error. For example, if someone\n # has no heart rate data at all, stava-activities.csv won't have a max_heartrate column,\n # causing the code to blow up when it looks for that column. So just add empty columns\n # as needed.\n necessary_columns = [\"distance\", \"total_elevation_gain\", \"elapsed_time\", \"moving_time\", \"max_speed(mph)\", \"max_speed(kph)\", \"start_date\", \"elevation_gain(ft)\", \"max_heartrate\"]\n for col in necessary_columns :\n if not col in all_actsDF.columns :\n all_actsDF[col] = np.nan\n\n # Get the list of unique activity types (Ride, Hike, Kayak, etc.)\n act_types = all_actsDF[\"type\"].unique()\n # Get the list of unique years in the data.\n # Extract each year out of the data and sort them.\n years = pd.Series(d.year for d in all_actsDF[\"start_date\"]).unique()\n years.sort()\n\n # Create a dataframe that will hold summary statistics for each activity.\n # The index or the set of rows is the activity types. The columns are the stats\n # we are interested in.\n stats = [\"Total Distance (miles)\", \"Total Distance (km)\", \"Total Elev. Gain (meters)\", \"Total Elev. Gain (ft)\", \"Total Elev. Gain (miles)\", \"Total Elev. Gain (km)\", \"Total Duration (hours)\", \"Total Duration (days)\", \"Average Duration (min)\", \"Total Moving Time (hours)\", \"Total Moving Time (days)\", \"Average Moving Time (min)\", \"Average Speed (mph)\", \"Average Speed (kph)\", \"Max Speed (mph)\", \"Max Speed (kph)\", \"Max Speed Date\", \"Max Elevation Gain(ft)\", \"Max Elevation Gain(m)\", \"Max Elevation Gain Date\", \"Max Heart Rate\", \"Max HR Date\"]\n summaryDF = pd.DataFrame(index=act_types, columns=stats)\n # Loop through all of the activity types and add info into the summary file.\n # Also create a csv for each activity that has the Strava info for that activity only.\n for act in act_types:\n actDF = all_actsDF[all_actsDF[\"type\"] == act]\n actDF.to_csv(act + \".csv\")\n # Add the summary stats\n summaryDF.loc[act, \"Total Distance (miles)\"] = actDF[\"distance\"].sum() * 0.000621371\n summaryDF.loc[act, \"Total Distance (km)\"] = actDF[\"distance\"].sum() / 1000\n summaryDF.loc[act, \"Total Elev. Gain (meters)\"] = actDF[\"total_elevation_gain\"].sum()\n summaryDF.loc[act, \"Total Elev. Gain (ft)\"] = actDF[\"total_elevation_gain\"].sum() * 3.28084\n summaryDF.loc[act, \"Total Elev. Gain (miles)\"] = actDF[\"total_elevation_gain\"].sum() * 3.28084/5280\n summaryDF.loc[act, \"Total Elev. 
Gain (km)\"] = actDF[\"total_elevation_gain\"].sum() / 1000\n summaryDF.loc[act, \"Total Duration (hours)\"] = actDF[\"elapsed_time\"].sum() / 3600\n summaryDF.loc[act, \"Total Duration (days)\"] = actDF[\"elapsed_time\"].sum() / (3600*24)\n summaryDF.loc[act, \"Average Duration (min)\"] = actDF[\"elapsed_time\"].mean() / 60\n summaryDF.loc[act, \"Total Moving Time (hours)\"] = actDF[\"moving_time\"].sum() / 3600\n summaryDF.loc[act, \"Total Moving Time (days)\"] = actDF[\"moving_time\"].sum() / (3600*24)\n summaryDF.loc[act, \"Average Moving Time (min)\"] = actDF[\"moving_time\"].mean() / 60\n summaryDF.loc[act, \"Average Speed (mph)\"] = (actDF[\"distance\"].sum() / actDF[\"moving_time\"].sum()) * 2.23694\n summaryDF.loc[act, \"Average Speed (kph)\"] = (actDF[\"distance\"].sum() / actDF[\"moving_time\"].sum()) * 3.6\n summaryDF.loc[act, \"Max Speed (mph)\"] = actDF[\"max_speed(mph)\"].max()\n summaryDF.loc[act, \"Max Speed (kph)\"] = actDF[\"max_speed(kph)\"].max()\n # We have to be careful anytime we want a specific date that something occured because\n # it may never have occurred and the result may be empty. That's why we do the following\n # five lines.\n s = actDF.loc[actDF[\"max_speed(mph)\"] == actDF[\"max_speed(mph)\"].max(), \"start_date\"]\n if not s.empty :\n summaryDF.loc[act, \"Max Speed Date\"] = s.iloc[0].date()\n else :\n summaryDF.loc[act, \"Max Speed Date\"] = None\n summaryDF.loc[act, \"Max Elevation Gain(ft)\"] = actDF[\"elevation_gain(ft)\"].max()\n summaryDF.loc[act, \"Max Elevation Gain(m)\"] = actDF[\"total_elevation_gain\"].max()\n s = actDF.loc[actDF[\"elevation_gain(ft)\"] == actDF[\"elevation_gain(ft)\"].max(), \"start_date\"]\n if not s.empty :\n summaryDF.loc[act, \"Max Elevation Gain Date\"] = s.iloc[0].date()\n else :\n summaryDF.loc[act, \"Max Elevation Gain Date\"] = None\n summaryDF.loc[act, \"Max Heart Rate\"] = actDF[\"max_heartrate\"].max()\n # We have to be careful with max heart rate because not all activities will have HR data.\n # The following code makes sure there is HR data before trying to access it.\n s = actDF.loc[actDF[\"max_heartrate\"] == actDF[\"max_heartrate\"].max(), \"start_date\"]\n if not s.empty :\n summaryDF.loc[act, \"Max HR Date\"] = s.iloc[0].date()\n else:\n summaryDF.loc[act, \"Max HR Date\"] = None\n\n # Summarize each activity by year\n act_summaryDF = pd.DataFrame(index=stats, columns = years)\n for y in years :\n subDF = actDF[(actDF[\"start_date\"] >= datetime.datetime(year = y, month = 1, day = 1, tzinfo=pytz.utc)) & (actDF[\"start_date\"] < datetime.datetime(year = y+1, month = 1, day = 1, tzinfo=pytz.utc))]\n # Need to check that we had any of this activity in the year.\n if not subDF.empty :\n act_summaryDF.loc[\"Total Distance (miles)\", y] = subDF[\"distance\"].sum() * 0.000621371\n act_summaryDF.loc[\"Total Distance (km)\", y] = subDF[\"distance\"].sum() / 1000\n act_summaryDF.loc[\"Total Elev. Gain (meters)\", y] = subDF[\"total_elevation_gain\"].sum()\n act_summaryDF.loc[\"Total Elev. Gain (ft)\", y] = subDF[\"total_elevation_gain\"].sum() * 3.28084\n act_summaryDF.loc[\"Total Elev. Gain (miles)\", y] = subDF[\"total_elevation_gain\"].sum() * 3.28084/5280\n act_summaryDF.loc[\"Total Elev. 
Gain (km)\", y] = subDF[\"total_elevation_gain\"].sum() / 1000\n act_summaryDF.loc[\"Total Duration (hours)\", y] = subDF[\"elapsed_time\"].sum() / 3600\n act_summaryDF.loc[\"Total Duration (days)\", y] = subDF[\"elapsed_time\"].sum() / (3600*24)\n act_summaryDF.loc[\"Average Duration (min)\", y] = subDF[\"elapsed_time\"].mean() / 60\n act_summaryDF.loc[\"Total Moving Time (hours)\", y] = subDF[\"moving_time\"].sum() / 3600\n act_summaryDF.loc[\"Total Moving Time (days)\", y] = subDF[\"moving_time\"].sum() / (3600*24)\n act_summaryDF.loc[\"Average Moving Time (min)\", y] = subDF[\"moving_time\"].mean() / 60\n act_summaryDF.loc[\"Average Speed (mph)\", y] = (subDF[\"distance\"].sum() / subDF[\"moving_time\"].sum()) * 2.23694\n act_summaryDF.loc[\"Average Speed (kph)\", y] = (subDF[\"distance\"].sum() / subDF[\"moving_time\"].sum()) * 3.6\n act_summaryDF.loc[\"Max Speed (mph)\", y] = subDF[\"max_speed(mph)\"].max()\n act_summaryDF.loc[\"Max Speed (kph)\", y] = subDF[\"max_speed(kph)\"].max()\n s = subDF.loc[subDF[\"max_speed(mph)\"] == subDF[\"max_speed(mph)\"].max(), \"start_date\"]\n if not s.empty:\n act_summaryDF.loc[\"Max Speed Date\", y] = s.iloc[0].date()\n else :\n act_summaryDF.loc[\"Max Speed Date\", y] = None\n\n act_summaryDF.loc[\"Max Elevation Gain(ft)\", y] = subDF[\"elevation_gain(ft)\"].max()\n act_summaryDF.loc[\"Max Elevation Gain(m)\", y] = subDF[\"total_elevation_gain\"].max()\n s = subDF.loc[subDF[\"elevation_gain(ft)\"] == subDF[\"elevation_gain(ft)\"].max(), \"start_date\"]\n if not s.empty :\n act_summaryDF.loc[\"Max Elevation Gain Date\", y] = s.iloc[0].date()\n else :\n act_summaryDF.loc[\"Max Elevation Gain Date\", y] = None\n act_summaryDF.loc[\"Max Heart Rate\", y] = subDF[\"max_heartrate\"].max()\n s = subDF.loc[subDF[\"max_heartrate\"] == subDF[\"max_heartrate\"].max(), \"start_date\"]\n if not s.empty :\n act_summaryDF.loc[\"Max HR Date\", y] = s.iloc[0].date()\n else:\n act_summaryDF.loc[\"Max HR Date\", y] = None\n # Add a few totals\n act_summaryDF.loc[\"Total Distance (miles)\", \"Total\"] = act_summaryDF.loc[\"Total Distance (miles)\"].sum()\n act_summaryDF.loc[\"Total Distance (km)\", \"Total\"] = act_summaryDF.loc[\"Total Distance (km)\"].sum()\n act_summaryDF.loc[\"Total Elev. Gain (meters)\", \"Total\"] = act_summaryDF.loc[\"Total Elev. Gain (meters)\"].sum()\n act_summaryDF.loc[\"Total Elev. Gain (ft)\", \"Total\"] = act_summaryDF.loc[\"Total Elev. Gain (ft)\"].sum()\n act_summaryDF.loc[\"Total Elev. Gain (miles)\", \"Total\"] = act_summaryDF.loc[\"Total Elev. Gain (miles)\"].sum()\n act_summaryDF.loc[\"Total Elev. Gain (km)\", \"Total\"] = act_summaryDF.loc[\"Total Elev. 
Gain (km)\"].sum()\n act_summaryDF.loc[\"Total Duration (hours)\", \"Total\"] = act_summaryDF.loc[\"Total Duration (hours)\"].sum()\n act_summaryDF.loc[\"Total Duration (days)\", \"Total\"] = act_summaryDF.loc[\"Total Duration (days)\"].sum()\n\n act_summaryDF.loc[\"Average Duration (min)\", \"Total\"] = summaryDF.loc[act, \"Average Duration (min)\"]\n act_summaryDF.loc[\"Total Moving Time (hours)\", \"Total\"] = act_summaryDF.loc[\"Total Moving Time (hours)\"].sum()\n act_summaryDF.loc[\"Total Moving Time (days)\", \"Total\"] = act_summaryDF.loc[\"Total Moving Time (days)\"].sum()\n act_summaryDF.loc[\"Average Moving Time (min)\", \"Total\"] = summaryDF.loc[act, \"Average Moving Time (min)\"]\n act_summaryDF.loc[\"Average Speed (mph)\", \"Total\"] = summaryDF.loc[act, \"Average Speed (mph)\"]\n act_summaryDF.loc[\"Average Speed (kph)\", \"Total\"] = summaryDF.loc[act, \"Average Speed (kph)\"]\n act_summaryDF.loc[\"Max Speed (mph)\", \"Total\"] = act_summaryDF.loc[\"Max Speed (mph)\"].max()\n act_summaryDF.loc[\"Max Speed (kph)\", \"Total\"] = act_summaryDF.loc[\"Max Speed (kph)\"].max()\n act_summaryDF.loc[\"Max Speed Date\", \"Total\"] = summaryDF.loc[act, \"Max Speed Date\"]\n act_summaryDF.loc[\"Max Elevation Gain(ft)\", \"Total\"] = summaryDF.loc[act, \"Max Elevation Gain(ft)\"]\n act_summaryDF.loc[\"Max Elevation Gain(m)\", \"Total\"] = summaryDF.loc[act, \"Max Elevation Gain(m)\"]\n act_summaryDF.loc[\"Max Elevation Gain Date\", \"Total\"] = summaryDF.loc[act, \"Max Elevation Gain Date\"]\n act_summaryDF.loc[\"Max Heart Rate\", \"Total\"] = summaryDF.loc[act, \"Max Heart Rate\"]\n act_summaryDF.loc[\"Max HR Date\", \"Total\"] = summaryDF.loc[act, \"Max HR Date\"]\n\n # Print the annual summary\n act_summaryDF.to_csv(act + \"-by-year.csv\")\n\n # Print the summary to a csv\n\n summaryDF.to_csv(\"strava-summary.csv\")", "def activity(self, activity):\n allowed_values = [\"PICKUP\", \"DROPOFF\", \"EXECUTE\", \"BREAK\"] # noqa: E501\n if activity not in allowed_values:\n raise ValueError(\n \"Invalid value for `activity` ({0}), must be one of {1}\" # noqa: E501\n .format(activity, allowed_values)\n )\n\n self._activity = activity", "def get_activity_list(self):\n return self._request_activity_list(self.athlete)", "async def get_user_activity(self, username: str) -> 'Response':\n headers = {\n 'Content-Type': 'application/json'\n }\n response = await self._client.request(method=RequestMethods.GET,\n url=USERS_ACTIVITY_URL.format(username=username),\n headers=headers)\n return response", "def event_activity_csv(event_id):\n limit = request.args.get('limit') or 50\n q = request.args.get('q') or None\n if q and len(q) < 3:\n q = None\n csvstream = gen_csv(get_event_activities(event_id, limit, q))\n headers = {'Content-Disposition': 'attachment; filename=activity_list.csv'}\n return Response(stream_with_context(csvstream),\n mimetype='text/csv', headers=headers)", "def get_activities(cls):\n objs = cls.objects\n return objs", "def get_activity(pt=None, activity_wt=None):\n if len(pt) != len(activity_wt):\n print(\"Pt. 
dimension doesn't match to desired dimension\")\n print('pt dim:', len(pt), 'required dim:', len(activity_wt))\n exit(1)\n arg = get_sigmoid(np.dot(pt, activity_wt))\n # arg = np.dot(pt, activity_wt)\n return arg", "def main(to_be_scheduled):\n\n tasks = order_by_ftime(to_be_scheduled)\n print select_activity(tasks)", "def search_activity(conn, request):\n\n c = conn.cursor()\n search_query = \"SELECT * FROM Activity T1 WHERE T1.Name LIKE ?\"\n c.execute(search_query, (request,))\n result = c.fetchall()\n return result", "def test_get_activity_template(self):\n pass", "def get(self, id):\n activity = Activity().get(id)\n if not activity:\n abort(404, \"Activity not found\")\n return activity", "def activities_list(self):\n self.__load_activities_from_file_into_memory()\n return self._activities_list", "def _get_future_from_activity_event(self, event):\n future = futures.Future() # state is PENDING.\n state = event['state']\n\n if state == 'scheduled':\n future._state = futures.PENDING\n elif state == 'schedule_failed':\n if event['cause'] == 'ACTIVITY_TYPE_DOES_NOT_EXIST':\n activity_type = swf.models.ActivityType(\n self.domain,\n name=event['activity_type']['name'],\n version=event['activity_type']['version'])\n logger.info('Creating activity type {} in domain {}'.format(\n activity_type.name,\n self.domain.name))\n try:\n activity_type.save()\n except swf.exceptions.AlreadyExistsError:\n logger.info(\n 'Activity type {} in domain {} already exists'.format(\n activity_type.name,\n self.domain.name))\n return None\n logger.info('failed to schedule {}: {}'.format(\n event['activity_type']['name'],\n event['cause'],\n ))\n return None\n elif state == 'started':\n future._state = futures.RUNNING\n elif state == 'completed':\n future._state = futures.FINISHED\n result = event['result']\n future._result = json.loads(result) if result else None\n elif state == 'canceled':\n future._state = futures.CANCELLED\n elif state == 'failed':\n future._state = futures.FINISHED\n future._exception = exceptions.TaskFailed(\n name=event['id'],\n reason=event['reason'],\n details=event.get('details'),\n )\n elif state == 'timed_out':\n future._state = futures.FINISHED\n future._exception = exceptions.TimeoutError(\n event['timeout_type'],\n event['timeout_value'])\n\n return future", "def create_activity(self, created_user, source, action,\n privacy=Privacy.PRIVATE, **kwargs):\n Activity = get_activity_model()\n return Activity.objects.create(\n about=self,\n action=action,\n created_user=created_user,\n source=source,\n privacy=privacy,\n **kwargs\n )", "def test_get_detailed_activity():\n tokens = get_tokens()\n activity = get_detailed_activity(4563031911, tokens)\n # this activity does not have a description\n assert activity.status_code == 200\n activity = activity.json()\n assert type(activity[\"id\"]) == int\n assert type(activity[\"distance\"]) == float\n assert type(activity[\"moving_time\"]) == int\n assert type(activity[\"elapsed_time\"]) == int\n assert type(activity[\"total_elevation_gain\"]) == float\n assert type(activity[\"elev_high\"]) == float\n assert type(activity[\"elev_low\"]) == float\n assert type(activity[\"type\"]) == str\n assert type(activity[\"start_date\"]) == str\n assert type(activity[\"average_speed\"]) == float\n assert type(activity[\"gear_id\"]) == str\n assert type(activity[\"description\"]) is type(None)\n activity = get_detailed_activity(4576599261, tokens)\n assert activity.status_code == 200\n activity = activity.json()\n # this activity has a description but I added 
it manually so there's no elev high or low\n assert type(activity[\"description\"]) == str\n\n assert type(activity[\"id\"]) == int\n assert type(activity[\"distance\"]) == float\n assert type(activity[\"moving_time\"]) == int\n assert type(activity[\"elapsed_time\"]) == int\n assert type(activity[\"total_elevation_gain\"]) == float\n assert type(activity[\"type\"]) == str\n assert type(activity[\"start_date\"]) == str\n assert type(activity[\"average_speed\"]) == float\n assert type(activity[\"gear_id\"]) == str", "def activity_logs(self) -> api.ActivityLogs:\n return self._get_model(model=api.ActivityLogs)", "def test_get_detailed_activity(self, Activity1, StravaTokens1):\n self.mock_get.return_value = Mock(ok=True)\n self.mock_get.return_value.json.return_value = Activity1\n strava_tokens = StravaTokens1\n response = get_detailed_activity(12345678987654321, strava_tokens)\n assert response.ok is True\n assert response.json() == Activity1", "def add_activity(self, activity, table):\n week = self.t.timeline[\"week\" + str(self.week)]\n self.t.add_activity(week, activity)\n self.clear_frame(table)\n self.show_table(self.t.timeline[\"week\" + str(self.week)], table)", "def create_activities(app_state) -> list:\n activity_specs = app_state.personality.get_states()\n activity_objects = []\n if len(activity_specs) == 0:\n raise ValueError(\"No activities found.\")\n for activity_spec in activity_specs:\n this_object = create_activity(activity_spec, app_state)\n activity_objects.append(this_object)\n end_state_object = create_activity(end_state_config, app_state)\n activity_objects.append(end_state_object)\n return activity_objects", "def asset_activity(self, asset_id):\n response = self._client.get('workbenches/assets/%(asset_id)s/activity',\n path_params={'asset_id': asset_id})\n return AssetActivityList.from_json(response.text)", "def activities_to_jsonfeed(activities, actor=None, title=None, feed_url=None,\n home_page_url=None):\n try:\n iter(activities)\n except TypeError:\n raise TypeError('activities must be iterable')\n\n if isinstance(activities, (dict, str)):\n raise TypeError('activities may not be a dict or string')\n\n def image_url(obj):\n return util.get_first(obj, 'image', {}).get('url')\n\n def actor_name(obj):\n return obj.get('displayName') or obj.get('username')\n\n if not actor:\n actor = {}\n\n items = []\n for activity in activities:\n obj = as1.get_object(activity) or activity\n if obj.get('objectType') == 'person':\n continue\n author = as1.get_object(obj, 'author')\n content = microformats2.render_content(\n obj, include_location=True, render_attachments=True,\n # Readers often obey CSS white-space: pre strictly and don't even line wrap,\n # so don't use it. 
https://github.com/snarfed/granary/issues/456\n white_space_pre=False)\n obj_title = obj.get('title') or obj.get('displayName')\n item = {\n 'id': obj.get('id') or obj.get('url'),\n 'url': obj.get('url'),\n 'image': image_url(obj),\n 'title': obj_title if mf2util.is_name_a_title(obj_title, content) else None,\n 'summary': obj.get('summary'),\n 'content_html': content,\n 'date_published': obj.get('published'),\n 'date_modified': obj.get('updated'),\n 'author': {\n 'name': actor_name(author),\n 'url': author.get('url'),\n 'avatar': image_url(author),\n },\n 'attachments': [],\n }\n\n for att in obj.get('attachments', []):\n url = util.get_url(att, 'stream') or util.get_url(att, 'image')\n mime = mimetypes.guess_type(url)[0] if url else None\n if (att.get('objectType') in ATTACHMENT_TYPES or\n mime and mime.split('/')[0] in ATTACHMENT_TYPES):\n item['attachments'].append({\n 'url': url or '',\n 'mime_type': mime,\n 'title': att.get('title'),\n })\n\n if not item['content_html']:\n item['content_text'] = ''\n items.append(item)\n\n return util.trim_nulls({\n 'version': 'https://jsonfeed.org/version/1',\n 'title': title or actor_name(actor) or 'JSON Feed',\n 'feed_url': feed_url,\n 'home_page_url': home_page_url or actor.get('url'),\n 'author': {\n 'name': actor_name(actor),\n 'url': actor.get('url'),\n 'avatar': image_url(actor),\n },\n 'items': items,\n }, ignore='content_text')", "def __init__(self, log=None):\n sppasBaseAnnotation.__init__(self, \"activity.json\", log)\n self.__activity = Activity()", "def test_workflows_change_stream_get(self):\n pass", "def post_activities():\n pass", "def get_activityType(activity_task):\n try:\n return activity_task[\"activityType\"][\"name\"]\n except KeyError:\n # No activityType found\n return None", "def set_current_activity(client, activity_label):\n\n id = activities_by_name[activity_label]\n func = client.start_activity(id)\n status = run_in_loop_now('start_activity', func)\n return status", "def orchestrator_function(context: df.DurableOrchestrationContext) -> List[str]:\r\n\r\n\r\n logging.debug(\"Creating the orchestrator function\")\r\n\r\n json_rule = {\r\n \"condition\": {\r\n \"wait_events\": [\"A\",\"B\"],\r\n \"logic\": \"and\"\r\n },\r\n \"satisfied\":[\r\n {\r\n \"activity_func_name\": \"SuccessActions\",\r\n \"args\": {\r\n \"name\": \"Tokyo\"\r\n }\r\n }\r\n ]\r\n }\r\n\r\n tasks = []\r\n for event in json_rule[\"condition\"][\"wait_events\"]:\r\n tasks.append(context.wait_for_external_event(event))\r\n logging.debug(\"Added event {} to list of tasks\".format(event))\r\n\r\n if json_rule[\"condition\"][\"logic\"] == 'and':\r\n logging.info(\"A logical <and> rule was found\")\r\n yield context.task_all(tasks)\r\n elif json_rule[\"condition\"][\"logic\"] == 'or':\r\n logging.info(\"A logical <or> rule was found\")\r\n yield context.task_any(tasks)\r\n\r\n output = []\r\n for action in json_rule[\"satisfied\"]:\r\n result = yield context.call_activity(action[\"activity_func_name\"], action[\"args\"])\r\n output.append(result)\r\n\r\n return output", "def query_user_activities(username):\n github_endpoint = 'https://api.github.com/users/{}/events/public'.format(username)\n return requests.get(url=github_endpoint).text", "def test_api_get_activity_by_id(self):\n # create a bucket\n res = self.register_login_get_token()\n self.assertEqual(res.status_code, 201)\n\n # create a activity\n res = self.client().post('/bucketlist/1/activities',\n headers=dict(\n Authorization=\"Bearer \" + self.access_token),\n data=self.activity)\n 
self.assertEqual(res.status_code, 201)\n # get activity created\n activity_created = json.loads(res.data.decode())\n # get activity by its ID\n res = self.client().get('/bucketlist/1/activities/{}'.format(activity_created['id']),\n headers=dict(\n Authorization=\"Bearer \" + self.access_token))\n self.assertEqual(res.status_code, 200)\n self.assertIn('Shop in', str(res.data))", "def __ui_add_new_activity(self):\n activity_id = int(input(\"Activity ID: \"))\n existing_persons_ids = self.__person_service.get_existing_persons_ids()\n string_of_participants_ids = input(\n f\"Participants' IDs (you can choose from the list: {existing_persons_ids})\\n > \")\n list_of_participants_ids = self.__ui_convert_ids_string_to_list(string_of_participants_ids)\n activity_description = input(\"Describe the activity: \")\n activity_date = {\n \"year\": int(input(\"Year: \")),\n \"month\": int(input(\"Month: \")),\n \"day\": int(input(\"Day: \"))\n }\n activity_time = int(input(\"Time: \"))\n\n self.__activity_service.service_add_activity(activity_id,\n list_of_participants_ids,\n activity_date,\n activity_time,\n activity_description)\n print(\"Activity successfully added to your agenda!\\n\")", "def get_activities_dictionary(self):\r\n activities_dict_list = list()\r\n activities = self.get_specific_node_list('activity')\r\n for activity in activities:\r\n activities_dict = dict()\r\n activity_name = None\r\n category = None\r\n for key, val in activity.attrib.iteritems():\r\n if \"}name\" in key:\r\n activity_name = val.split(\".\")[-1]\r\n break\r\n if activity_name:\r\n intent_filter_node = self.get_specific_node_list('intent-filter', root_node=activity)\r\n if len(intent_filter_node) == 1:\r\n categories_nodes = self.get_specific_node_list('category', root_node=intent_filter_node[0])\r\n category = self.get_category_value(categories_nodes)\r\n else:\r\n category = None\r\n activities_dict[\"name\"] = activity_name\r\n activities_dict[\"category\"] = category\r\n activities_dict_list.append(activities_dict)\r\n return activities_dict_list", "def test_activity_flag(self, example_staypoints):\n # take out staypoint 6 that should have been merged with 2, 15\n sp = example_staypoints\n data = [True, True, True, True, False, True, True, True]\n idx = [1, 2, 3, 5, 6, 7, 15, 80]\n activities = pd.Series(data, index=idx)\n sp[\"activity\"] = activities\n sp, _ = sp.as_staypoints.generate_locations(\n method=\"dbscan\",\n epsilon=10,\n num_samples=2,\n distance_metric=\"haversine\",\n agg_level=\"user\",\n activities_only=True,\n )\n assert sp.loc[1, \"location_id\"] == sp.loc[15, \"location_id\"]\n assert sp.loc[2, \"location_id\"] is pd.NA", "def _input_fn(input_pipeline_context=None):\n return _create_dataset(options, is_training, input_pipeline_context)", "def get_start_activities():\n # reads the session\n session = request.args.get('session', type=str)\n # reads the requested process name\n process = request.args.get('process', default='receipt', type=str)\n if check_session_validity(session):\n user = get_user_from_session(session)\n if lh.check_user_log_visibility(user, process):\n dictio = lh.get_handler_for_process_and_session(process, session).get_start_activities()\n for entry in dictio:\n dictio[entry] = int(dictio[entry])\n list_act = sorted([(x, y) for x, y in dictio.items()], key=lambda x: x[1], reverse=True)\n return jsonify({\"startActivities\": list_act})\n return jsonify({\"startActivities\": []})" ]
[ "0.6528125", "0.64868456", "0.60511726", "0.6050907", "0.59809947", "0.5972801", "0.5858322", "0.5832111", "0.57496256", "0.57023674", "0.56348556", "0.56348556", "0.5538348", "0.5492945", "0.54754126", "0.5414958", "0.53552705", "0.53485847", "0.53307176", "0.5305301", "0.52928853", "0.52818364", "0.5280065", "0.52604574", "0.52337945", "0.5232585", "0.5226021", "0.52201056", "0.51525795", "0.50891083", "0.50884473", "0.5076833", "0.50534415", "0.50534415", "0.50534415", "0.50534415", "0.504001", "0.5028233", "0.5021119", "0.5020458", "0.5010835", "0.5000876", "0.49784046", "0.49743617", "0.49697736", "0.49679047", "0.4927048", "0.49105868", "0.4909181", "0.4906805", "0.48990065", "0.48728094", "0.48700565", "0.48591805", "0.4856319", "0.48553854", "0.48377454", "0.4815638", "0.4812732", "0.48105672", "0.4809659", "0.48084936", "0.48074722", "0.4800167", "0.47946107", "0.47909692", "0.4790435", "0.47898036", "0.47877023", "0.47874084", "0.47825843", "0.4777788", "0.47686476", "0.47668225", "0.47560441", "0.47519273", "0.4751803", "0.4743932", "0.47291213", "0.4727797", "0.47188896", "0.47155923", "0.47151718", "0.47111404", "0.4707871", "0.47056374", "0.46924675", "0.46841246", "0.4675414", "0.46704575", "0.46668112", "0.46664107", "0.46661437", "0.4657628", "0.464087", "0.46403524", "0.46389583", "0.46350724", "0.46341258", "0.46309656" ]
0.6439963
2
Provides a Step Functions Activity data source Example Usage ```python import pulumi import pulumi_aws as aws sfn_activity = aws.sfn.get_activity(name="myactivity") ```
def get_activity_output(arn: Optional[pulumi.Input[Optional[str]]] = None, name: Optional[pulumi.Input[Optional[str]]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetActivityResult]: ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_activities():\n pass", "def get_activity(arn: Optional[str] = None,\n name: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetActivityResult:\n __args__ = dict()\n __args__['arn'] = arn\n __args__['name'] = name\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('aws:sfn/getActivity:getActivity', __args__, opts=opts, typ=GetActivityResult).value\n\n return AwaitableGetActivityResult(\n arn=pulumi.get(__ret__, 'arn'),\n creation_date=pulumi.get(__ret__, 'creation_date'),\n id=pulumi.get(__ret__, 'id'),\n name=pulumi.get(__ret__, 'name'))", "def construct_strava_activity_data(activity):\n # if the timestamp has been saved then use this over converting the other one\n # issues with server tz so better to use the timestamp at the point the activity record was created\n if activity.iso_timestamp:\n local_time = activity.iso_timestamp\n else:\n local_time = activity.local_timestamp.isoformat()\n\n data = {'name': activity.title,\n 'type': STRAVA_ACTIVITIES_LOOKUP[activity.type],\n 'start_date_local': local_time,\n 'elapsed_time': activity.duration * 60, # need to convert to seconds, stored in db as minutes\n 'description': activity.description}\n\n if activity.distance is not None and activity.distance > 0:\n data['distance'] = activity.distance * 1000 # Strava API requires distance in m, stored in db as km\n\n return data", "def _read_activity(session_path: Path):\n # Read activity file\n df_act = pd.read_csv(\n session_path / ACTIVITY_FILE,\n names=ACTIVITY_FILE_COLUMNS,\n usecols=[\n \"subject\",\n \"session_number\",\n \"start_time\",\n \"end_time\",\n \"gesture_scenario\",\n \"task_id\",\n ],\n header=None,\n engine=\"c\",\n )\n # Timestamps as additional datetime columns\n df_act[\"start_time_dt\"] = pd.to_datetime(df_act[\"start_time\"], unit=\"ms\")\n df_act[\"end_time_dt\"] = pd.to_datetime(df_act[\"end_time\"], unit=\"ms\")\n\n return df_act", "def get_activity(variable):\n project = variable['project']\n try:\n exp = variable['exp']\n if isinstance(exp, list):\n return [CMOR_TABLES[project].activities[value][0] for value in exp]\n return CMOR_TABLES[project].activities[exp][0]\n except (KeyError, AttributeError):\n return None", "def get_activities(ts_activity, access_token):\n params = {'after': ts_activity, 'access_token': access_token}\n url = \"https://www.strava.com/api/v3/activities\"\n response = return_json(url, \"GET\", parameters=params)\n return response", "def activity():\n return {\n \"type\": \"class\",\n \"base\": None,\n \"is_abstract\": True,\n \"is_document\": True,\n \"pstr\": (\"{}\", (\"canonical_name\",)),\n \"properties\": [\n (\n \"alternative_names\",\n \"str\",\n \"0.N\",\n \"List of names by which the activity is also known.\",\n ),\n (\n \"canonical_name\",\n \"str\",\n \"0.1\",\n \"Community defined identifier or name.\",\n ),\n (\n \"citations\",\n \"linked_to(shared.citation)\",\n \"0.N\",\n \"Set of pertinent citations.\",\n ),\n (\n \"description\",\n \"str\",\n \"0.1\",\n \"Description of what is to be done (or was done).\",\n ),\n (\n \"duration\",\n \"time.time_period\",\n \"0.1\",\n \"Time the activity was (or will be) active.\",\n ),\n (\n \"internal_name\",\n \"str\",\n \"0.1\",\n \"A name used for internal purposes.\",\n ),\n (\"keywords\", \"str\", \"0.1\", \"User defined keywords.\"),\n (\"long_name\", \"str\", \"0.1\", \"Longer version of activity name.\"),\n (\"name\", \"str\", \"1.1\", \"Short name or abbreviation.\"),\n (\n 
\"responsible_parties\",\n \"shared.responsibility\",\n \"0.N\",\n \"People or organisations responsible for activity.\",\n ),\n (\n \"previously_known_as\",\n \"str\",\n \"0.N\",\n \"List of names by which the activity was formerly known.\",\n ),\n (\n \"rationale\",\n \"str\",\n \"0.1\",\n \"Explanation of why this activity was carried out and/or what \"\n \"it was intended to achieve.\",\n ),\n ],\n }", "def get_activity_object(activity_name, settings, logger, conn, token, activity_task):\n full_path = \"activity.\" + activity_name + \".\" + activity_name\n f = eval(full_path)\n # Create the object\n activity_object = f(settings, logger, conn, token, activity_task)\n return activity_object", "def activity(self, activity_id):\r\n return resources.Activity(self, activity_id)", "def _request_activity_data(self, athlete, filename):\n response = self._get_request(self._activity_endpoint(athlete, filename)).json()\n\n activity = pd.DataFrame(response['RIDE']['SAMPLES'])\n activity = activity.rename(columns=ACTIVITY_COLUMN_TRANSLATION)\n\n activity.index = pd.to_timedelta(activity.time, unit='s')\n activity.drop('time', axis=1, inplace=True)\n\n return activity[[i for i in ACTIVITY_COLUMN_ORDER if i in activity.columns]]", "def test_get_activity(self):\n pass", "def test_get_activity(self):\n pass", "def get_activity():\n try:\n activity = Activity.objects.filter(active=1).latest('id')\n except Activity.DoesNotExist:\n activity = None\n return activity", "def get_activity(self, filename):\n return self._request_activity_data(self.athlete, filename)", "def get_continuous_activity(self):\n from .continuousactivity import DSSContinuousActivity\n return DSSContinuousActivity(self.client, self.project_key, self.recipe_name)", "def get_activity_stream(token, activity, types, series_type='time', resolution='high'):\n types = ','.join(types)\n params = {'access_token': token}\n url = f'https://www.strava.com/api/v3/activities/{activity}/streams/{types}&series_type={series_type}&resolution={resolution}&key_by_type='\n response = return_json(url, \"GET\", parameters=params, timeout=10)\n return response", "def test_get_activities(self):\n pass", "def activity(self, activity_id):\r\n return activities.Activity(self, activity_id)", "def getactivity(self) -> Optional[ba.Activity]:\n stats = self._stats()\n if stats is not None:\n return stats.getactivity()\n return None", "def by_activity(cls,site_id=0,activity=None):\n return meta.DBSession.query(Activity).filter_by(site_id=site_id,activity=activity).all()", "def get_current_activity(client):\n func = client.get_current_activity()\n activity_id = run_in_loop_now('get_current_activity', func)\n label = activities_by_id[str(activity_id)]\n return label", "def getUserActivities(context, request):\n mmdb = MADMaxDB(context.db)\n query = {}\n query['actor.username'] = request.actor['username']\n query['verb'] = 'post'\n chash = request.params.get('context', None)\n if chash:\n query['contexts.hash'] = chash\n\n is_head = request.method == 'HEAD'\n activities = mmdb.activity.search(query, sort=\"_id\", keep_private_fields=False, flatten=1, count=is_head, **searchParams(request))\n\n handler = JSONResourceRoot(activities, stats=is_head)\n return handler.buildResponse()", "def activity(self):\n return self._activity", "def fetch(self, activity):\n return None, None", "def get_activity_name(activityType):\n return \"activity_\" + activityType", "def get_activities(self, user_id=None, group_id=None, app_id=None,\n activity_id=None, start_index=0, count=0):\n raise 
NotImplementedError()", "def manipulate_activity():\n pass", "def get_activity_data(self, rid):\n raise NotImplementedError", "def fetch_github_activity(gen, metadata):\n\n if \"GITHUB_ACTIVITY_FEED\" in gen.settings.keys():\n gen.context[\"github_activity\"] = gen.plugin_instance.fetch()", "def getActivity(self):\n return self.activity", "def convert_activity(ast):\n\n if len(ast.args) > 1:\n logger.error(f\"Activity should not have more than 1 argument {ast.to_string()}\")\n\n p_arg = ast.args[0] # protein argument\n print(\"p_arg\", p_arg)\n ma_arg = Function(\"ma\", version=version)\n ma_arg.add_argument(StrArg(ast.name, ma_arg))\n p_arg.change_parent_fn(ma_arg)\n ast = Function(\"activity\", version=version)\n p_arg.change_parent_fn(ast)\n ast.add_argument(p_arg)\n ast.add_argument(ma_arg)\n\n return ast", "def all_activity(self):\n\t\tself.db = DB()\n\t\tactivity_all = self.db.select_all_from(\"activity\")\n\t\ttmpl = lookup.get_template(\"activity.html\")\n\t\treturn (tmpl.render(activity=activity_all))", "def activities(self, start=None, limit=None, done=None, exclude=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/activities'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json", "def activities(self, start=None, limit=None, done=None, exclude=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/activities'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json", "def activities(self, start=None, limit=None, done=None, exclude=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/activities'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json", "def activities(self, start=None, limit=None, done=None, exclude=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/activities'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json", "def start_activity(self,param={},ignore_error_handle = False):\n message = {};\n step = 'start activity by app package \\'' + param.get('package') + '\\' and activity name \\'' + param.get('activity') + '\\'';\n package = param.get('package');\n activity = param.get('activity');\n try:\n self.driver.start_activity(package,activity);\n message = self.feedback.feedback_action_ok(step);\n except BaseException,e:\n message = self.feedback.feedback_action_fail(step,str(e),ignore_error_handle);\n finally:\n return message;", "def getactivity(self) -> Optional[ba.Activity]:\n if self._activity is None:\n return None\n return self._activity()", "def activities(self):\r\n return resources.Activities(self)", "def action(self):\n return self.rowTime.activity", "def unserialise_activity_json(pathName):\n to_return = []\n\n try:\n with open(pathName) as json_file:\n json_data = json.load(json_file)\n\n for activity in json_data[\"data\"]:\n to_return.append(\n Activity(\n activity[\"ComInsee\"],activity[\"ComLib\"],\n activity[\"EquipementId\"],activity[\"EquNbEquIdentique\"],\n activity[\"ActCode\"],\n activity[\"ActLib\"],activity[\"EquActivitePraticable\"],\n activity[\"EquActivitePratique\"],\n activity[\"EquActiviteSalleSpe\"],\n activity[\"ActNivLib\"]\n )\n )\n except FileNotFoundError:\n print(\"bad path of activity json file\")\n except KeyError:\n print(\"bad json file, see documentation of activity for see how is construct this object\")\n return to_return", "def test_get_activity(self):\n activity = self.client.get_activity(96089609)\n self.assertEquals('El Dorado 
County, CA, USA', activity.location_city)\n\n self.assertIsInstance(activity.start_latlng, attributes.LatLon)\n self.assertAlmostEquals(-120.4357631, activity.start_latlng.lon, places=2)\n self.assertAlmostEquals(38.74263759999999, activity.start_latlng.lat, places=2)\n\n self.assertIsInstance(activity.map, model.Map)\n\n self.assertIsInstance(activity.athlete, model.Athlete)\n self.assertEquals(1513, activity.athlete.id)\n\n #self.assertAlmostEqual(first, second, places, msg, delta)\n # Ensure that iw as read in with correct units\n self.assertEquals(22.5308, float(uh.kilometers(activity.distance)))", "def get_input(activity_task):\n try:\n input = json.loads(activity_task[\"input\"])\n except KeyError:\n input = None\n return input", "def return_activity(end):\n if end == \"Seattle\":\n return Seattle_Activities\n if end == \"San Francisco\":\n return SanFrancisco_Activities\n if end == \"Los Angeles\":\n return LosAngeles_Activities\n if end == \"Las Vegas\":\n return LasVegas_Activities\n if end == \"Portland\":\n return Portland_Activities\n else:\n return SanDiego_Activities", "def load_vocal_activity(fhandle: TextIO) -> annotations.EventData:\n begs = [] # timestamps of vocal-instrument activity beginnings\n ends = [] # timestamps of vocal-instrument activity endings\n events = [] # vocal-instrument activity labels\n\n reader = csv.reader(fhandle, delimiter=\"\\t\")\n raw_data = []\n for line in reader:\n if line[0] != \"Piece No.\":\n raw_data.append(line)\n\n for i in range(len(raw_data)):\n # Parsing vocal-instrument activity as intervals (beg, end, event)\n if raw_data[i] != raw_data[-1]:\n begs.append(float(raw_data[i][0]))\n ends.append(float(raw_data[i + 1][0]))\n events.append(raw_data[i][1])\n\n return annotations.EventData(np.array([begs, ends]).T, \"s\", events, \"open\")", "def activities(self):\r\n return v3.Activities(self)", "def test_get_activity_occurrence_details(self):\n pass", "def update_activity():\n pass", "def _activity(\n run, baseline_activity=0., baseline_sigma=3.0,\n trace_type='deconvolved'):\n if trace_type != 'deconvolved':\n raise ValueError(\n 'Temporal classifier only implemented for deconvolved data.')\n\n if run.run_type == 'spontaneous' and 'sated' in run.tags:\n runs = run.parent.runs(run_types=['spontaneous'], tags=['sated'])\n spontaneous = True\n elif run.run_type == 'spontaneous' and 'hungry' in run.tags:\n runs = run.parent.runs(run_types=['spontaneous'], tags=['hungry'])\n spontaneous = True\n elif run.run_type == 'training':\n runs = run.parent.runs(run_types=['training'])\n spontaneous = False\n else:\n raise ValueError(\n 'Unknown run_type and tags, not sure how to calculate activity.')\n\n baseline, variance, outliers = None, None, None\n if spontaneous:\n popact, outliers = [], []\n for r in runs:\n t2p = r.trace2p()\n pact = t2p.trace('deconvolved')\n fmin = t2p.lastonset()\n mask = t2p.inactivity()\n mask[:fmin] = False\n\n if len(popact):\n popact = np.concatenate([popact, pact[:, mask]], axis=1)\n else:\n popact = pact[:, mask]\n\n trs = t2p.trace('deconvolved')[:, fmin:]\n cellact = np.nanmean(trs, axis=1)\n outs = cellact > np.nanmedian(cellact) + 2*np.std(cellact)\n\n if len(outliers) == 0:\n outliers = outs\n else:\n outliers = np.bitwise_or(outliers, outs)\n\n if len(popact):\n popact = np.nanmean(popact[np.invert(outliers), :], axis=0)\n\n baseline = np.median(popact)\n variance = np.std(popact)\n outliers = outliers\n else:\n popact = []\n for r in runs:\n t2p = r.trace2p()\n ncells = t2p.ncells\n pact = 
np.nanmean(t2p.trace('deconvolved'), axis=0)\n skipframes = int(t2p.framerate*4)\n\n for cs in ['plus*', 'neutral*', 'minus*', 'pavlovian*']:\n onsets = t2p.csonsets(cs)\n for ons in onsets:\n pact[ons:ons+skipframes] = np.nan\n popact = np.concatenate([popact, pact[np.isfinite(pact)]])\n\n if len(popact):\n # baseline = np.median(popact)\n\n # Exclude extremes\n percent = 2.0\n popact = np.sort(popact)\n trim = int(percent*popact.size/100.)\n popact = popact[trim:-trim]\n\n baseline = np.median(popact) # Moved to after extreme exclusion on 190326\n variance = np.std(popact)\n outliers = np.zeros(ncells, dtype=bool)\n\n if baseline is None:\n baseline, variance = 0.01, 0.08*baseline_sigma\n else:\n baseline *= baseline_activity\n variance *= baseline_sigma\n\n return baseline, variance, outliers", "def activity(name):\n\n name_split = name.split(\",\")\n if \"Irrigation\" in name and \"gal\" not in name_split[1]:\n n = name_split[0] + \",\" + name_split[1]\n else:\n n = name_split[0]\n\n if \" to \" in n:\n activity = n.split(\" to \")\n name = split_name(activity[0])\n produced = name[0]\n consumed = capitalize_first_letter(activity[1])\n elif \" from \" in n:\n if \")\" in n:\n open_paren_split = n.split(\"(\")\n capitalized_string = capitalize_first_letter(open_paren_split[0])\n close_paren_split = open_paren_split[1].split(\")\")\n produced_split = close_paren_split[1].split(\" from \")\n produced = capitalize_first_letter(produced_split[1].strip())\n consumed = capitalized_string.strip() + \" \" + close_paren_split[0].strip()\n else:\n activity = n.split(\" from \")\n name = split_name(activity[0])\n produced = capitalize_first_letter(activity[1])\n consumed = name[0].strip()\n elif \"consumptive\" in n:\n if \")\" in n:\n open_paren_split = n.split(\"(\")\n capitalized_string = capitalize_first_letter(open_paren_split[0])\n close_paren_split = open_paren_split[1].split(\")\")\n produced = capitalized_string.strip() + \" \" + close_paren_split[0].strip()\n consumed = None\n else:\n split_case = split_name(n)\n consumed = None\n produced = capitalize_first_letter(split_case[0])\n elif \")\" in n:\n produced = None\n open_paren_split = n.split(\"(\")\n capitalized_string = capitalize_first_letter(open_paren_split[0])\n close_paren_split = open_paren_split[1].split(\")\")\n consumed = capitalized_string.strip() + \" \" + close_paren_split[0].strip()\n elif \"total deliveries\" in n:\n split_case = split_name(n)\n consumed = None\n produced = capitalize_first_letter(split_case[0])\n elif \"Self-supplied\" in n:\n split_case = split_name(n)\n produced = None\n consumed = capitalize_first_letter(split_case[1])\n else:\n split_case = split_name(n)\n produced = None\n consumed = capitalize_first_letter(split_case[0])\n return pd.Series([produced, consumed])", "def show_activities(self): \n database = Database('data/database.db')\n activities = database.read_activities()\n view = Template(filename=\"view/template.html\", lookup=lookup)\n \n \n return view.render(\n rows = [[item.number, item.name] for item in activities],\n pageTitle = \"Activités\",\n tableTitle = \"Liste de toutes les activités\",\n ths = [\"Numéro\", \"Nom\"]\n )", "def get_activities(self, type=None):\n return flattrclient._get_query_dict(type=type)", "def create_activity(request: Request, activity_type: str, msg_context: dict, object_id: UUID, user: User):\n dbsession = Session.object_session(user)\n\n stream = Stream.get_or_create_user_stream(user)\n\n a = Activity()\n a.object_id = object_id\n a.activity_type = 
activity_type\n a.msg_context = msg_context\n\n stream.activities.append(a)\n dbsession.flush()\n\n return a", "def create_activity(activity_id):\n\n # get the activity\n activity = Activity.query.filter_by(id=activity_id).first()\n\n if activity:\n # now get a valid token for the associated user\n access_token = refresh_access_token(user_id=activity.user_id)\n if access_token is None:\n # an error must have occurred\n current_app.logger.error('Cannot save activity {} to Strava as unable to refresh token'.format(activity_id))\n # let the app continue on as error has been logged\n return 200\n\n url = 'https://www.strava.com/api/v3/activities'\n headers = {'Authorization': 'Bearer {}'.format(access_token)}\n\n data = construct_strava_activity_data(activity)\n response = requests.post(url, headers=headers, data=data)\n strava_athlete = StravaAthlete.query.filter_by(user_id=activity.user_id).first()\n log_strava_event(strava_athlete.athlete_id, \"Activity\")\n\n # check the response, if there has been an error then need to log this\n if response.status_code != 200:\n current_app.logger.error('Strava Status code: {}'.format(response.status_code))\n current_app.logger.error('Strava Response: {}'.format(response.json))\n return response.status_code\n # log an error if the activity doesn't exist but allow app to continue on\n current_app.logger.error('Activity {} does not exist'.format(activity_id))\n return 200", "def get_activities_by_session_id(self, session_id):\n return self._db.get_all(\"\"\"\n SELECT * FROM activity_log\n WHERE session_id = ?\"\"\", (session_id, ))", "def test_days_weeks_activity():\n assert analytics.activity('daily', yoga_trackings(), 1) == 17\n assert analytics.activity('weekly', run_trackings(), 1) == 4\n assert analytics.activity('daily', read_trackings(), 1) == 18\n assert analytics.activity('daily', meditation_trackings(), 1) == 15\n assert analytics.activity('weekly', french_trackings(), 1) == 5", "def getOLAPSource():", "def get_activity_name(activity):\n for key in activity.attrib.keys():\n if key.endswith(\"name\"):\n return activity.attrib[key]", "def get_activitytosector_mapping(source, fbsconfigpath=None):\n from flowsa.settings import crosswalkpath\n # identify mapping file name\n mapfn = f'NAICS_Crosswalk_{source}'\n\n # if FBS method file loaded from outside the flowsa directory, check if\n # there is also a crosswalk\n external_mappingpath = f\"{fbsconfigpath}activitytosectormapping/\"\n if os.path.exists(external_mappingpath):\n activity_mapping_source_name = get_flowsa_base_name(\n external_mappingpath, mapfn, 'csv')\n if os.path.isfile(f\"{external_mappingpath}\"\n f\"{activity_mapping_source_name}.csv\"):\n log.info(f\"Loading {activity_mapping_source_name}.csv \"\n f\"from {external_mappingpath}\")\n crosswalkpath = external_mappingpath\n activity_mapping_source_name = get_flowsa_base_name(\n crosswalkpath, mapfn, 'csv')\n mapping = pd.read_csv(f'{crosswalkpath}{activity_mapping_source_name}.csv',\n dtype={'Activity': 'str', 'Sector': 'str'})\n # some mapping tables will have data for multiple sources, while other\n # mapping tables are used for multiple sources (like EPA_NEI or BEA\n # mentioned above) so if find the exact source name in the\n # ActivitySourceName column use those rows if the mapping file returns\n # empty, use the original mapping file subset df to keep rows where\n # ActivitySourceName matches source name\n mapping2 = mapping[mapping['ActivitySourceName'] == source].reset_index(\n drop=True)\n if len(mapping2) > 0:\n return 
mapping2\n else:\n return mapping", "def load_exported_activities() -> List[DiscoveredActivities]:\n activities = []\n activities.extend(discover_actions(\"chaosgcp.gke.nodepool.actions\"))\n activities.extend(discover_probes(\"chaosgcp.gke.nodepool.probes\"))\n activities.extend(discover_actions(\"chaosgcp.sql.actions\"))\n activities.extend(discover_probes(\"chaosgcp.sql.probes\"))\n activities.extend(discover_probes(\"chaosgcp.storage.probes\"))\n activities.extend(discover_actions(\"chaosgcp.cloudbuild.actions\"))\n activities.extend(discover_probes(\"chaosgcp.cloudbuild.probes\"))\n activities.extend(discover_actions(\"chaosgcp.cloudrun.actions\"))\n activities.extend(discover_probes(\"chaosgcp.cloudrun.probes\"))\n activities.extend(discover_probes(\"chaosgcp.monitoring.probes\"))\n activities.extend(discover_probes(\"chaosgcp.cloudlogging.probes\"))\n activities.extend(discover_probes(\"chaosgcp.artifact.probes\"))\n activities.extend(discover_actions(\"chaosgcp.lb.actions\"))\n return activities", "def activity(self, activity):\n if activity is None:\n raise ValueError(\"Invalid value for `activity`, must not be `None`\") # noqa: E501\n\n self._activity = activity", "def get_activity(self, type=False,user=False,date_min=False,date_max=False):\n\n if type:\n return self.execute(TABELLE['activity']['select']['by_type'],(type,))\n elif user:\n return self.execute(TABELLE['activity']['select']['by_user'],(user,))\n elif date_min:\n return self.execute(TABELLE['activity']['select']['by_date_min'],(date_min,))\n elif date_max:\n return self.execute(TABELLE['activity']['select']['by_date_max'],(date_max,))\n # se le chiavi sono tutte false allora prendo tutte le activity\n elif not type and not user and not date_max and not date_min:\n return self.execute(TABELLE['activity']['select']['all'])\n else:\n return False", "def transform_data_for_user_activities(df: DataFrame) -> Tuple[DataFrame, str]:\n user_activities_df = df[['user_id', 'time_stamp', 'url_level1', 'url_level2', 'url_level3', 'activity']]\n return user_activities_df, USER_ACTIVITIES_OUTPUT_FILENAME", "def _activity_endpoint(self, athlete, filename):\n return '{host}{athlete}/activity/{filename}'.format(\n host=self.host,\n athlete=quote_plus(athlete),\n filename=filename\n )", "def add_strava_data_to_activities(self):\n \n try:\n logging.info(\"Parsing Strava data and getting it ready for analysis.\")\n\n strava_activities = self.strava_fetcher.fetch_strava_activities()\n if strava_activities == None:\n logging.info(\"No Strava data to add to all activities\")\n return\n\n strava_data = json.dumps(strava_activities)\n \n # load strava data straight up from json, not doing any json normalization\n strava_df = pd.read_json(strava_data)\n strava_df = strava_df[['distance', \n 'elapsed_time', \n 'start_date_local', \n 'location_city', \n 'average_speed', \n 'max_speed', \n 'type']]\n\n # set up 5 key metrics\n # note we're using the enum value\n strava_df['activity_type'] = strava_df['type'].apply(lambda x: self.convert_strava_activity_type(x).value)\n strava_df['source'] = ActivitySource.STRAVA.value\n strava_df['start_timestamp'] = strava_df['start_date_local'].apply(lambda x: parse(x, tzinfos={\"America/Vancouver\"}))\n # strava distances are in meters\n strava_df['distance_in_km'] = strava_df['distance'].apply(lambda x: x / 1000)\n strava_df['duration_in_min'] = strava_df['elapsed_time'].apply(lambda x: x / 60)\n\n # filter out extraneous columns\n strava_df = strava_df.filter(self.data_frame_columns)\n\n # add to 
activities\n self.all_activities = self.all_activities.append(strava_df, sort=True)\n\n logging.info(\"Done parsing Strava data.\")\n except Exception:\n logging.exception(\"Could not parse Strava data\")", "def get_activity_feed(context, term):\n if not term:\n raise ValueError('You have to provide a search term!')\n url = '{}{}'.format(context.test_url, term)\n response = requests.get(url, timeout=context.request_timeout)\n context.response = response\n logging.debug('Request URL: %s', response.request.url)\n logging.debug('Request headers:\\n%s', pformat(response.request.headers))\n logging.debug('Response headers:\\n%s', pformat(response.headers))\n logging.debug('Response content:\\n%s', pformat(response.json()))", "def separate_activity_types(self):\n # Read in the CSV file and make a DataFrame.\n try :\n all_actsDF = pd.read_csv('strava-activities.csv', index_col=\"id\", parse_dates=[\"start_date\", \"start_date_local\"])\n except FileNotFoundError :\n print(\"separate_activity_types couldn't find strava-activities.csv.\")\n else :\n # We need to make sure that all_actsDF has all of the columns that are referenced\n # in the loop below. Otherwise, the code might throw a key error. For example, if someone\n # has no heart rate data at all, stava-activities.csv won't have a max_heartrate column,\n # causing the code to blow up when it looks for that column. So just add empty columns\n # as needed.\n necessary_columns = [\"distance\", \"total_elevation_gain\", \"elapsed_time\", \"moving_time\", \"max_speed(mph)\", \"max_speed(kph)\", \"start_date\", \"elevation_gain(ft)\", \"max_heartrate\"]\n for col in necessary_columns :\n if not col in all_actsDF.columns :\n all_actsDF[col] = np.nan\n\n # Get the list of unique activity types (Ride, Hike, Kayak, etc.)\n act_types = all_actsDF[\"type\"].unique()\n # Get the list of unique years in the data.\n # Extract each year out of the data and sort them.\n years = pd.Series(d.year for d in all_actsDF[\"start_date\"]).unique()\n years.sort()\n\n # Create a dataframe that will hold summary statistics for each activity.\n # The index or the set of rows is the activity types. The columns are the stats\n # we are interested in.\n stats = [\"Total Distance (miles)\", \"Total Distance (km)\", \"Total Elev. Gain (meters)\", \"Total Elev. Gain (ft)\", \"Total Elev. Gain (miles)\", \"Total Elev. Gain (km)\", \"Total Duration (hours)\", \"Total Duration (days)\", \"Average Duration (min)\", \"Total Moving Time (hours)\", \"Total Moving Time (days)\", \"Average Moving Time (min)\", \"Average Speed (mph)\", \"Average Speed (kph)\", \"Max Speed (mph)\", \"Max Speed (kph)\", \"Max Speed Date\", \"Max Elevation Gain(ft)\", \"Max Elevation Gain(m)\", \"Max Elevation Gain Date\", \"Max Heart Rate\", \"Max HR Date\"]\n summaryDF = pd.DataFrame(index=act_types, columns=stats)\n # Loop through all of the activity types and add info into the summary file.\n # Also create a csv for each activity that has the Strava info for that activity only.\n for act in act_types:\n actDF = all_actsDF[all_actsDF[\"type\"] == act]\n actDF.to_csv(act + \".csv\")\n # Add the summary stats\n summaryDF.loc[act, \"Total Distance (miles)\"] = actDF[\"distance\"].sum() * 0.000621371\n summaryDF.loc[act, \"Total Distance (km)\"] = actDF[\"distance\"].sum() / 1000\n summaryDF.loc[act, \"Total Elev. Gain (meters)\"] = actDF[\"total_elevation_gain\"].sum()\n summaryDF.loc[act, \"Total Elev. Gain (ft)\"] = actDF[\"total_elevation_gain\"].sum() * 3.28084\n summaryDF.loc[act, \"Total Elev. 
Gain (miles)\"] = actDF[\"total_elevation_gain\"].sum() * 3.28084/5280\n summaryDF.loc[act, \"Total Elev. Gain (km)\"] = actDF[\"total_elevation_gain\"].sum() / 1000\n summaryDF.loc[act, \"Total Duration (hours)\"] = actDF[\"elapsed_time\"].sum() / 3600\n summaryDF.loc[act, \"Total Duration (days)\"] = actDF[\"elapsed_time\"].sum() / (3600*24)\n summaryDF.loc[act, \"Average Duration (min)\"] = actDF[\"elapsed_time\"].mean() / 60\n summaryDF.loc[act, \"Total Moving Time (hours)\"] = actDF[\"moving_time\"].sum() / 3600\n summaryDF.loc[act, \"Total Moving Time (days)\"] = actDF[\"moving_time\"].sum() / (3600*24)\n summaryDF.loc[act, \"Average Moving Time (min)\"] = actDF[\"moving_time\"].mean() / 60\n summaryDF.loc[act, \"Average Speed (mph)\"] = (actDF[\"distance\"].sum() / actDF[\"moving_time\"].sum()) * 2.23694\n summaryDF.loc[act, \"Average Speed (kph)\"] = (actDF[\"distance\"].sum() / actDF[\"moving_time\"].sum()) * 3.6\n summaryDF.loc[act, \"Max Speed (mph)\"] = actDF[\"max_speed(mph)\"].max()\n summaryDF.loc[act, \"Max Speed (kph)\"] = actDF[\"max_speed(kph)\"].max()\n # We have to be careful anytime we want a specific date that something occured because\n # it may never have occurred and the result may be empty. That's why we do the following\n # five lines.\n s = actDF.loc[actDF[\"max_speed(mph)\"] == actDF[\"max_speed(mph)\"].max(), \"start_date\"]\n if not s.empty :\n summaryDF.loc[act, \"Max Speed Date\"] = s.iloc[0].date()\n else :\n summaryDF.loc[act, \"Max Speed Date\"] = None\n summaryDF.loc[act, \"Max Elevation Gain(ft)\"] = actDF[\"elevation_gain(ft)\"].max()\n summaryDF.loc[act, \"Max Elevation Gain(m)\"] = actDF[\"total_elevation_gain\"].max()\n s = actDF.loc[actDF[\"elevation_gain(ft)\"] == actDF[\"elevation_gain(ft)\"].max(), \"start_date\"]\n if not s.empty :\n summaryDF.loc[act, \"Max Elevation Gain Date\"] = s.iloc[0].date()\n else :\n summaryDF.loc[act, \"Max Elevation Gain Date\"] = None\n summaryDF.loc[act, \"Max Heart Rate\"] = actDF[\"max_heartrate\"].max()\n # We have to be careful with max heart rate because not all activities will have HR data.\n # The following code makes sure there is HR data before trying to access it.\n s = actDF.loc[actDF[\"max_heartrate\"] == actDF[\"max_heartrate\"].max(), \"start_date\"]\n if not s.empty :\n summaryDF.loc[act, \"Max HR Date\"] = s.iloc[0].date()\n else:\n summaryDF.loc[act, \"Max HR Date\"] = None\n\n # Summarize each activity by year\n act_summaryDF = pd.DataFrame(index=stats, columns = years)\n for y in years :\n subDF = actDF[(actDF[\"start_date\"] >= datetime.datetime(year = y, month = 1, day = 1, tzinfo=pytz.utc)) & (actDF[\"start_date\"] < datetime.datetime(year = y+1, month = 1, day = 1, tzinfo=pytz.utc))]\n # Need to check that we had any of this activity in the year.\n if not subDF.empty :\n act_summaryDF.loc[\"Total Distance (miles)\", y] = subDF[\"distance\"].sum() * 0.000621371\n act_summaryDF.loc[\"Total Distance (km)\", y] = subDF[\"distance\"].sum() / 1000\n act_summaryDF.loc[\"Total Elev. Gain (meters)\", y] = subDF[\"total_elevation_gain\"].sum()\n act_summaryDF.loc[\"Total Elev. Gain (ft)\", y] = subDF[\"total_elevation_gain\"].sum() * 3.28084\n act_summaryDF.loc[\"Total Elev. Gain (miles)\", y] = subDF[\"total_elevation_gain\"].sum() * 3.28084/5280\n act_summaryDF.loc[\"Total Elev. 
Gain (km)\", y] = subDF[\"total_elevation_gain\"].sum() / 1000\n act_summaryDF.loc[\"Total Duration (hours)\", y] = subDF[\"elapsed_time\"].sum() / 3600\n act_summaryDF.loc[\"Total Duration (days)\", y] = subDF[\"elapsed_time\"].sum() / (3600*24)\n act_summaryDF.loc[\"Average Duration (min)\", y] = subDF[\"elapsed_time\"].mean() / 60\n act_summaryDF.loc[\"Total Moving Time (hours)\", y] = subDF[\"moving_time\"].sum() / 3600\n act_summaryDF.loc[\"Total Moving Time (days)\", y] = subDF[\"moving_time\"].sum() / (3600*24)\n act_summaryDF.loc[\"Average Moving Time (min)\", y] = subDF[\"moving_time\"].mean() / 60\n act_summaryDF.loc[\"Average Speed (mph)\", y] = (subDF[\"distance\"].sum() / subDF[\"moving_time\"].sum()) * 2.23694\n act_summaryDF.loc[\"Average Speed (kph)\", y] = (subDF[\"distance\"].sum() / subDF[\"moving_time\"].sum()) * 3.6\n act_summaryDF.loc[\"Max Speed (mph)\", y] = subDF[\"max_speed(mph)\"].max()\n act_summaryDF.loc[\"Max Speed (kph)\", y] = subDF[\"max_speed(kph)\"].max()\n s = subDF.loc[subDF[\"max_speed(mph)\"] == subDF[\"max_speed(mph)\"].max(), \"start_date\"]\n if not s.empty:\n act_summaryDF.loc[\"Max Speed Date\", y] = s.iloc[0].date()\n else :\n act_summaryDF.loc[\"Max Speed Date\", y] = None\n\n act_summaryDF.loc[\"Max Elevation Gain(ft)\", y] = subDF[\"elevation_gain(ft)\"].max()\n act_summaryDF.loc[\"Max Elevation Gain(m)\", y] = subDF[\"total_elevation_gain\"].max()\n s = subDF.loc[subDF[\"elevation_gain(ft)\"] == subDF[\"elevation_gain(ft)\"].max(), \"start_date\"]\n if not s.empty :\n act_summaryDF.loc[\"Max Elevation Gain Date\", y] = s.iloc[0].date()\n else :\n act_summaryDF.loc[\"Max Elevation Gain Date\", y] = None\n act_summaryDF.loc[\"Max Heart Rate\", y] = subDF[\"max_heartrate\"].max()\n s = subDF.loc[subDF[\"max_heartrate\"] == subDF[\"max_heartrate\"].max(), \"start_date\"]\n if not s.empty :\n act_summaryDF.loc[\"Max HR Date\", y] = s.iloc[0].date()\n else:\n act_summaryDF.loc[\"Max HR Date\", y] = None\n # Add a few totals\n act_summaryDF.loc[\"Total Distance (miles)\", \"Total\"] = act_summaryDF.loc[\"Total Distance (miles)\"].sum()\n act_summaryDF.loc[\"Total Distance (km)\", \"Total\"] = act_summaryDF.loc[\"Total Distance (km)\"].sum()\n act_summaryDF.loc[\"Total Elev. Gain (meters)\", \"Total\"] = act_summaryDF.loc[\"Total Elev. Gain (meters)\"].sum()\n act_summaryDF.loc[\"Total Elev. Gain (ft)\", \"Total\"] = act_summaryDF.loc[\"Total Elev. Gain (ft)\"].sum()\n act_summaryDF.loc[\"Total Elev. Gain (miles)\", \"Total\"] = act_summaryDF.loc[\"Total Elev. Gain (miles)\"].sum()\n act_summaryDF.loc[\"Total Elev. Gain (km)\", \"Total\"] = act_summaryDF.loc[\"Total Elev. 
Gain (km)\"].sum()\n act_summaryDF.loc[\"Total Duration (hours)\", \"Total\"] = act_summaryDF.loc[\"Total Duration (hours)\"].sum()\n act_summaryDF.loc[\"Total Duration (days)\", \"Total\"] = act_summaryDF.loc[\"Total Duration (days)\"].sum()\n\n act_summaryDF.loc[\"Average Duration (min)\", \"Total\"] = summaryDF.loc[act, \"Average Duration (min)\"]\n act_summaryDF.loc[\"Total Moving Time (hours)\", \"Total\"] = act_summaryDF.loc[\"Total Moving Time (hours)\"].sum()\n act_summaryDF.loc[\"Total Moving Time (days)\", \"Total\"] = act_summaryDF.loc[\"Total Moving Time (days)\"].sum()\n act_summaryDF.loc[\"Average Moving Time (min)\", \"Total\"] = summaryDF.loc[act, \"Average Moving Time (min)\"]\n act_summaryDF.loc[\"Average Speed (mph)\", \"Total\"] = summaryDF.loc[act, \"Average Speed (mph)\"]\n act_summaryDF.loc[\"Average Speed (kph)\", \"Total\"] = summaryDF.loc[act, \"Average Speed (kph)\"]\n act_summaryDF.loc[\"Max Speed (mph)\", \"Total\"] = act_summaryDF.loc[\"Max Speed (mph)\"].max()\n act_summaryDF.loc[\"Max Speed (kph)\", \"Total\"] = act_summaryDF.loc[\"Max Speed (kph)\"].max()\n act_summaryDF.loc[\"Max Speed Date\", \"Total\"] = summaryDF.loc[act, \"Max Speed Date\"]\n act_summaryDF.loc[\"Max Elevation Gain(ft)\", \"Total\"] = summaryDF.loc[act, \"Max Elevation Gain(ft)\"]\n act_summaryDF.loc[\"Max Elevation Gain(m)\", \"Total\"] = summaryDF.loc[act, \"Max Elevation Gain(m)\"]\n act_summaryDF.loc[\"Max Elevation Gain Date\", \"Total\"] = summaryDF.loc[act, \"Max Elevation Gain Date\"]\n act_summaryDF.loc[\"Max Heart Rate\", \"Total\"] = summaryDF.loc[act, \"Max Heart Rate\"]\n act_summaryDF.loc[\"Max HR Date\", \"Total\"] = summaryDF.loc[act, \"Max HR Date\"]\n\n # Print the annual summary\n act_summaryDF.to_csv(act + \"-by-year.csv\")\n\n # Print the summary to a csv\n\n summaryDF.to_csv(\"strava-summary.csv\")", "def activity(self, activity):\n allowed_values = [\"PICKUP\", \"DROPOFF\", \"EXECUTE\", \"BREAK\"] # noqa: E501\n if activity not in allowed_values:\n raise ValueError(\n \"Invalid value for `activity` ({0}), must be one of {1}\" # noqa: E501\n .format(activity, allowed_values)\n )\n\n self._activity = activity", "def get_activity_list(self):\n return self._request_activity_list(self.athlete)", "async def get_user_activity(self, username: str) -> 'Response':\n headers = {\n 'Content-Type': 'application/json'\n }\n response = await self._client.request(method=RequestMethods.GET,\n url=USERS_ACTIVITY_URL.format(username=username),\n headers=headers)\n return response", "def event_activity_csv(event_id):\n limit = request.args.get('limit') or 50\n q = request.args.get('q') or None\n if q and len(q) < 3:\n q = None\n csvstream = gen_csv(get_event_activities(event_id, limit, q))\n headers = {'Content-Disposition': 'attachment; filename=activity_list.csv'}\n return Response(stream_with_context(csvstream),\n mimetype='text/csv', headers=headers)", "def get_activities(cls):\n objs = cls.objects\n return objs", "def get_activity(pt=None, activity_wt=None):\n if len(pt) != len(activity_wt):\n print(\"Pt. 
dimension doesn't match to desired dimension\")\n print('pt dim:', len(pt), 'required dim:', len(activity_wt))\n exit(1)\n arg = get_sigmoid(np.dot(pt, activity_wt))\n # arg = np.dot(pt, activity_wt)\n return arg", "def main(to_be_scheduled):\n\n tasks = order_by_ftime(to_be_scheduled)\n print select_activity(tasks)", "def search_activity(conn, request):\n\n c = conn.cursor()\n search_query = \"SELECT * FROM Activity T1 WHERE T1.Name LIKE ?\"\n c.execute(search_query, (request,))\n result = c.fetchall()\n return result", "def get(self, id):\n activity = Activity().get(id)\n if not activity:\n abort(404, \"Activity not found\")\n return activity", "def test_get_activity_template(self):\n pass", "def activities_list(self):\n self.__load_activities_from_file_into_memory()\n return self._activities_list", "def _get_future_from_activity_event(self, event):\n future = futures.Future() # state is PENDING.\n state = event['state']\n\n if state == 'scheduled':\n future._state = futures.PENDING\n elif state == 'schedule_failed':\n if event['cause'] == 'ACTIVITY_TYPE_DOES_NOT_EXIST':\n activity_type = swf.models.ActivityType(\n self.domain,\n name=event['activity_type']['name'],\n version=event['activity_type']['version'])\n logger.info('Creating activity type {} in domain {}'.format(\n activity_type.name,\n self.domain.name))\n try:\n activity_type.save()\n except swf.exceptions.AlreadyExistsError:\n logger.info(\n 'Activity type {} in domain {} already exists'.format(\n activity_type.name,\n self.domain.name))\n return None\n logger.info('failed to schedule {}: {}'.format(\n event['activity_type']['name'],\n event['cause'],\n ))\n return None\n elif state == 'started':\n future._state = futures.RUNNING\n elif state == 'completed':\n future._state = futures.FINISHED\n result = event['result']\n future._result = json.loads(result) if result else None\n elif state == 'canceled':\n future._state = futures.CANCELLED\n elif state == 'failed':\n future._state = futures.FINISHED\n future._exception = exceptions.TaskFailed(\n name=event['id'],\n reason=event['reason'],\n details=event.get('details'),\n )\n elif state == 'timed_out':\n future._state = futures.FINISHED\n future._exception = exceptions.TimeoutError(\n event['timeout_type'],\n event['timeout_value'])\n\n return future", "def create_activity(self, created_user, source, action,\n privacy=Privacy.PRIVATE, **kwargs):\n Activity = get_activity_model()\n return Activity.objects.create(\n about=self,\n action=action,\n created_user=created_user,\n source=source,\n privacy=privacy,\n **kwargs\n )", "def test_get_detailed_activity():\n tokens = get_tokens()\n activity = get_detailed_activity(4563031911, tokens)\n # this activity does not have a description\n assert activity.status_code == 200\n activity = activity.json()\n assert type(activity[\"id\"]) == int\n assert type(activity[\"distance\"]) == float\n assert type(activity[\"moving_time\"]) == int\n assert type(activity[\"elapsed_time\"]) == int\n assert type(activity[\"total_elevation_gain\"]) == float\n assert type(activity[\"elev_high\"]) == float\n assert type(activity[\"elev_low\"]) == float\n assert type(activity[\"type\"]) == str\n assert type(activity[\"start_date\"]) == str\n assert type(activity[\"average_speed\"]) == float\n assert type(activity[\"gear_id\"]) == str\n assert type(activity[\"description\"]) is type(None)\n activity = get_detailed_activity(4576599261, tokens)\n assert activity.status_code == 200\n activity = activity.json()\n # this activity has a description but I added 
it manually so there's no elev high or low\n assert type(activity[\"description\"]) == str\n\n assert type(activity[\"id\"]) == int\n assert type(activity[\"distance\"]) == float\n assert type(activity[\"moving_time\"]) == int\n assert type(activity[\"elapsed_time\"]) == int\n assert type(activity[\"total_elevation_gain\"]) == float\n assert type(activity[\"type\"]) == str\n assert type(activity[\"start_date\"]) == str\n assert type(activity[\"average_speed\"]) == float\n assert type(activity[\"gear_id\"]) == str", "def activity_logs(self) -> api.ActivityLogs:\n return self._get_model(model=api.ActivityLogs)", "def test_get_detailed_activity(self, Activity1, StravaTokens1):\n self.mock_get.return_value = Mock(ok=True)\n self.mock_get.return_value.json.return_value = Activity1\n strava_tokens = StravaTokens1\n response = get_detailed_activity(12345678987654321, strava_tokens)\n assert response.ok is True\n assert response.json() == Activity1", "def add_activity(self, activity, table):\n week = self.t.timeline[\"week\" + str(self.week)]\n self.t.add_activity(week, activity)\n self.clear_frame(table)\n self.show_table(self.t.timeline[\"week\" + str(self.week)], table)", "def create_activities(app_state) -> list:\n activity_specs = app_state.personality.get_states()\n activity_objects = []\n if len(activity_specs) == 0:\n raise ValueError(\"No activities found.\")\n for activity_spec in activity_specs:\n this_object = create_activity(activity_spec, app_state)\n activity_objects.append(this_object)\n end_state_object = create_activity(end_state_config, app_state)\n activity_objects.append(end_state_object)\n return activity_objects", "def asset_activity(self, asset_id):\n response = self._client.get('workbenches/assets/%(asset_id)s/activity',\n path_params={'asset_id': asset_id})\n return AssetActivityList.from_json(response.text)", "def activities_to_jsonfeed(activities, actor=None, title=None, feed_url=None,\n home_page_url=None):\n try:\n iter(activities)\n except TypeError:\n raise TypeError('activities must be iterable')\n\n if isinstance(activities, (dict, str)):\n raise TypeError('activities may not be a dict or string')\n\n def image_url(obj):\n return util.get_first(obj, 'image', {}).get('url')\n\n def actor_name(obj):\n return obj.get('displayName') or obj.get('username')\n\n if not actor:\n actor = {}\n\n items = []\n for activity in activities:\n obj = as1.get_object(activity) or activity\n if obj.get('objectType') == 'person':\n continue\n author = as1.get_object(obj, 'author')\n content = microformats2.render_content(\n obj, include_location=True, render_attachments=True,\n # Readers often obey CSS white-space: pre strictly and don't even line wrap,\n # so don't use it. 
https://github.com/snarfed/granary/issues/456\n white_space_pre=False)\n obj_title = obj.get('title') or obj.get('displayName')\n item = {\n 'id': obj.get('id') or obj.get('url'),\n 'url': obj.get('url'),\n 'image': image_url(obj),\n 'title': obj_title if mf2util.is_name_a_title(obj_title, content) else None,\n 'summary': obj.get('summary'),\n 'content_html': content,\n 'date_published': obj.get('published'),\n 'date_modified': obj.get('updated'),\n 'author': {\n 'name': actor_name(author),\n 'url': author.get('url'),\n 'avatar': image_url(author),\n },\n 'attachments': [],\n }\n\n for att in obj.get('attachments', []):\n url = util.get_url(att, 'stream') or util.get_url(att, 'image')\n mime = mimetypes.guess_type(url)[0] if url else None\n if (att.get('objectType') in ATTACHMENT_TYPES or\n mime and mime.split('/')[0] in ATTACHMENT_TYPES):\n item['attachments'].append({\n 'url': url or '',\n 'mime_type': mime,\n 'title': att.get('title'),\n })\n\n if not item['content_html']:\n item['content_text'] = ''\n items.append(item)\n\n return util.trim_nulls({\n 'version': 'https://jsonfeed.org/version/1',\n 'title': title or actor_name(actor) or 'JSON Feed',\n 'feed_url': feed_url,\n 'home_page_url': home_page_url or actor.get('url'),\n 'author': {\n 'name': actor_name(actor),\n 'url': actor.get('url'),\n 'avatar': image_url(actor),\n },\n 'items': items,\n }, ignore='content_text')", "def __init__(self, log=None):\n sppasBaseAnnotation.__init__(self, \"activity.json\", log)\n self.__activity = Activity()", "def test_workflows_change_stream_get(self):\n pass", "def post_activities():\n pass", "def get_activityType(activity_task):\n try:\n return activity_task[\"activityType\"][\"name\"]\n except KeyError:\n # No activityType found\n return None", "def set_current_activity(client, activity_label):\n\n id = activities_by_name[activity_label]\n func = client.start_activity(id)\n status = run_in_loop_now('start_activity', func)\n return status", "def orchestrator_function(context: df.DurableOrchestrationContext) -> List[str]:\r\n\r\n\r\n logging.debug(\"Creating the orchestrator function\")\r\n\r\n json_rule = {\r\n \"condition\": {\r\n \"wait_events\": [\"A\",\"B\"],\r\n \"logic\": \"and\"\r\n },\r\n \"satisfied\":[\r\n {\r\n \"activity_func_name\": \"SuccessActions\",\r\n \"args\": {\r\n \"name\": \"Tokyo\"\r\n }\r\n }\r\n ]\r\n }\r\n\r\n tasks = []\r\n for event in json_rule[\"condition\"][\"wait_events\"]:\r\n tasks.append(context.wait_for_external_event(event))\r\n logging.debug(\"Added event {} to list of tasks\".format(event))\r\n\r\n if json_rule[\"condition\"][\"logic\"] == 'and':\r\n logging.info(\"A logical <and> rule was found\")\r\n yield context.task_all(tasks)\r\n elif json_rule[\"condition\"][\"logic\"] == 'or':\r\n logging.info(\"A logical <or> rule was found\")\r\n yield context.task_any(tasks)\r\n\r\n output = []\r\n for action in json_rule[\"satisfied\"]:\r\n result = yield context.call_activity(action[\"activity_func_name\"], action[\"args\"])\r\n output.append(result)\r\n\r\n return output", "def query_user_activities(username):\n github_endpoint = 'https://api.github.com/users/{}/events/public'.format(username)\n return requests.get(url=github_endpoint).text", "def test_api_get_activity_by_id(self):\n # create a bucket\n res = self.register_login_get_token()\n self.assertEqual(res.status_code, 201)\n\n # create a activity\n res = self.client().post('/bucketlist/1/activities',\n headers=dict(\n Authorization=\"Bearer \" + self.access_token),\n data=self.activity)\n 
self.assertEqual(res.status_code, 201)\n # get activity created\n activity_created = json.loads(res.data.decode())\n # get activity by its ID\n res = self.client().get('/bucketlist/1/activities/{}'.format(activity_created['id']),\n headers=dict(\n Authorization=\"Bearer \" + self.access_token))\n self.assertEqual(res.status_code, 200)\n self.assertIn('Shop in', str(res.data))", "def __ui_add_new_activity(self):\n activity_id = int(input(\"Activity ID: \"))\n existing_persons_ids = self.__person_service.get_existing_persons_ids()\n string_of_participants_ids = input(\n f\"Participants' IDs (you can choose from the list: {existing_persons_ids})\\n > \")\n list_of_participants_ids = self.__ui_convert_ids_string_to_list(string_of_participants_ids)\n activity_description = input(\"Describe the activity: \")\n activity_date = {\n \"year\": int(input(\"Year: \")),\n \"month\": int(input(\"Month: \")),\n \"day\": int(input(\"Day: \"))\n }\n activity_time = int(input(\"Time: \"))\n\n self.__activity_service.service_add_activity(activity_id,\n list_of_participants_ids,\n activity_date,\n activity_time,\n activity_description)\n print(\"Activity successfully added to your agenda!\\n\")", "def get_activities_dictionary(self):\r\n activities_dict_list = list()\r\n activities = self.get_specific_node_list('activity')\r\n for activity in activities:\r\n activities_dict = dict()\r\n activity_name = None\r\n category = None\r\n for key, val in activity.attrib.iteritems():\r\n if \"}name\" in key:\r\n activity_name = val.split(\".\")[-1]\r\n break\r\n if activity_name:\r\n intent_filter_node = self.get_specific_node_list('intent-filter', root_node=activity)\r\n if len(intent_filter_node) == 1:\r\n categories_nodes = self.get_specific_node_list('category', root_node=intent_filter_node[0])\r\n category = self.get_category_value(categories_nodes)\r\n else:\r\n category = None\r\n activities_dict[\"name\"] = activity_name\r\n activities_dict[\"category\"] = category\r\n activities_dict_list.append(activities_dict)\r\n return activities_dict_list", "def test_activity_flag(self, example_staypoints):\n # take out staypoint 6 that should have been merged with 2, 15\n sp = example_staypoints\n data = [True, True, True, True, False, True, True, True]\n idx = [1, 2, 3, 5, 6, 7, 15, 80]\n activities = pd.Series(data, index=idx)\n sp[\"activity\"] = activities\n sp, _ = sp.as_staypoints.generate_locations(\n method=\"dbscan\",\n epsilon=10,\n num_samples=2,\n distance_metric=\"haversine\",\n agg_level=\"user\",\n activities_only=True,\n )\n assert sp.loc[1, \"location_id\"] == sp.loc[15, \"location_id\"]\n assert sp.loc[2, \"location_id\"] is pd.NA", "def _input_fn(input_pipeline_context=None):\n return _create_dataset(options, is_training, input_pipeline_context)", "def projects_activity_json():\n limit = request.args.get('limit') or 10\n q = request.args.get('q') or None\n if q and len(q) < 3:\n q = None\n return jsonify(activities=get_event_activities(None, limit, q))" ]
[ "0.652813", "0.64412045", "0.6052704", "0.60521966", "0.59815174", "0.5974512", "0.58580536", "0.58321756", "0.57499844", "0.57043284", "0.5635826", "0.5635826", "0.55382925", "0.5494571", "0.5475397", "0.5416918", "0.53558916", "0.5349023", "0.53315115", "0.5306385", "0.52927756", "0.52829766", "0.5280142", "0.526133", "0.5234031", "0.5233257", "0.5224578", "0.52213764", "0.5152427", "0.50889534", "0.5088881", "0.5077504", "0.5054207", "0.5054207", "0.5054207", "0.5054207", "0.50397015", "0.5028175", "0.50210446", "0.5020251", "0.5012177", "0.50024515", "0.49779102", "0.49752638", "0.49703896", "0.49683043", "0.49276838", "0.49094105", "0.49091873", "0.49069", "0.48994237", "0.48738348", "0.48703706", "0.48596677", "0.48569995", "0.48563114", "0.48389518", "0.4815986", "0.4814185", "0.48106048", "0.48104224", "0.48095426", "0.48080602", "0.4800406", "0.47959152", "0.47923842", "0.47923464", "0.47896457", "0.47895682", "0.47891492", "0.4783886", "0.4778258", "0.47682187", "0.47659743", "0.47575706", "0.4753452", "0.47521695", "0.47451818", "0.47301513", "0.4727996", "0.47201246", "0.4716863", "0.47164398", "0.47107252", "0.47079998", "0.47067478", "0.4692556", "0.46838447", "0.46745843", "0.4668599", "0.46670887", "0.46659473", "0.46645206", "0.4658772", "0.46414104", "0.46403554", "0.46399978", "0.46350905", "0.4632952", "0.4631038" ]
0.64874995
1
get constraints of the current layer
def get_constraints(self, prev_layer): constraints = [] if self.activation is not None: constraints += self.activation.get_constraints(self, prev_layer) else: # for linear activations current_constraints = [] for channel_indx in range(self.n_in_channels): upper_bound, _ = prev_layer.get_bounds(channel_indx) critical_prob = prev_layer.get_critical_neurons(channel_indx) if critical_prob is None: keep_upper_bound = 0 else: keep_upper_bound = cp.multiply(1 - critical_prob, upper_bound) current_constraints += [ self.layer_input[channel_indx] == prev_layer.get_computation_layer(channel_indx) - keep_upper_bound ] constraints += self.create_constraint( f"{self.name}_linear", current_constraints ) if prev_layer.compute_critical_neurons: constraints += self.create_constraint( f"neuron_importance_bounds_{prev_layer.name}", [prev_layer.neuron_importance >= 0, prev_layer.neuron_importance <= 1], ) return constraints
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_constraints(self):\n return self.constraints", "def constraints(self):\n return self._constraints", "def constraints(self):\n return self._constraints", "def get_constraints(self):\n\n return vertcat(*self.g), self.g_min, self.g_max", "def getConstraint(self):\n return self.gk, self.g_mink, self.g_maxk", "def get_basicConstraints(self):\n\n return self.get_POW().getBasicConstraints()", "def constraints(self):\n constraints = np.concatenate( (np.ravel(self.noise_var_constraint), \n self.kern.constraints), axis=0)\n return constraints", "def get_constraints(self, scaled=True, use_indices=True):\n return self._get_variables_of_type('constraint', scaled, use_indices)", "def constraints(self) -> constraints.QuantumCircuitConstraints:\n return self._constraints", "def get_constraints(model):\n with connection.cursor() as cursor:\n return connection.introspection.get_constraints(cursor, model._meta.db_table)", "def getConstraints(self, nStates, nParams):\n # currently untested and unused\n raise NotImplementedError(\n \"constraints have not been implemented for this Experiment\")", "def constraints(self):\n ans = self.execute(self.commands.get_constraints(self.db.name, self.name))\n return [Constraint(*tup) for tup in ans]", "def constraints(self):\n ...", "def constraints(self) -> Tuple[NDArray, NDArray]:", "def getConstraint(self, *args):\n return _libsbml.Model_getConstraint(self, *args)", "def get_constraints(self):\n return ({'type': 'ineq', 'fun': lambda x: x[1] - x[2]},\n {'type': 'ineq', 'fun': lambda x: x[3] - x[4]})", "def optimization_bounds(self, topology):\n bounds_low = np.zeros(self.number_of_parameters())\n bounds_up = np.zeros(self.number_of_parameters())\n\n for pkey, parameter in self.parameters.items():\n bounds_low[pkey] = parameter.bound_low(topology)\n bounds_up[pkey] = parameter.bound_up(topology)\n\n return bounds_low, bounds_up", "def constraint(self) -> Constraint:\n return self._constraint", "def constraints(self, x):\n pass", "def get_constraint_list(self):\n constraints = []\n for i in xrange(self.num_repeats):\n # Using start_index, start each domain at the correct index when flattening out points in COBYLA.\n constraints.extend(self._domain.get_constraint_list(start_index=self.dim * i))\n return constraints", "def get_constraints(self, X_v, U_v, X_last_p, U_last_p):\n\n constraints = [\n # Boundary conditions:\n X_v[0:2, 0] == self.x_init[0:2],\n X_v[2:4, 0] == self.x_init[2:4],\n X_v[4, 0] == self.x_init[4],\n X_v[5, 0] == self.x_init[5],\n\n X_v[:, -1] == self.x_final,\n\n # State constraints:\n cvx.abs(X_v[4, :]) <= self.t_max,\n cvx.abs(X_v[5, :]) <= self.w_max,\n X_v[1, :] >= 0,\n\n # Control constraints:\n cvx.abs(U_v[0, :]) <= self.max_gimbal,\n U_v[1, :] >= self.T_min,\n U_v[1, :] <= self.T_max,\n ]\n return constraints", "def init_constraint_list(self):\n constraints = []\n for row, equ_val, rhs_val in \\\n zip(self.matrix, self.equ_vec, self.rhs_vec):\n\n constraints.append({'type': self.get_eq_type(equ_val),\n 'fun': lambda x: rhs_val - np.dot(row, x)})\n\n bounds = Bounds(self.low_bounds, self.upper_bounds)\n\n return constraints, bounds", "def table_constraints(self) -> 'outputs.TableConstraintsResponse':\n return pulumi.get(self, \"table_constraints\")", "def get_bounds_parameters(self):\n bounds = []\n bounds += self.var_noise.bounds\n bounds += self.mean.bounds\n bounds += self.kernel.get_bounds_parameters()\n\n return bounds", "def constraints(self):\n # Turn softmax output to categories.\n predictions = (1 + tf.sign(self.predictions)) / 
2\n\n # Set the constraint to zero.\n self.constraint = 0\n ct = list()\n\n # Compute DIDI constraint.\n for I in self.I_train:\n N = tf.reduce_sum(tf.cast(I >= 0, dtype=tf.float32))\n Np = tf.reduce_sum(I)\n a = (tf.reduce_sum(predictions) / N)\n b = (tf.reduce_sum(I * predictions) / Np)\n\n tmp = tf.cond(Np > 0, lambda: 2 * (a - b), lambda: 0.0)\n ct.append(tf.abs(tmp))\n\n # ConstrainedMinimizationProblems must always provide their constraints in\n # the form (tensor <= 0).\n # return self.constraint - self.constraint_value\n return sum(ct) - self.constraint_value", "def bounds(self):\n return self.kernel.bounds", "def getListOfConstraints(self, *args):\n return _libsbml.Model_getListOfConstraints(self, *args)", "def getTailConstraints(self, featureName):\n # start with an empty list of constraints\n lstConstraints = []\n # loop through all constraints\n for constraint in self.constraints:\n # if the feature name appears in the tail of the constraint\n if featureName == constraint.tail.name:\n # add the constraint to our list\n lstConstraints.append(constraint)\n # return our list of constraints\n return lstConstraints", "def build_constraints_boundaries(self):\n\n # Trapezoidal and Hermite-Simpson methods can't compute\n # defects at the last node contrary to pseudospectral methods\n coll_method = self.options['tr_method'] in [\n 'trapezoidal', 'hermite-simpson']\n n_nodes = self.problem.prm['n_nodes'] - \\\n 1 if coll_method else self.problem.prm['n_nodes']\n\n # Defects lower and upper boundaries\n defects_low = np.zeros(\n self.problem.prm['n_states'] * n_nodes)\n defects_upp = np.zeros(\n self.problem.prm['n_states'] * n_nodes)\n\n # Path lower and upper boundaries\n path_low = np.hstack([self.problem.low_bnd.path]\n * (self.problem.prm['n_nodes']))\n path_upp = np.hstack([self.problem.upp_bnd.path]\n * (self.problem.prm['n_nodes']))\n\n # Events lower and upper boundaries\n event_low = self.problem.low_bnd.event\n event_upp = self.problem.upp_bnd.event\n\n # Assembly of the lower and upper boundaries vectors\n low = np.concatenate((defects_low, path_low, event_low))\n upp = np.concatenate((defects_upp, path_upp, event_upp))\n\n return low, upp", "def get_calculated_constraints(\n self,\n system_paasta_config: SystemPaastaConfig,\n service_namespace_config: ServiceNamespaceConfig,\n ) -> List[Constraint]:\n constraints = self.get_constraints()\n if constraints is not None:\n return constraints\n else:\n constraints = self.get_extra_constraints()\n constraints.extend(\n self.get_routing_constraints(\n service_namespace_config=service_namespace_config,\n system_paasta_config=system_paasta_config,\n )\n )\n constraints.extend(\n self.get_deploy_constraints(\n blacklist=self.get_deploy_blacklist(),\n whitelist=self.get_deploy_whitelist(),\n system_deploy_blacklist=system_paasta_config.get_deploy_blacklist(),\n system_deploy_whitelist=system_paasta_config.get_deploy_whitelist(),\n )\n )\n constraints.extend(self.get_pool_constraints())\n constraints.extend(\n self.get_hostname_unique_constraint(\n system_paasta_config=system_paasta_config\n )\n )\n return constraints", "def get_bounds(self, parameter_name=None):\n if parameter_name is None:\n return [self.get_bounds(p) for p in self.shape_parameters.keys()]\n elif parameter_name in list(self.__likelihood.rate_parameters.keys()) + list(self.__likelihood.shape_parameters.keys()):\n return self.__likelihood.get_bounds(parameter_name)\n # in the newly added parameters\n else:\n anchor_settings = 
list(self.shape_parameters[parameter_name][0].keys())\n return min(anchor_settings), max(anchor_settings)", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def get_params_bounds(self) -> np.array:\n pass", "def get_bounds(self):\n return ([self.t_min] * self.dim,[self.t_max] * self.dim)", "def constraintData(self):\n pass", "def remaining_constraints(self):\r\n \r\n def iec1(state,decision,nodes):\r\n return decision['E:L']+decision['E:R_1']<=nodes['E'].get_preds_value(state)\r\n def iec2(state,decision,nodes):\r\n return decision['R_1:L']<=nodes['R_1'].get_preds_value(state)\r\n def iec3(state,decision,nodes):\r\n return decision['G:R_1']>=-(nodes['R_1'].get_preds_value(state)) \r\n def iec4(state,decision,nodes):\r\n return decision['G:L']>=0.0\r\n def iec5(state,decision,nodes):\r\n return decision['E:L']>=0.0\r\n def iec6(state,decision,nodes):\r\n return decision['E:R_1']>=0.0\r\n def iec7(state,decision,nodes):\r\n return decision['R_1:L']>=0.0\r\n\r\n Inequality_Constraints=[iec1,iec2,iec3,iec4,iec5,iec6,iec7]\r\n \r\n return Inequality_Constraints", "def getConstraints(self, featureName):\n # start with an empty list of constraints\n lstConstraints = []\n # loop through all constraints\n for constraint in self.constraints:\n # if the feature name appears in the tail of the constraint\n if featureName == constraint.tail.name:\n # add the constraint to our list\n lstConstraints.append(constraint)\n # if the feature name appears in the head of the constraint\n if featureName == constraint.head.name:\n # add the constraint to our list\n lstConstraints.append(constraint)\n # return our list of constraints\n return lstConstraints", "def get_constraints_with(self, var):\n return [c for c in self.constraints if var.name in c.var_names]", "def constraints(self) -> Tuple[NDArray, NDArray]:\n symm = not self._asym\n k = 3 + self._asym\n a = np.zeros((5, k))\n b = np.zeros(5)\n # omega\n a[0, 0] = 1.0\n # alpha >0 or alpha+gamma>0\n # alpha<1 or alpha+0.5*gamma<1\n if symm:\n a[1, 1] = 1.0\n a[2, 1] = -1.0\n else:\n a[1, 1:3] = 1.0\n a[2, 1:3] = [-1, -0.5]\n b[2] = -1.0\n # theta\n a[3, k - 1] = 1.0\n a[4, k - 1] = -1.0\n b[4] = -1.0\n\n return a, b", "def placement_constraints(self) -> Optional[Sequence['outputs.ScheduleTargetEcsParametersPlacementConstraint']]:\n return pulumi.get(self, \"placement_constraints\")", "def bounds(self):\n if self.change_dimensionality:\n return [self._bounds[0]] * self.N\n else:\n return self._bounds", "def _constraints_utility(self):\n\n def rule(model):\n total = summation(self.utilities, model.A)\n return model.A_total == total\n\n self.model.constrain_A_total = Constraint(rule=rule)\n\n def rule(model):\n total = 2 * summation(self.utilities, model.A2)\n return model.A2_total == total\n\n self.model.constrain_A2_total = Constraint(rule=rule)\n\n def rule(model):\n total = 3 * summation(self.utilities, model.A3)\n return model.A3_total == total\n\n self.model.constrain_A3_total = Constraint(rule=rule)\n\n def rule(model):\n total = 4 * summation(self.utilities, model.A4)\n return model.A4_total == total\n\n self.model.constrain_A4_total = Constraint(rule=rule)\n\n def rule(model):\n completion_bonus = self.task_completion_bonus * self.task_duration\n total = summation(completion_bonus, model.T_total)\n return model.Completion_total == total\n\n 
self.model.constrain_completion_total = Constraint(rule=rule)\n\n def rule(model):\n scaling = 0.2\n affinity = np.outer(c.AFFINITY_COGNITIVE, self.task_cognitive_load)\n\n # TODO(cathywu) replace this code when \"simple slicing\" is clarified\n zeros1 = np.zeros((1, self.num_tasks))\n zeros2 = np.zeros((2, self.num_tasks))\n zeros3 = np.zeros((3, self.num_tasks))\n\n total = summation(affinity, model.A)\n total += summation(affinity, model.A2)\n total += summation(affinity, model.A3)\n total += summation(affinity, model.A4)\n\n total += summation(np.vstack((affinity[1:, :], zeros1)), model.A2)\n total += summation(np.vstack((affinity[1:, :], zeros1)), model.A3)\n total += summation(np.vstack((affinity[1:, :], zeros1)), model.A4)\n\n total += summation(np.vstack((affinity[2:, :], zeros2)), model.A3)\n total += summation(np.vstack((affinity[2:, :], zeros2)), model.A4)\n\n total += summation(np.vstack((affinity[3:, :], zeros3)), model.A4)\n total *= scaling\n\n return model.Affinity_cognitive_total == total\n\n self.model.constrain_affinity_cognitive_total = Constraint(rule=rule)", "def get(self, *args):\n return _libsbml.ListOfConstraints_get(self, *args)", "def getHeadConstraints(self, featureName):\n # start with an empty list of constraints\n lstConstraints = []\n # loop through all constraints\n for constraint in self.constraints:\n # if the feature name appears in the tail of the constraint\n if featureName == constraint.head.name:\n # add the constraint to our list\n lstConstraints.append(constraint)\n # return our list of constraints\n return lstConstraints", "def GetEqualConstrains(self):\n return _gmat_py.Spacecraft_GetEqualConstrains(self)", "def bounds(self): # -> tuple[()]:\n ...", "def condition_bounds(self) -> Tuple[float, float]:\n raise NotImplementedError", "def _constraints_external(self):\n pass", "def bounds(self):\n return self._bboxes[0][0] #TODO: merge all coverages", "def bounds(self):\n return self.GetBounds()", "def objective_constraints(self, variables, mask, load, generation, reservations=None):\n constraint_list = []\n constraint_list += [cvx.NonPos(-variables['regu_c'])]\n constraint_list += [cvx.NonPos(-variables['regd_c'])]\n constraint_list += [cvx.NonPos(-variables['regu_d'])]\n constraint_list += [cvx.NonPos(-variables['regd_d'])]\n # p = opt_vars['dis'] - opt_vars['ch']\n # constraint_list += [cvx.NonPos(opt_vars['regd_d'] - cvx.pos(p))]\n # constraint_list += [cvx.NonPos(opt_vars['regu_c'] - cvx.neg(p))]\n if self.combined_market:\n constraint_list += [cvx.Zero(variables['regd_d'] + variables['regd_c'] - variables['regu_d'] - variables['regu_c'])]\n\n return constraint_list", "def bounds(self):\n return self._bounds", "def input_bounds(self):\n return self.__input_bounds", "def get_constraints_for_variable(self, var):\n return (constraint for constraint in self.constraints\n if var.name in [constraint.var1.name, constraint.var2.name])", "def get_bounds(self):\n raise Exception(\"Non-implemented base class method.\")", "def getOpenConstraints(self, featureName):\n # start with an empty list of constraints\n lstConstraints = []\n # loop through all constraints\n for constraint in self.constraints:\n # if the feature name appears in the tail of the constraint and the head constraint\n # is unassigned\n if (featureName == constraint.tail.name) and (constraint.head.value == 'none'):\n # add the constraint to our list\n lstConstraints.append(constraint)\n # if the feature name appears in the head of the constraint and the tail constraint\n # is unassigned\n if 
(featureName == constraint.head.name) and (constraint.tail.value == 'none'):\n # add the constraint to our list\n lstConstraints.append(constraint)\n # return our list of constraints\n return lstConstraints", "def bounds(self):\n b = []\n\n for dim in self.dimensions:\n if dim.size == 1:\n b.append(dim.bounds)\n else:\n b.extend(dim.bounds)\n\n return b", "def get_constr_pars_low(self):\n constrLow = numpy.empty(self.get_num_parameters())\n i = 0\n for p in self.parameters:\n constrLow[i] = p.get_constraint_low()\n i += 1\n return constrLow", "def bounds(self):\n return self._bboxes[0][0] #TODO: merge all coverages", "def get_bounds(self, parameter_name=None):\n if parameter_name is None:\n return [self.get_bounds(p) for p in self.shape_parameters.keys()]\n if parameter_name in self.shape_parameters:\n anchor_settings = list(self.shape_parameters[parameter_name][0].keys())\n return min(anchor_settings), max(anchor_settings)\n elif parameter_name.endswith('_rate_multiplier'):\n for source_name, allow_negative in zip(self.source_name_list,self.source_allowed_negative):\n if parameter_name.startswith(source_name) and allow_negative==True:\n return float('-inf'), float('inf')\n return 0, float('inf')\n else:\n raise InvalidParameter(\"Non-existing parameter %s\" % parameter_name)", "def getNumConstraints(self):\n return _libsbml.Model_getNumConstraints(self)", "def createConstraint(self):\n return _libsbml.Model_createConstraint(self)", "def bounds(self):\n return [(2, None)]", "def input_bounds(self):\n if self._input_bounds:\n return dict(self._input_bounds)\n return None", "def input_bounds(self):\n if self._input_bounds:\n return dict(self._input_bounds)\n return None", "def bounds(self):\n return self.min_col, self.min_row, self.max_col, self.max_row", "def objective_constraints(self, variables, mask, reservations, mpc_ene=None):\n constraint_list = []\n ice_gen = variables['ice_gen']\n on_ice = variables['on_ice']\n\n constraint_list += [cvx.NonPos(cvx.multiply(self.p_min, on_ice) - ice_gen)]\n constraint_list += [cvx.NonPos(ice_gen - cvx.multiply(self.rated_power*self.n, on_ice))]\n\n return constraint_list", "def get_constraint_array(self, x):\n return np.array(self.constraint.get_g(x)).reshape((-1, 1))", "def force_bounds(self):\n return self._min_force, self._max_force", "def get_bounds(self):\n return self._geometry.bounds", "def number_of_constraints(self):\n return len(self.constraints)", "def _constraints_other(self):\n pass", "def generate_constraints():\n return list(chain(collect_rows(), collect_columns(), collect_blocks()))", "def _constraints_variables(self):\n\n def rule(model, k):\n \"\"\"\n Total slots allocated to category k\n \"\"\"\n ind_i = model.timeslots\n ind_i2 = model.timeslots2\n ind_i3 = model.timeslots3\n ind_i4 = model.timeslots4\n ind_j = model.tasks\n cat_k_total = sum(\n model.A[i, j] * self.task_category[j, k] for i in ind_i for j in\n ind_j)\n cat_k_total += 2 * sum(\n model.A2[i, j] * self.task_category[j, k] for i in ind_i2 for j\n in ind_j)\n cat_k_total += 3 * sum(\n model.A3[i, j] * self.task_category[j, k] for i in ind_i3 for j\n in ind_j)\n cat_k_total += 4 * sum(\n model.A4[i, j] * self.task_category[j, k] for i in ind_i4 for j\n in ind_j)\n return model.C_total[k] == cat_k_total\n\n self.model.constrain_cat_duration0 = Constraint(self.model.categories,\n rule=rule)\n\n def rule(model, s, k):\n \"\"\"\n S_cat[s,k] = whether (any tasks of) category k is assigned on day s\n \"\"\"\n den = sum(self.task_category[:, k])\n ind_j = model.tasks\n total 
= sum(self.task_category[j, k] * model.S[s, j] for j in\n ind_j) / den\n # Desired: S[i,j] = ceil(total)\n # Desired: S[i,j] = 0 if total <= 0; otherwise, S[i,j] = 1\n return -EPS, model.S_cat[s, k] - total, 1 - EPS\n\n self.model.constrain_cat_days0 = Constraint(self.model.dayslots,\n self.model.categories,\n rule=rule)\n\n def rule(model, k):\n \"\"\"\n S_cat_total[k] = number of unique days in which task from\n category k were assigned\n\n More precisely:\n sum_s S_cat[s,k] == S_cat_total[k]\n \"\"\"\n ind_s = model.dayslots\n total = sum(model.S_cat[s, k] for s in ind_s)\n return model.S_cat_total[k] == total\n\n self.model.constrain_cat_days1 = Constraint(self.model.categories,\n rule=rule)", "def get_bounds():\n bounds = [\n (0.1, 0.5), # Omega_m\n (0.05, 0.15) # beta\n ]\n return np.array(bounds)", "def z3_output_constraints(self, x):\n\n constraints = []\n T = x.shape[0]\n state_dim = x.shape[1]\n output_dim = self.output_dim\n\n y = np.array([[z3.Real(\"y%i_%i\" % (i,t)) for i in range(self.output_dim)] for t in range(T)])\n\n constraints = np.array([[y[t,i] == self.output_map(x[t])[i] for i in range(output_dim)] for t in range(T)])\n constraints = (constraints.flatten()).tolist()\n\n return constraints, y", "def get_constr_pars_high(self):\n constrHi = numpy.empty(self.get_num_parameters())\n i = 0\n for p in self.parameters:\n constrHi[i] = p.get_constraint_high()\n i += 1\n return constrHi", "def bounds(self):\n return self.xmin, self.xmax, self.ymin, self.ymax", "def main_constraints(self, batch, l_suff_stats):\n\n if self.mode == 'parametric':\n kl = 'i-projection'\n else:\n kl = 'm-projection'\n policy = self.mc.get('policy', target=False)\n t_states = torch.Tensor(batch.states)\n t_policy = self.mc.get('policy', target=True)\n target_suff_stats = t_policy.forward(t_states).detach()\n\n # split constraint if wanted\n if self.split_constraint:\n\n num_dims = target_suff_stats.size()[1]\n constraints = []\n for d in range(num_dims):\n c = (self.v[d] - policy.kl_divergence(batch, l_suff_stats[d+1], kl, 'mean'))\n constraints.append(c)\n\n else:\n constraints = [(self.v[0] - policy.kl_divergence(batch, l_suff_stats[0], kl, 'mean'))]\n\n return constraints", "def get_soft_bounds(self):\n if self.bounds is None:\n hl,hu=(None,None)\n else:\n hl,hu=self.bounds\n\n if self._softbounds is None:\n sl,su=(None,None)\n else:\n sl,su=self._softbounds\n\n \n if sl is None: l = hl\n else: l = sl\n\n if su is None: u = hu\n else: u = su\n\n return (l,u)", "def param_bounds(self) -> Optional[Sequence[Tuple[float, float]]]:\n return [(-1.0, 1.0)] * len(list(self.params()))", "def internal_bounds(self) -> tuple[float, float, float, float]:\n xres, yres = self.res\n w, s, e, n = self.bounds\n y0, y1 = (n, s) if yres < 0 else (s, n)\n x0, x1 = (e, w) if xres < 0 else (w, e)\n return x0, y0, x1, y1", "def constraint_level(self, soft_constraint):\n return soft_constraint.is_soft, len(soft_constraint.get_variables())", "def layout_constraints(self):\n widgets = self.visible_widgets()\n items = [self.leading_spacer] + widgets + [self.trailing_spacer]\n cns = self.constraints[:]\n cns.append(vbox(*items, spacing=self.spacing))\n cns.append(align('left', *widgets))\n return cns", "def rigid_body_constraints(self):\n phi_1 = constant_distance(self.r_i.symbolic_coordinates -\n self.r_j.symbolic_coordinates, self.length)\n phi_2 = constant_distance(self.u.symbolic_coordinates, 1)\n phi_3 = constant_distance(self.v.symbolic_coordinates, 1)\n phi_4 = perpendicular(self.u.symbolic_coordinates,\n 
self.v.symbolic_coordinates)\n phi_5 = perpendicular(self.r_i.symbolic_coordinates -\n self.r_j.symbolic_coordinates,\n self.u.symbolic_coordinates)\n phi_6 = perpendicular(self.r_i.symbolic_coordinates -\n self.r_j.symbolic_coordinates,\n self.v.symbolic_coordinates)\n\n self.constraints = [phi_1, phi_2, phi_3, phi_4, phi_5, phi_6]", "def __len__(self):\n return len(self.constraints)", "def get_model_parameter_bounds():\n minf = float(\"-inf\")\n inf = float(\"inf\")\n params = dict(mu=(minf,inf), rho=(0.0 ,inf))\n return params", "def get_equality_constraint_scaling_factors(self):\n return None", "def match_constraints(self) -> Optional['outputs.MatchResourcesPatch']:\n return pulumi.get(self, \"match_constraints\")", "def get_bounds(self, parameter_name=None):\n if parameter_name is None:\n return [self.get_bounds(p)\n for p in self.shape_parameters]\n if parameter_name in self.shape_parameters.keys():\n bounds = []\n for ll in self.likelihood_list:\n if parameter_name in ll.shape_parameters.keys():\n bounds.append(ll.get_bounds(parameter_name))\n bounds = np.array(bounds)\n ret= np.max(bounds[:,0]), np.min(bounds[:,1])\n if ret[1] <= ret[0]:\n raise InvalidParameterSpecification(\"lower bound %s higher than upper bound!\" % parameter_name)\n return ret\n\n elif parameter_name.endswith('_rate_multiplier'):\n return 0, float('inf')\n else:\n raise InvalidParameter(\"Non-existing parameter %s\" % parameter_name)", "def match_constraints(self) -> Optional['outputs.MatchResources']:\n return pulumi.get(self, \"match_constraints\")", "def _build_constraints_dict(model):\n constraints_dict = copy.deepcopy(model._constraints)\n\n # bounds are stored as strings so\n # they can be edited by the user.\n for name in constraints_dict['bounds']:\n bound1 = constraints_dict['bounds'][name][0]\n bound2 = constraints_dict['bounds'][name][1]\n constraints_dict['bounds'][name] = \"(%s,%s)\" % (str(bound1), str(bound2))\n\n # clean up. This is something that exists only\n # in single models and is not needed to rebuild\n # the model from its YAML description.\n if 'eqcons' in constraints_dict:\n constraints_dict.pop('eqcons')\n constraints_dict.pop('ineqcons')\n\n return constraints_dict", "def get_bounds(self):\n x_max = self.data['x'].max()\n y_max = self.data['y'].max()\n z_max = self.data['z'].max()\n print(\"x={}; y={}; z={}\".format(x_max, y_max, z_max))\n return (x_max, y_max, z_max)" ]
[ "0.80761766", "0.79321265", "0.79321265", "0.7608304", "0.75793535", "0.7431221", "0.72901845", "0.71523994", "0.7082262", "0.7023915", "0.6928313", "0.6927668", "0.69181097", "0.68804044", "0.6866544", "0.6732997", "0.6618143", "0.66156757", "0.66137815", "0.6589468", "0.658647", "0.65627575", "0.65003544", "0.6495402", "0.641059", "0.6391633", "0.6373397", "0.6363", "0.63268065", "0.63243544", "0.6320915", "0.6311139", "0.6311139", "0.6311139", "0.6311139", "0.6311139", "0.6311139", "0.6311139", "0.6311139", "0.62605155", "0.6236242", "0.6229711", "0.6202135", "0.62006736", "0.6198262", "0.61842674", "0.6169019", "0.61294746", "0.61289793", "0.6124525", "0.60482776", "0.6045209", "0.60430056", "0.60276467", "0.60046434", "0.599981", "0.5998119", "0.5997131", "0.59947646", "0.5992451", "0.5985576", "0.5970859", "0.596673", "0.5948259", "0.59478974", "0.59464526", "0.5940003", "0.59037143", "0.590286", "0.5891117", "0.58891183", "0.58891183", "0.58631647", "0.5854197", "0.58535886", "0.58470017", "0.5837449", "0.58121127", "0.5806755", "0.5799368", "0.5785571", "0.57776034", "0.57676744", "0.57675576", "0.5748346", "0.5741959", "0.5736538", "0.5734176", "0.5728522", "0.57193446", "0.57068294", "0.570502", "0.57033956", "0.5691205", "0.56879085", "0.5683032", "0.56705093", "0.5666639", "0.56536084", "0.56501883" ]
0.79009855
3
get the cvxpy variable associated with this layer
def get_cvxpy_variable(self, channel_indx=None): if channel_indx is None: output_channels = cp.hstack( [ self.layer_input[cur_channel_indx] for cur_channel_indx in range(self.n_in_channels) ] ) else: output_channels = self.layer_input[channel_indx] return output_channels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def xvar ( self ) :\n return self.__xvar", "def x ( self ) :\n return self.xvar", "def var(self, name):\n return self.get_ground_vector('!Var:{}'.format(name))", "def var(self, name):\n return self.get_ground_vector('!Var:{}'.format(name))", "def var(self, name):\n return self.get_ground_vector('!Var:{}'.format(name))", "def zvar ( self ) :\n return self.__zvar", "def getCvar(self, key):\n print \"get cvar %s\" % key\n return self.cvars.get(key)", "def getVariable(self, gradientCoordinate):\n return self.variables[gradientCoordinate]", "def get_variable(x):\n return x.cuda() #if use_cuda else x", "def getVariable(self):\n return _libsbml.Rule_getVariable(self)", "def var(self,i): # TODO: change to property to access (read only?) X?\n return Var(i,self.dims[i])", "def intrinsic_variable(self):\n if IVARG_ROLE in self.args:\n return self.args[IVARG_ROLE]\n return None", "def var(self) -> float:\n return self._data.var()", "def covar(self) -> np.ndarray:\n if self._covar is None:\n self._covar = batched_inv_spd(batched_cholesky(self._inv_covar))\n return self._covar", "def variable_vis(self):\n return self._variable_vis", "def to_var(self, x):\n if torch.cuda.is_available():\n x = x.cuda()\n return Variable(x)", "def get_model_var(self):\n return self.model_var", "def yvar ( self ) :\n return self.__yvar", "def var(self):\n return np.diag(self.covar)", "def _get_embedding_variable(self, layer_name):\n return self._tls._embed_variables.get(layer_name, None)", "def getVariable(self):\n return _libsbml.EventAssignment_getVariable(self)", "def name(self):\n return self.__nvXxPr.cNvPr.get('name')", "def get_variable(x, volatile=False):\n tensor = torch.cuda.LongTensor(x) if CUDA else torch.LongTensor(x)\n return autograd.Variable(tensor, volatile=volatile)", "def independent_variable(self):\n return self._independent_variable", "def var(self):\n\n return self.scale ** -2 \\\n * (m.gamma(1 + 2 * self.shape ** -1) - m.gamma(1 + self.shape ** -1) ** 2)", "def get_variable_value(self, name):\n return self._design.GetVariableValue(name)", "def const(self, name):\n return self.get_ground_vector('!Const:{}'.format(name))", "def const(self, name):\n return self.get_ground_vector('!Const:{}'.format(name))", "def const(self, name):\n return self.get_ground_vector('!Const:{}'.format(name))", "def V_var(self) -> Optional[np.ndarray]:\n\n def _retrieve(fm: VariationalFM) -> np.ndarray:\n return fm.V_var\n\n return runtime_error_to_optional(self, _retrieve)", "def var(self):\n if self._properties[\"var\"] is None:\n self._derive_variance_()\n return self._properties[\"var\"]", "def fcp(self, var='x'):\n return self.charpoly(var).factor()", "def getX(self):\n return self.__x", "def get_variable_from_model(self,modeltype,obsname):\n return get_variable_from_model(self.getmodel(modeltype),obsname)", "def get_variable(self, name):\n return self._properties[name]", "def get_stage_x(self):\n raise NotImplementedError", "def getX(self):\n return self.components[0]", "def getX(self):\n return self.components[0]", "def variable(initializer=None, shape=None, dtype=None, name=None, **kwargs):\n return get_var(name, shape=shape, dtype=dtype, initializer=initializer, **kwargs)", "def extract_2d_var(self,varname,file_idx):\n if varname in self.predictors:\n file = self.predictor_inventory[varname]['files'][file_idx]\n elif varname in self.predictands:\n file = self.predictand_inventory[varname]['files'][file_idx]\n else:\n raise ValueError(f'{varname} not a predictor or predictand') \n \n return 
Dataset(file).variables[varname][:]", "def x(self):\n return self[\"x\"]", "def getX(self):\n return self.proj.getX()", "def to_var( x):\n if torch.cuda.is_available():\n x = x.cuda()\n return Variable(x)", "def variable(self):\n return _coconut_tail_call(Var, self.name)", "def getX(self):\r\n\t\treturn self._x", "def _getLaplaceCovar(self):\n assert self.init, 'GP not initialised'\n assert self.fast is False, 'Not supported for fast implementation'\n\n if self.cache['Sigma'] is None:\n self.cache['Sigma'] = sp.linalg.inv(self._getHessian())\n return self.cache['Sigma']", "def value(self):\n return self._force.params[self.typepair][self.name] * (\n self._cpp_obj.alpha)", "def variable(self) -> Variable:\n ...", "def read_model_coord_var(case,varname):\n filelist = create_file_list(case)\n fpin = Nio.open_file(filelist[0],'r')\n data = fpin.variables[varname][:]\n fpin.close()\n return data", "def get_stage_xyz(self):\n raise NotImplementedError", "def z ( self ) :\n return self.zvar", "def var(self) -> FrameLike:\n return super().var()", "def var(self) -> FrameLike:\n return super().var()", "def var(self) -> FrameLike:\n return super().var()", "def var(self) -> FrameLike:\n return super().var()", "def get_value(self, x):\n return np.dot(self.w, x)", "def get_variable(self, variable_name):\n with self._graph.as_default():\n return self._sess.run(self._get_tensor_by_name(variable_name))", "def x0(self):\n return self.params['x0']", "def nCx(self):\n return int(self._n[0])", "def findX(self):\n return self.x", "def get_variable(self, name):\n if self._scalamagic:\n intp = self.scala_interpreter\n intp.interpret(name)\n return intp.last_result()", "def getConstant(self):\n return _libsbml.LocalParameter_getConstant(self)", "def vx0(self):\n return self.params['vx0']", "def covar(self):\n a, c, d, b = self.to_ccw()\n return a * d - b * c", "def dependent_variable(self):\n return self._dependent_variable", "def getX(self):\n return self.x", "def get_coefficient(self, var):\n if not isinstance(var, mp_variable.MPVariable):\n raise TypeError()\n return float(self._expr.coeff(var))", "def _var(self):\n return self.sumsquares / self.sum_weights", "def xyz(self):\n return self._xyz", "def get_variables(self) -> np.array:\n return np.array([self.m, self.c])", "def get_var(self, chrom, *pargs, **kwargs):\n oname = \"vcf_%s\" % str(chrom)\n\n if oname not in self._obj:\n raise AttributeError(\"Could not find chromosome '%s'\" % chrom)\n #fi\n\n return self._obj[oname].get_var(chrom, *pargs, **kwargs)", "def GetX(self):\r\n\r\n return self._x", "def id(self):\n return int(self.__nvXxPr.cNvPr.get('id'))", "def read_var(self, ncvar):\n ncf = Dataset(self.fname)\n dat = ncf.variables[ncvar][:]\n ncf.close()\n return dat", "def get_layer_var_names(self):\n return(self.params)", "def X(self):\n return self.x\n pass", "def get_variables(self) -> np.array:\n pass", "def get_airflow_variable(key: str) -> str:\n return models.Variable.get(key)", "def get_variable(self, col: str, name: str, default: T = None) -> T:\n if self.scope is None:\n raise ValueError(\"Can't access variables on unbound modules\")\n return self.scope.get_variable(col, name, default)", "def getXVelocity(self):\n return self.xvelocity", "def getChemCompVars(self):\n dataDict = self.__dict__\n result = self.boundLinkAtom.chemCompVars\n return result", "def getConstant(self):\n return _libsbml.Parameter_getConstant(self)", "def x(self):\n return self.__x", "def x(self):\n return self.__x", "def x(self):\n return self.__x", "def x(self):\n 
return self.__x", "def x(self):\n return self.__x", "def x(self):\n return self.__x", "def x(self):\n return self.__x", "def x(self):\n return self.__x", "def x(self):\n return self.__x", "def x(self):\n return self.__x", "def x(self):\n return self.__x", "def getChemCompVars(self):\n dataDict = self.__dict__\n result = self.specificChemCompVars\n if not result:\n result = self.getByNavigation('namingSystem', 'chemComp', 'chemCompVars')\n return result", "def var(self, init_repr, name):\n randomness = self.get_ground_vector('!Var:{}-Var'.format(name))\n return self.varmodel(torch.cat([init_repr, randomness]))", "def __getitem__(self, key):\n return self.variables[key]", "def x(self):\n return self.x", "def getConstellation(self):\n return self._const", "def get_var_soln(self, label):\n\n assert self.var_array is not None, 'calculate_diagnostics has not been run'\n i_label = self.var_labels.index(label)\n return self.var_array[:, i_label]", "def getConstant(self):\n return _libsbml.Compartment_getConstant(self)" ]
[ "0.6952394", "0.6349231", "0.6311073", "0.6311073", "0.6311073", "0.6210869", "0.614176", "0.61036706", "0.6093137", "0.5973595", "0.5904504", "0.58920914", "0.5774172", "0.5744649", "0.5726218", "0.5713547", "0.56973785", "0.56402665", "0.5625536", "0.5621677", "0.5595852", "0.559292", "0.55797166", "0.5579235", "0.5548584", "0.554761", "0.5533927", "0.5533927", "0.5533927", "0.55308187", "0.55008507", "0.5459748", "0.5437876", "0.5436519", "0.5434445", "0.54286104", "0.5427135", "0.5427135", "0.54263085", "0.5417836", "0.5410218", "0.53916335", "0.53904676", "0.53883195", "0.5387648", "0.538562", "0.5377186", "0.53557146", "0.5353151", "0.53521943", "0.5351345", "0.53447527", "0.53447527", "0.53447527", "0.53447527", "0.5329428", "0.53282", "0.5323132", "0.5318085", "0.5313153", "0.5311223", "0.53069115", "0.5305416", "0.53030306", "0.5300001", "0.52982527", "0.52859634", "0.5258245", "0.5257541", "0.5257425", "0.525133", "0.5250849", "0.52465075", "0.5235915", "0.5235028", "0.5232301", "0.5225024", "0.52213717", "0.52159375", "0.52150846", "0.5213681", "0.5209941", "0.5209032", "0.5209032", "0.5209032", "0.5209032", "0.5209032", "0.5209032", "0.5209032", "0.5209032", "0.5209032", "0.5209032", "0.5209032", "0.5205084", "0.51835656", "0.51756746", "0.5169341", "0.5163708", "0.51618755", "0.5155883" ]
0.7194828
0
compute the output of this layer based on the weights biases and decision variable
def get_computation_layer(self, channel_indx=0): if channel_indx is None: return self._get_multi_channel_output_flat() normalized_batch = ( self.layer_input[channel_indx] - self.running_mean[channel_indx] ) / (np.sqrt(self.running_var[channel_indx] + self.epsilon)) if self.affine: return (normalized_batch * self.weights[channel_indx]) + self.bias[ channel_indx ] return normalized_batch
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_outputs(self, *args, **kwargs):\n pass\n # self.outputs = self.model(input_ids=self.input_ids, masked_lm_labels=self.input_ids)\n # self.logits = self.outputs[0][0]\n # self.probs = torch.softmax(self.logits, 1)", "def get_output(weight, data, regression= \"logistic\"):\n dot_product = np.matmul(data,weight)\n if regression == \"logistic\":\n output = get_sigmoid(dot_product)\n elif regression == \"probit\":\n output = norm.cdf(dot_product)\n elif regression == \"multiclass\":\n output = softmax(dot_product, axis=1)\n\n return output, dot_product", "def compute_output(self):\n x, y = self.input_nodes\n print(x.name, y.name)\n self.output_value = backend.dot(x.output_value, y.output_value)\n return self.output_value", "def _learn_node_parameter_var(outputs, weights, inputs):\n var = 0.\n\n \"\"\" YOUR CODE HERE \"\"\"\n temp = 0\n N_observe = outputs.shape[0]\n if inputs is None:\n temp = np.sum((outputs-weights[0])**2)\n else:\n for i in range(N_observe):\n temp += (outputs[i] - (np.sum(weights[1:] * inputs[i]) +weights[0]))**2\n var = temp/N_observe\n\n\n\n \"\"\" END YOUR CODE HERE \"\"\"\n\n return var", "def compute(self, example):\n activations = []\n if self.hidden > 0:\n for i in xrange(self.hidden):\n output = self.vis_layer[i].compute(example)\n activations.append(output)\n activations.append(1.0)\n for layer in xrange(self.layers):\n hidden_activations = []\n for i in xrange(self.hidden):\n hidden_activations.append(self.hidden_layers[layer][i].compute(activations))\n hidden_activations.append(1.0)\n activations = hidden_activations\n output = self.output_neuron.compute(activations)\n else:\n output = self.output_neuron.compute(example)\n return Network.threshold(output)", "def engage(self):\n # no sigmoid for the inputs and bias\n if layer != 0:\n self.outputValue = sigmoid(inputSum);\n\n for connection in self.outputConnections:\n if connection.enabled == True:\n #connection will have toNode\n connection.toNode.inputSum += connection.weight * self.outputValue;", "def cost(self, output, labels, weights):\n raise NotImplementedError('Must be overridden by concrete subclass')", "def compute_output(self):\n x, y = self.input_nodes\n self.output_value = backend.multiply(x.output_value, y.output_value)\n return self.output_value", "def Get_Output(self, hidden_state):\n output = tf.nn.relu(tf.matmul(hidden_state, self.Wo) + self.bo)\n\n return output", "def model(self, inputs):\n h1 = dense(inputs, self.weights[0], self.biases[0], tf.nn.relu) #hidden layer 1\n h2 = dense(h1, self.weights[1], self.biases[1], tf.nn.relu) #hidden layer 2\n\n out = dense(h2, self.weights[2], self.biases[2])\n\n return out", "def action(self, observations):\n # First hidden layer\n z1 = np.dot(self.w1, observations).reshape((2,1)) + self.b1\n a1 = self.relu(z1)\n \n # Second hidden layer\n z2 = np.dot(self.w2, a1) + self.b2\n a2 = self.relu(z2)\n\n # Third layer (output\n z3 = np.dot(self.w3, a2) + self.b3\n a3 = self.tanh(z3)\n \n # Get the output \n return 1 if a3 >= 0 else 0", "def get_hidden_values(self, input):\r\n return T.nnet.sigmoid(T.dot(input, self.W) + self.b)", "def get_hidden_values(self, input):\r\n return T.nnet.sigmoid(T.dot(input, self.W) + self.b)", "def compute_output(self):\n x, y = self.input_nodes\n self.output_value = backend.add(x.output_value, y.output_value)\n return self.output_value", "def get_output(self, X):\n return X.dot(self.W) + self.b", "def getDecision(self):\n temp = []\n temp.append( np.array(self.scan) )\n for i,wt in enumerate(self.weights):\n 
#print(self.bias[i],temp[i].dot(wt))\n temp.append(np.add(temp[i].dot(wt),self.bias[i]))\n #print(str(self.bias) + \" \" + str(wt))\n return temp[len(self.weights)].tolist() # np.add(np.add(np.add(self.scan.dot(self.weights[0]), self.bias[0]).dot(self.weights[1]),self.bias[1]).dot(self.weights[2]),self.bias[2]).T", "def _learn_node_parameter_w(outputs, inputs=None):\n num_inputs = 0 if inputs is None else inputs.shape[1]\n weights = np.zeros(shape=num_inputs + 1)\n\n \"\"\" YOUR CODE HERE \"\"\"\n # Ax = b, A\n N_observe = outputs.shape[0]\n A = np.zeros(shape = (num_inputs+1, num_inputs+1))\n for i in range(A.shape[0]):\n for j in range(A.shape[1]):\n if i==0 and j==0:\n A[i][j] = N_observe\n elif i==0 and j!=0:\n A[i][j] = np.sum(inputs[:,j-1])\n elif i!=0 and j==0:\n A[i][j] = np.sum(inputs[:,i-1])\n else:\n for k in range(N_observe):\n A[i][j] += inputs[k,i-1]*inputs[k, j-1]\n b = np.zeros(shape=num_inputs + 1)\n for i in range(len(b)):\n if i==0:\n b[i] = np.sum(outputs)\n else:\n for k in range(N_observe):\n b[i] += inputs[k,i-1]*outputs[k]\n\n weights = np.linalg.solve(A, b)\n \"\"\" END YOUR CODE HERE \"\"\"\n\n return weights", "def predict(self, X):\n W, b = self.W, self.b\n H = W.dot(X) + b # (out, in) x (in, train) + (out, 1) = (out, train)\n return self.activation(H)", "def compute_output(self, input_data, no_update_wsi=False):\n\t\t# compute weighted sum of inputs\n\t\tif not no_update_wsi:\n\t\t\tself.compute_wsi(input_data)\n\t\t# compute output based on initialization\n\t\tif self.activation_type == 'step':\n\t\t\tself.output = Neuron.step_function(self.wsi)\n\t\telif self.activation_type == 'sigmoidal':\n\t\t\tself.output = Neuron.sigmoidal_function(self.wsi, self.af_param)\n\t\telif self.activation_type == 'hyperbolic':\n\t\t\tself.output = Neuron.hyperbolic_function(self.wsi)\n\t\telif self.activation_type == 'gaussian':\n\t\t\tself.output = Neuron.gaussian_function(self.wsi)", "def cost(self, output, labels, weights):\n return tf.multiply(0.5 * tf.square(output - labels), weights)", "def __call__(self, inputs):\n return self._hidden_activation(inputs)", "def get_outputs(self, rov_id):\n count = 0 # Keeps count of which weight is being applied\n self.reset_layers(rov_id)\n\n # for i in range(self.n_inputs):\n # self.in_layer[rov_id, i] = self.tanh(self.in_layer[rov_id, i])\n\n for i in range(self.n_inputs): # Pass inputs to hidden layer\n for j in range(self.n_nodes):\n self.hid_layer[rov_id, j] += self.in_layer[rov_id, i] * self.weights[rov_id, count]\n count += 1\n\n for j in range(self.n_nodes): # Add Biasing Node\n self.hid_layer[rov_id, j] += (self.input_bias * self.weights[rov_id, count])\n count += 1\n\n for i in range(self.n_nodes): # Pass through sigmoid\n self.hid_layer[rov_id, i] = self.tanh(self.hid_layer[rov_id, i])\n\n for i in range(self.n_nodes): # Pass from hidden layer to output layer\n for j in range(self.n_outputs):\n self.out_layer[rov_id, j] += self.hid_layer[rov_id, i] * self.weights[rov_id, count]\n count += 1\n\n for j in range(self.n_outputs): # Add biasing node\n self.out_layer[rov_id, j] += (self.hidden_bias * self.weights[rov_id, count])\n count += 1\n\n for i in range(self.n_outputs): # Pass through sigmoid\n self.out_layer[rov_id, i] = self.tanh(self.out_layer[rov_id, i])", "def output_layer_activation(x):\n return x", "def get_hidden_values(self, input):\n return T.nnet.sigmoid(T.dot(input, self.W) + self.bh)", "def output (self) -> np.ndarray :\n return RNN.sigmoid (self.Wout.dot (self.xvec))", "def __init__(self, input, n_out, y):\n 
n_in = input.get_shape()[1].value\n self.input = input\n\n # Initiate the weight and biases for this layer\n r = 4*np.sqrt(6.0/(n_in + n_out))\n w = tf.Variable(tf.random_uniform([n_in, n_out], minval=-r, maxval=r))\n b = tf.Variable(tf.zeros([n_out]), name='b')\n\n pred = tf.add(tf.matmul(input, w), b)\n ################\n temp = tf.nn.softmax(pred)\n\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))\n\n # Evaluate model\n correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\n self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n \n self.y = y\n self.w = w\n self.b = b\n self.cost = cost\n ###############\n self.temp = temp\n self.params= [w]", "def compute(self, inputs):\n\t\tres = inputs\n\t\tfor layer in range(self.layersNumber):\n\t\t\tweight = self.weights[layer]\n\t\t\tbias = self.biases[layer]\n\t\t\tres = fActivation(np.dot(weight, res) + bias)\n\t\treturn res", "def get_hidden_values(self, input):\n return T.nnet.sigmoid(T.dot(input, self.W) + self.b)", "def get_hidden_values(self, input):\n return T.nnet.sigmoid(T.dot(input, self.W) + self.b)", "def get_outputs():\n all_hidden_states = get_states()\n all_attention = tf.map_fn(get_attention, all_hidden_states)\n a_values = tf.nn.softmax(all_attention, axis = 0)\n final_hidden_state = tf.einsum('ijk,ijl->jkl', a_values, \n all_hidden_states)\n output = tf.nn.sigmoid(tf.matmul(final_hidden_state[:,0,:], Wo) + bo, \n name='outputs')\n return output, a_values", "def add_prediction_op(self):\n ### YOUR CODE HERE\n W = tf.Variable(tf.zeros((Config.n_features,Config.n_classes)), dtype=tf.float32)\n b = tf.Variable(tf.zeros(Config.n_classes), dtype=tf.float32)\n pred = softmax(tf.matmul(self.input_placeholder,W)+b)\n ### END YOUR CODE\n return pred", "def compute_output_from_current_state(self):\n\n assert self.Wout is not None, \"Matrix Wout is not initialized/trained yet\"\n\n self.output_values = (self.Wout @ self.state).astype(self.typefloat)\n return self.output_values.copy().ravel()", "def compute_prediction(X, weights):\r\n z = np.dot(X, weights) # dot product\r\n predictions = sigmoid(z)\r\n return predictions", "def __call__(self, x):\n return self._pre_scale * tf.matmul(x, self._weight) + self._bias", "def calculate_output(self):", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n # Perform convolution\n conv = conv2d(incoming, self.W, strides=self.strides, padding=self.padding,\n dilation_rate=self.dilation_rate)\n \n # Add bias\n if self.b is not None:\n conv += self.b\n \n # Apply activation function\n self.out = self.a(conv)\n \n return self.out", "def predict(self, input):\n a = np.dot(np.transpose(self.final_weights), input)\n prob = self.sigmoid(a)\n return 1 if prob > 0.5 else 0", "def predict(self, X):\n \n p = sigmoid(np.matmul(self.weight.T , X) + self.bias)\n return p", "def out(input_lst, weight_lst, bias):\r\n return 1 / (1 + math.exp(-1 * net(input_lst, weight_lst, bias)))", "def fit(self, input, output):\n last = self.w0 # equivalent to wk in the loop\n for iteration in range(self.descents):\n sum_over_i = [0.0] * len(last)\n for i in range(len(input)):\n wtx = np.dot(np.transpose(last), input[i, :])\n sum_over_i = np.add(sum_over_i, input[i, :] * (output[i] - self.sigmoid(wtx)))\n last = np.add(last, self.learning_rate * sum_over_i)\n self.final_weights = 
last", "def decision_function(alphas, target, kernel, X_train, x_test, b):\n\n result = (alphas * target) @ kernel(X_train, x_test) - b\n return result", "def mlpfwd(self,inputs):\n\n self.hidden = np.dot(inputs,self.weights1);\n self.hidden = 1.0/(1.0+np.exp(-self.beta*self.hidden))\n self.hidden = np.concatenate((self.hidden,-np.ones((np.shape(inputs)[0],1))),axis=1)\n\n outputs = np.dot(self.hidden,self.weights2);\n\n # Different types of output neurons\n if self.outtype == 'linear':\n \treturn outputs\n elif self.outtype == 'logistic':\n return 1.0/(1.0+np.exp(-self.beta*outputs))\n elif self.outtype == 'softmax':\n normalisers = np.sum(np.exp(outputs),axis=1)*np.ones((1,np.shape(outputs)[0]))\n return np.transpose(np.transpose(np.exp(outputs))/normalisers)\n else:\n print \"error\"", "def predict(self, X_):\n X = np.c_[np.ones(X_.shape[0]), X_] # add bias variable 1\n pred = np.zeros(X.shape[0])\n ### YOUR CODE HERE\n z = X.dot(self.w)\n predictions = soft_reg.softmax(z)\n pred = np.argmax(np.round(predictions), axis=1)\n ### END CODE\n return pred", "def build_layer(self) :\n inputsWithBias = self.input_count + 1\n self.weights = np.random.rand(inputsWithBias, self.node_count)\n self.weights_and_activations = (self.weights, self.activations)", "def nnPredict(w1,w2,data):\n \n labels = np.array([])\n \n # create bias row\n bias_row =np.ones((np.size(data,0),1))\n \n # concatenate bias with data matrix\n data=np.concatenate((data,bias_row),axis=1)\n \n #Calculate input to hidden layer\n intput_hidden_layer= np.dot(data,w1.transpose()) \n \n #Calculate output of hidden layer using sigmoid function\n output_hidden_layer= sigmoid(intput_hidden_layer)\n \n #Calculate input to output nodes\n input_with_bias = np.concatenate((output_hidden_layer,bias_row),axis=1) \n input_output_node= np.dot(input_with_bias,w2.transpose()) \n \n # Calculate output of output layer\n output_layer= sigmoid(input_output_node) \n \n # get index of maximum from all rows in ouput layer matrix\n labels = np.argmax(output_layer,axis=1) \n \n return labels", "def output(self, input, in_features, out_features,reuse=False):\n # with tf.variable_scope(self.name):\n # print('f'*20,input.get_shape().as_list(),in_features,out_features)\n w=self._create_weight([self.cnn_size,self.cnn_size,in_features,out_features],name='Wfn')\n out=self._conv2d(input,w,[1, self.cnn_stride, self.cnn_stride, 1],pre_name='convfn')\n return out", "def apply_weights(self):\n return self.X.dot(self.get_weights())", "def model(self, img, label, bias, filters):\n prediction, z, flat, layers = self.predict(bias, img, filters)\n\n loss = self.categorical_crossentropy(prediction, label)\n\n # backpropagation\n dout = prediction - np.asarray(label).reshape((15, 1))\n dflat, dw8, db8, dw7, db7 = self.dense_layer_backprop(dout, flat, filters[6:8], bias[6:8], z)\n\n dconv6 = dflat.reshape(layers[-1].shape)\n dconv6[layers[-1] <= 0] = 0\n dconv5, df6, db6 = self.conv_layer_backprop(dconv6, layers[-2], filters[5])\n dconv5[layers[-2] <= 0] = 0\n dpool2, df5, db5 = self.conv_layer_backprop(dconv5, layers[-3], filters[4])\n dconv4 = self.pooling_layer_backprop(dpool2, layers[-4])\n dconv4[layers[-4] <= 0] = 0\n dconv3, df4, db4 = self.conv_layer_backprop(dconv4, layers[-5], filters[3])\n dconv3[layers[-5] <= 0] = 0\n dpool1, df3, db3 = self.conv_layer_backprop(dconv3, layers[-6], filters[2])\n dconv2 = self.pooling_layer_backprop(dpool1, layers[-7])\n dconv2[layers[-7] <= 0] = 0\n dconv1, df2, db2 = self.conv_layer_backprop(dconv2, layers[-8], filters[1])\n 
dconv1[layers[-8] <= 0] = 0\n dimg, df1, db1 = self.conv_layer_backprop(dconv1, img[0], filters[0])\n\n weight_gradients = [df1, df2, df3, df4, df5, df6, dw7, dw8]\n bias_gradients = [db1, db2, db3, db4, db5, db6, db7, db8]\n\n return weight_gradients, bias_gradients, loss", "def add_prediction_op(self, outputs):\n dropout_rate = self.dropout_placeholder\n U = tf.get_variable(\"OutputWeights\", shape = (self.config.hidden_size, self.config.n_classes), initializer = tf.contrib.layers.xavier_initializer())\n b_2 = tf.get_variable(\"OutputBias\", shape = (self.config.n_classes), initializer = tf.zeros_initializer())\n\n outputs = tf.nn.dropout(outputs, dropout_rate) \n\n outputs = tf.reshape(outputs, [-1, self.config.hidden_size]) \n preds = tf.add(tf.matmul(outputs, U), b_2)\n preds = tf.reshape(preds, [self.config.batch_size, -1, self.config.n_classes])\n #preds = tf.Print(preds, [preds], summarize = self.config.n_classes)\n return preds", "def call(self, inputs, states):\r\n (out_prev, Vm_prev) = states\r\n\r\n #Vm = Vm_prev * (1.0 - out_prev)\r\n #Lateral inhibition logic:\r\n Vm = Vm_prev * (1.0 - tf.reduce_max(out_prev))\r\n\r\n Vm = Vm * self.decay\r\n Vm = Vm + tf.matmul(inputs, self.kernel)\r\n if self.recurrent:\r\n Vm = Vm + tf.matmul(out_prev, self.recurrent_kernel)\r\n Vm = self.g(Vm)\r\n overVth = Vm - self.bias\r\n out = self.activation(overVth)\r\n return out, (out, Vm)", "def output_layer(self, h_, labels_):\n with tf.name_scope(\"Output_Layer\"):\n self.W_out_ = tf.get_variable(\"W_out\", shape=[h_.get_shape()[1].value,len(self.embed[0])], initializer=tf.random_normal_initializer())\n self.b_out_ = tf.get_variable(\"b_out\", shape=[len(self.embed[0])], initializer=tf.zeros_initializer())\n self.logits_ = tf.add(tf.matmul(h_,self.W_out_),self.b_out_)\n self.activated_out_ = tf.tanh(self.logits_) \n\n with tf.name_scope(\"Loss\"):\n self.loss_ = tf.reduce_sum(tf.square(tf.norm(tf.subtract(labels_, self.activated_out_),axis=1)))\n self.optimizer_ = tf.train.AdamOptimizer(learning_rate = self.learning_rate_)\n gradients_, variables_ = zip(*self.optimizer_.compute_gradients(self.loss_))\n clipped_grads_, _ = tf.clip_by_global_norm(gradients_, self.max_grad_norm_)\n self.train_step_ = self.optimizer_.apply_gradients(zip(clipped_grads_,variables_))", "def __compute_prediction(self, x):\n z = np.dot(x, self.__weights)\n predictions = sigmoid_activation_function(z)\n return predictions", "def create(self):\n output = None\n if self.output_bias is not None:\n output_bias = tf.keras.initializers.Constant(self.output_bias)\n else:\n output_bias = None\n kernel_init = None\n if self.activation_name == 'relu' or self.activation_name == 'elu':\n # Kaiming He initialization\n kernel_init = tf.keras.initializers.he_normal()\n elif self.activation_name == 'selu':\n # LeCun initialization\n kernel_init = tf.keras.initializers.lecun_uniform()\n elif self.activation_name == 'tanh' or self.activation_name == \\\n 'sigmoid':\n # Xavier Glorot initialization\n kernel_init = tf.keras.initializers.glorot_uniform()\n if self.layer_type == 'dense':\n kernel_reg = tf.keras.regularizers.l1_l2(l1=self.l1_reg,\n l2=self.l2_reg)\n output = tf.keras.layers.Dense(units=self.n_nodes,\n activation=self.activation_name,\n bias_initializer=output_bias,\n kernel_initializer=kernel_init,\n kernel_regularizer=kernel_reg,\n name=self.name)\n # elif self.layer_type == 'clipping':\n # output = Clipping(self.upper)\n elif self.layer_type == 'dropout':\n output = tf.keras.layers.Dropout(rate=self.dropout_rate,\n 
name=self.name)\n return output", "def neural_net_predict(self, inputs):\n for W, b in self.params:\n outputs = np.dot(inputs, W) + b\n inputs = np.tanh(outputs)\n return outputs # - logsumexp(outputs, axis=1, keepdims=True)", "def get_output(self, **kwargs):\n with tf.variable_scope(self.layer_scope):\n return self.out", "def get_bias(self):", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n self.out = incoming * self.factor\n return self.out", "def call(self, inputs):\n\n x = tf.matmul(inputs, self.w) + self.b\n x = self.activation(x)\n\n return x", "def __call__(self, in_state, labels, label_mask, predict=False):\n t_ = tf.matmul(in_state, self._W_sftm) + self._B_sftm # t_: [batch_size * class_num]\n #t_ = tf.expand_dims(label_mask, 1) * t_\n t_sftm_ = self._activation(t_)\n if not predict:\n #labels_1hot = tf.one_hot(labels, self._class_num, 1.0, 0.0)\n loss = self._loss_f(t_, labels)\n loss = loss * label_mask\n return tf.argmax(t_sftm_, 1), t_sftm_, loss\n else:\n return tf.argmax(t_sftm_, 1), t_sftm_", "def forward(self, input):\n label=np.dot(input,self.w)+self.b\n return label", "def calculate_output(self, input_par):\r\n\r\n return self.meta_model.calculate_output(input_par)", "def calculate(self, inputs:[bool]):\n\n w_som = 0\n outputs = []\n for i in range(len(inputs)):# iterate through the index inputs e.g [0,0]\n weight = self.weights[i] # get weight \n x = inputs[i] # get x\n\n w_som += (weight*x) # increment w_som with the multiplication of weighti and xi\n output = self.activation(w_som) # apply the step function to w_Som\n #print(outputs)\n return output", "def predict(features, weights, bias):\n z = pre_activation(features, weights, bias)\n # Get normalized scores\n y = activation(z)\n # Get 0 or 1 value\n return np.round(y)", "def predict(self):\n add = np.ones(len(self.X_test))\n X_add = np.c_[add, self.X_test]\n pred = np.dot(X_add, self.w_result.T)\n\n pred[pred > 0] = 1\n pred[pred < 0] = 0\n return pred", "def predict(self, X):\n # Multiply the weight matrix, W, by the input matrix X\n results = np.dot(self.weights, X)\n\n if self.transfer_function == \"Hard_limit\":\n actualResults = np.where(results < 0, 0, 1)\n else:\n actualResults = results\n return actualResults\n \n #print(actualResults)", "def output(self, inputs):\n self._in_j = self._input(inputs) #Previous weighted inputs\n return self._g(self._in_j)", "def evaluate_prediction(self, model_output, reference):\n with torch.no_grad():\n cross_entropy = self._criterion(\n model_output.view(-1, self.model.ntokens), reference.contiguous().view(-1))\n return {\n \"cross_entropy\": cross_entropy.detach(),\n \"perplexity\": torch.exp(cross_entropy).detach(),\n }", "def output(self,x=None,h=None,in_features=0,in_features_h=0,reuse=False):\n assert (x is not None or h is not None)\n print('re'*10,reuse , self.name)\n stride=[1, self.cnn_stride, self.cnn_stride, 1]\n with tf.variable_scope(self.name):\n Wxi=self._create_weight([self.cnn_size, self.cnn_size, in_features, self.out_features], stddev=self.weight_dev,\n kname=self.weight_init, name='conv_Wxi')\n Whi=self._create_weight([self.cnn_size, self.cnn_size, in_features_h, self.out_features], stddev=self.weight_dev,\n kname=self.weight_init, name='conv_Whi')\n Wxf=self._create_weight([self.cnn_size, self.cnn_size, in_features, self.out_features], 
stddev=self.weight_dev,\n kname=self.weight_init, name='conv_Wxf')\n Whf=self._create_weight([self.cnn_size, self.cnn_size, in_features_h, self.out_features], stddev=self.weight_dev,\n kname=self.weight_init, name='conv_Whf')\n Wxc=self._create_weight([self.cnn_size, self.cnn_size, in_features, self.out_features], stddev=self.weight_dev,\n kname=self.weight_init, name='conv_Wxc')\n Whc=self._create_weight([self.cnn_size, self.cnn_size, in_features_h, self.out_features], stddev=self.weight_dev,\n kname=self.weight_init, name='conv_Whc')\n Wxo=self._create_weight([self.cnn_size, self.cnn_size, in_features, self.out_features], stddev=self.weight_dev,\n kname=self.weight_init, name='conv_Wxo')\n Who=self._create_weight([self.cnn_size, self.cnn_size, in_features_h, self.out_features], stddev=self.weight_dev,\n kname=self.weight_init, name='conv_Who')\n Wci=self._create_weight([1,self.height , self.width, self.out_features], stddev=self.weight_dev,\n kname=self.weight_init, name='ele_Wci')\n Wcf=self._create_weight([1,self.height , self.width, self.out_features], stddev=self.weight_dev,\n kname=self.weight_init, name='ele_Wcf')\n Wco=self._create_weight([1,self.height , self.width, self.out_features], stddev=self.weight_dev,\n kname=self.weight_init, name='ele_Wco')\n if not reuse and h is None:\n print('xr'*20,x.get_shape().as_list(),in_features,self.out_features,in_features_h)\n h=tf.zeros((1,self.height,self.width,self.out_features))\n self._input=tf.sigmoid(self._conv2d(x,Wxi,stride=stride,pre_name='Wxi')+self._conv2d(h,Whi,use_bias=True,stride=stride,pre_name='Whi')+Wci*self._cell)\n self._forget=tf.sigmoid(self._conv2d(x,Wxf,stride=stride,pre_name='Wxf')+self._conv2d(h,Whf,use_bias=True,stride=stride,pre_name='Whf')+Wcf*self._cell)\n self._cell=self._forget*self._cell+self._input*tf.tanh(self._conv2d(x,Wxc,stride=stride,pre_name='Wxc')+self._conv2d(h,Whc,stride=stride,use_bias=True,pre_name='Whc'))\n self._output=tf.sigmoid(self._conv2d(x,Wxo,stride=stride,pre_name='Wxo')+self._conv2d(h,Who,use_bias=True,stride=stride,pre_name='Who')+Wco*self._cell)\n else:\n # print('x'*10,x.shape,'\\\\n Wxi',Wxi.shape,'\\\\n h ',h.shape,Whi.shape,'\\\\n c ',Wci.shape)\n if h is None:\n # print('x'*20,x.get_shape().as_list(),in_features,out_features)\n self._input=tf.sigmoid(self._conv2d(x,Wxi,stride=stride,pre_name='Wxi',use_bias=True)+Wci*self._cell)\n self._forget=tf.sigmoid(self._conv2d(x,Wxf,stride=stride,pre_name='Wxf',use_bias=True)+Wcf*self._cell)\n self._cell=self._forget*self._cell+self._input*tf.tanh(self._conv2d(x,Wxc,stride=stride,pre_name='Wxc',use_bias=True))\n self._output=tf.sigmoid(self._conv2d(x,Wxo,stride=stride,pre_name='Wxo',use_bias=True)+Wco*self._cell)\n elif x is None:\n # print('h'*20,h.get_shape().as_list(),in_features_h,out_features)\n self._input=tf.sigmoid(self._conv2d(h,Whi,use_bias=True,stride=stride,pre_name='Whi')+Wci*self._cell)\n self._forget=tf.sigmoid(self._conv2d(h,Whf,use_bias=True,stride=stride,pre_name='Whf')+Wcf*self._cell)\n self._cell=self._forget*self._cell+self._input*tf.tanh(self._conv2d(h,Whc,stride=stride,use_bias=True,pre_name='Whc'))\n self._output=tf.sigmoid(self._conv2d(h,Who,use_bias=True,stride=stride,pre_name='Who')+Wco*self._cell)\n else:\n # print('xh'*20,x.get_shape().as_list(),in_features,out_features,in_features_h)\n self._input=tf.sigmoid(self._conv2d(x,Wxi,stride=stride,pre_name='Wxi')+self._conv2d(h,Whi,use_bias=True,stride=stride,pre_name='Whi')+Wci*self._cell)\n 
self._forget=tf.sigmoid(self._conv2d(x,Wxf,stride=stride,pre_name='Wxf')+self._conv2d(h,Whf,use_bias=True,stride=stride,pre_name='Whf')+Wcf*self._cell)\n self._cell=self._forget*self._cell+self._input*tf.tanh(self._conv2d(x,Wxc,stride=stride,pre_name='Wxc')+self._conv2d(h,Whc,stride=stride,use_bias=True,pre_name='Whc'))\n self._output=tf.sigmoid(self._conv2d(x,Wxo,stride=stride,pre_name='Wxo')+self._conv2d(h,Who,use_bias=True,stride=stride,pre_name='Who')+Wco*self._cell)\n h=self._output*tf.tanh(self._cell)\n\n return h", "def bv_weight(self, output_prop):\n raise NotImplementedError(\"subclasses need to override this method\")", "def forward_propagate(self, x):\n self.z_h = np.dot( x, self.w_ih ) + self.b_h\n #Activations of hidden layer\n self.a_h = self.sigmoid( self.z_h )\n self.z_o = np.dot( self.a_h, self.w_ho ) + self.b_o\n #yEst = activations of output layer\n yEst = self.sigmoid( self.z_o )\n return yEst", "def __call__(self, x):\n return np.dot(x, self.weights[-1])", "def predict(x,w,bb):\r\n return 2*((x.dot(w)+bb)>0)-1", "def get_hidden_values(self):\n\n return T.nnet.sigmoid(T.dot(self.x, self.W) + self.b)", "def predict(self, x, weight):\n pred = np.dot(x, weight)\n pred[np.where(pred <= 0)] = -1\n pred[np.where(pred > 0)] = 1\n return pred", "def output(self):\n # print \"Neuron output\"\n\n if self.output_cache is not None:\n # print self, \"returning from cache\"\n return self.output_cache\n\n self.inputs_cache = []\n\n sum = 0\n for input_edge in self.inputs:\n input = input_edge.from_.output()\n self.inputs_cache.append(input)\n sum += input * input_edge.w\n\n self.output_cache = sigmoid(sum)\n # print \"node output:\", self.output_cache, sum\n return self.output_cache", "def compute(self, pred, target):\n pass", "def neural_result(self, input):\n n_output = self.network.activate(input)\n if n_output >= 0.5:\n return 2\n else:\n return 1", "def output_transform(x: Tensor, y: Tensor,\n y_pred: Tensor, loss: Tensor) -> ret_type:\n return y_pred, y, loss.item()", "def __call__(self, inputs: np.ndarray):\n # Denote the impact the inputs have directly on the outputs\n output_inputs: np.ndarray = np.matmul(self.in2out, inputs.transpose()).transpose()\n \n # Denote the impact hidden nodes have on the outputs, if there are hidden nodes\n if self.n_hidden > 0:\n # Nice to know:\n # - np.transpose() will transpose the tensor\n # - np.matmul(tensor1, tensor2) will perform a matrix multiplication between tensor and tensor2\n \n # The activation is defined by:\n # - the inputs mapping to the hidden nodes\n # - the hidden nodes mapping to themselves\n # - the hidden nodes' biases\n \n # 1) Propagate the hidden nodes\n self.hidden_act = self.act_f(np.matmul(self.in2hid, inputs.transpose()).transpose() +\n np.matmul(self.hid2hid, self.hidden_act.transpose()).transpose() +\n self.hidden_biases)\n \n # 2) Execute the RNN nodes if they exists (updating current hidden state)\n for i, rnn_idx in enumerate(self.rnn_idx):\n self.rnn_state[:, i] = self.rnn_array[i](\n np.concatenate((self.in2hid[rnn_idx] * inputs,\n self.hid2hid[rnn_idx] * self.hidden_act),\n axis=1)[self.rnn_map[i]].reshape(self.bs, self.rnn_array[i].input_size)\n )\n self.hidden_act[:, rnn_idx] = self.rnn_state[:, i, 0]\n \n # 3) Propagate hidden-values to the outputs\n output_inputs += np.matmul(self.hid2out, self.hidden_act.transpose()).transpose()\n \n # Define the values of the outputs, which is the sum of their received inputs and their corresponding bias\n self.output_act = self.act_f(output_inputs + 
self.output_biases)\n return self.output_act", "def evaluate(self, input):\n\t\treturn self.function(np.dot(self.weights, np.array([-1] + list(input))))", "def __call__(self, y, pred, sample_weight=None):", "def EvaluateLayer(X, W, b):\n S = [(np.dot(W, x) + b) for x in X]\n S_hat = BatchNormalize(S) if BN else S\n H = np.maximum(S_hat, 0) if _ReLu else 1 / (1 + np.exp(-S_hat))\n return H, S", "def get_weights(self):", "def activate(self, inputs):\n # Calculate values of hidden nodes\n hidden_values = []\n for i in range(self.hidden_layer_size):\n hidden_node_value = 0\n bias_weight = self.bias_weights[i]\n hidden_node_value += bias_weight\n for j in range(self.input_values):\n weight = self.input_to_hidden_layer_weights[i][j]\n hidden_node_value += inputs[j] * weight\n\n # ReLU activation function\n hidden_node_value = max(hidden_node_value, 0)\n\n hidden_values.append(hidden_node_value)\n\n # Calculate output value\n output_value = 0\n for i in range(self.hidden_layer_size):\n output_value += hidden_values[i] * \\\n self.hidden_to_output_layer_weights[i]\n\n return output_value", "def forward(self, inputs):\n #print(\"w1 shape\", self.w1.shape)\n z1 = np.dot(inputs, self.w1)\n self.a1 = sigmoid(z1)\n \n z2 = np.dot(self.a1, self.w2)\n self.a2 = sigmoid(z2)\n \n z3 = np.dot(self.a2, self.w3)\n self.y = sigmoid(z3)\n \n return self.y", "def __call__(self, x):\n if self.dropout > 0:\n x = ht.dropout_op(x, 1 - self.dropout)\n x = ht.matmul_op(x, self.weight)\n msg = x + ht.broadcastto_op(self.bias, x)\n x = ht.csrmm_op(self.mp, msg)\n if self.activation == \"relu\":\n x = ht.relu_op(x)\n elif self.activation is not None:\n raise NotImplementedError\n return x", "def predict(self, X, w):\n if self.loss == 'linear':\n value = X.dot(w)\n prediction = (value >= 0.5).astype(int)\n elif self.loss == 'perceptron':\n value = X.dot(w)\n prediction = (value >= 0).astype(int)\n elif self.loss == 'svm':\n value = X.dot(w)\n prediction = (value >= 0).astype(int)\n elif self.loss == 'logistic':\n value = self.logistic(X, w)\n prediction = (value >= 0.5).astype(int)\n\n return value, prediction", "def predict(self, X):\n wlist = self.get_model_params()\n \n with torch.no_grad():\n self.Net.linear1.weight = torch.nn.Parameter(torch.from_numpy(wlist[0].T))\n self.Net.linear1.bias = torch.nn.Parameter(torch.from_numpy(wlist[1]))\n\n self.Net.linear2.weight = torch.nn.Parameter(torch.from_numpy(wlist[2].T))\n self.Net.linear2.bias = torch.nn.Parameter(torch.from_numpy(wlist[3]))\n\n self.Net.linear3_1.weight = torch.nn.Parameter(torch.from_numpy(wlist[4].T))\n self.Net.linear3_1.bias = torch.nn.Parameter(torch.from_numpy(wlist[5]))\n\n self.Net.linear3_2.weight = torch.nn.Parameter(torch.from_numpy(wlist[6].T))\n self.Net.linear3_2.bias = torch.nn.Parameter(torch.from_numpy(wlist[7]))\n\n self.Net.linear4_1.weight = torch.nn.Parameter(torch.from_numpy(wlist[8].T))\n self.Net.linear4_1.bias = torch.nn.Parameter(torch.from_numpy(wlist[9]))\n\n self.Net.linear4_2.weight = torch.nn.Parameter(torch.from_numpy(wlist[10].T))\n self.Net.linear4_2.bias = torch.nn.Parameter(torch.from_numpy(wlist[11]))\n\n\n tensor_X = torch.from_numpy(X)\n out_1, out_2 = self.Net(tensor_X)\n \n ## Apply sigmoid to out_1 here before predicting\n out_1 = torch.sigmoid(out_1)\n out_1[out_1 >= 0.5] = 1\n out_1[out_1 < 0.5] = 0\n out_1 = out_1.long()\n\n y_predict = out_1.detach().numpy()\n offset_predict = out_2.detach().numpy()\n return y_predict, offset_predict", "def predict(w,b,X):\n m = X.shape[1]\n # Y_prediction = np.array((1,m))\n 
Y_prediction = np.zeros((1, m))\n w = w.reshape(X.shape[0],1)\n\n # compute vector \"A\" predicting the probalitilies of a cat present in image\n A = sigmoid(np.dot(w.T,X)+b)\n\n for i in range(A.shape[1]):\n # Convert probabilities A[0,i] to actual predictions p[0,i]\n if A[0,i]<=0.5:\n Y_prediction[0,i] = 0\n else:\n Y_prediction[0,i] = 1\n\n assert(Y_prediction.shape==(1,m))\n return Y_prediction", "def _compute_action(self, final_hidden: Tensor) -> Tensor:\n actions = self.activation(self.action_net.forward(final_hidden))\n return actions.view(self.batch_size, self.num_experts, self.output_size_per_expert)", "def get_hidden_output(self):\n\n\t\treturn self.activation(\n\t\t\ttheano.tensor.dot(self.symbolic_corrupted_input, self.weights) +\n\t\t\tself.bias)", "def __call__(self, inputs, state, scope=None):\n with tf.variable_scope(scope or type(self).__name__):\n if self._weightnorm == 'full' or self._weightnorm == 'input':\n input_weights = hops.get_weightnormed_matrix(\n [self.input_size, self.state_size],\n name='V', V_init=self._xh_init)\n\n else:\n input_weights = tf.get_variable(\n 'V',\n [self.input_size, self.state_size],\n initializer=self._xh_init)\n if self._weightnorm == 'full' or self._weightnorm == 'recurrent':\n hidden_weights = hops.get_weightnormed_matrix(\n [self.state_size, self.state_size],\n name='W', V_init=self._hh_init)\n else:\n hidden_weights = tf.get_variable(\n 'W',\n [self.state_size, self.state_size],\n initializer=self._hh_init)\n bias = tf.get_variable('b', [self.state_size],\n initializer=self._b_init)\n\n if self._weight_noise != 0.0:\n hidden_weights = hops.variational_wrapper(\n hidden_weights, weight_noise=self._weight_noise,\n name='hidden_weightnoise')\n input_weights = hops.variational_wrapper(\n input_weights, weight_noise=self._weight_noise,\n name='input_weightnoise')\n if self._keep_prob != 0.0:\n inputs = hops.variational_wrapper(\n inputs, keep_prob=self._keep_prob,\n name='input_dropout')\n state = hops.variational_wrapper(\n state, keep_prob=self._keep_prob,\n name='state_dropout')\n\n a = tf.matmul(state, hidden_weights)\n b = tf.matmul(inputs, input_weights)\n pre_activations = a + b\n if self._weightnorm == 'layer':\n pre_activations = hops.layer_normalise(pre_activations)\n output = self._nonlinearity(pre_activations + bias)\n\n if self._orthreg:\n reg_loss = tf.reduce_sum(tf.squared_difference(\n tf.matmul(hidden_weights, hidden_weights, transpose_b=True),\n tf.constant(np.eye(self.state_size), dtype=tf.float32)))\n reg_loss *= self._orthreg\n tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,\n reg_loss)\n\n return output, output", "def forwardPropagation(self, inputs, label):\n node_hidden = np.dot(inputs, self.input_W)\n node_hidden = np.add(node_hidden, self.input_B)\n node_hidden = np.maximum(0, node_hidden)\n node_output = np.dot(node_hidden, self.hidden_W)\n node_output = np.add(node_output, self.hidden_B)\n #print(node_output)\n exp_node_output = np.exp(node_output)\n node_output = exp_node_output / np.sum(exp_node_output, axis=1, keepdims=True)\n #print(node_output)\n #node_output = self.softmax(node_output)\n loss = np.sum(-np.log(node_output[range(inputs.shape[0]),label]))/(inputs.shape[0])+0.5 * self.regularizer*np.sum(self.input_W *self.input_W)+0.5 * self.regularizer*np.sum(self.hidden_W *self.hidden_W)\n \"\"\"Loss= Input data loss + Loss correction by penalizing the loss, here we use 0.2 as an experimental value\"\"\"\n #loss = np.sum(-np.log(node_output[range(inputs.shape[0]), label])) / (inputs.shape[0]) + 0.2 
* self.regularizer * np.sum(self.input_W ^ 2) + 0.2 * self.regularizer * np.sum(self.hidden_W ^ 2)\n return loss, node_hidden, node_output", "def evaluate(self):\n # initialize delta_weights\n Loss = 0\n for i, x_test in enumerate(self.X_test):\n Loss += (self.sigmoid(np.dot(self.weights,x_test))-self.y_test[i])**2\n return Loss", "def evaluate(self, architecture_output, target_output):\n\n def discriminator_layer(image, name, n, depth, stride, training=True):\n \"\"\"This function creates one layer of the discriminator network.\n\n This function is to be called when creating the structure of the\n discriminator network as it's often used.\n\n Args:\n image: The image to input in the convolutions.\n\n name: The name of the layer.\n\n n: the fourth dimension of the shape of the weights.\n\n depth: the third dimension of the shape of the weights.\n\n stride: the stride to use in the convolution.\n\n Returns:\n The resulting activations after applying the layer.\n \"\"\"\n weights = tf.get_variable(shape=[3, 3, depth, n], name=\"weights\" + name,\n initializer=tf.uniform_unit_scaling_initializer(factor=0.01))\n #biases = tf.Variable(tf.constant(0.01, shape=[n]), name=\"biases\" + name)\n biases = tf.get_variable(shape=[n], name=\"biases\" + name, \n initializer=tf.constant_initializer(value=0.01))\n\n conv = tf.nn.conv2d(image, weights, strides=[1, stride, stride, 1],\n padding=\"VALID\") + biases\n leaky = tf.maximum(0.1 * conv, conv)\n\n return tf.contrib.layers.batch_norm(leaky, center=True, updates_collections=None,\n scale=True, is_training=training)\n\n def discriminator_network(image):\n # Input Layer\n weights = tf.get_variable(shape=[3, 3, 3, 64], name=\"weights1\",\n initializer=tf.uniform_unit_scaling_initializer(factor=0.01))\n #biases = tf.Variable(tf.constant(0.01, shape=[64]), name=\"biases1\")\n biases = tf.get_variable(shape=[64], name=\"biases1\", \n initializer=tf.constant_initializer(value=0.01))\n conv = tf.nn.conv2d(image, weights, strides=[1, 1, 1, 1],\n padding=\"SAME\") + biases\n leaky = tf.maximum(0.1 * conv, conv)\n\n # Discriminator Layers\n layer1 = discriminator_layer(leaky, \"A\", 64, 64, 2, training=True)\n layer2 = discriminator_layer(layer1, \"B\", 128, 64, 1, training=True)\n layer3 = discriminator_layer(layer2, \"C\", 128, 128, 2, training=True)\n layer4 = discriminator_layer(layer3, \"D\", 256, 128, 1, training=True)\n layer5 = discriminator_layer(layer4, \"E\", 256, 256, 2, training=True)\n layer6 = discriminator_layer(layer5, \"F\", 512, 256, 2, training=True)\n layer7 = discriminator_layer(layer6, \"G\", 512, 512, 2, training=True)\n layer8 = discriminator_layer(layer7, \"H\", 512, 512, 2, training=True)\n\n # Output Layer\n shape = int(np.prod(layer8.get_shape()[1:]))\n flat = tf.reshape(layer8, [-1, shape])\n weights = tf.get_variable(shape=[shape, 1], name=\"weights2\", dtype=tf.float32,\n initializer=tf.truncated_normal_initializer(stddev=1e-1))\n biases = tf.get_variable(shape=[1], name=\"biases2\", dtype=tf.float32,\n initializer=tf.constant_initializer(1.0))\n connect = tf.matmul(flat, weights) + biases\n\n return tf.maximum(0.1 * connect, connect)\n\n with tf.variable_scope(\"discriminator\", reuse=None):\n self.disc_gt = discriminator_network(target_output)\n\n with tf.variable_scope(\"discriminator\", reuse=True):\n self.disc_out = discriminator_network(architecture_output)\n\n # Network Loss\n #loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(\n # logits=self.disc_out, labels=tf.ones_like(self.disc_out)))\n loss = 
tf.reduce_mean(-tf.log(self.disc_out + 1e-12))\n\n return loss", "def __init__(self, input_nodes, hidden_nodes, hidden_layers, output_nodes):\n # Class members:\n # num_input_nodes\n # num_hidden_nodes\n # num_hidden_layers\n # num_output_nodes\n # weights = [[num_hidden_nodes, num_input_nodes],[num_hidden_nodes, num_hidden_nodes],[]<- for each hl,\n # [num_output_nodes, num_hidden_nodes]]\n # biases\n\n self.num_input_nodes = input_nodes\n self.num_hidden_nodes = hidden_nodes\n self.num_hidden_layers = hidden_layers\n self.num_output_nodes = output_nodes\n\n self.weights = []\n for i in range(self.num_hidden_layers + 1):\n if i is 0:\n # first weights array is input to hidden\n self.weights.append(.5 * np.random.rand(self.num_hidden_nodes, self.num_input_nodes) - .25)\n\n elif i < self.num_hidden_layers:\n # next weight array is hidden nodes to hidden nodes\n self.weights.append(.5 * np.random.rand(self.num_hidden_nodes, self.num_hidden_nodes) - .25)\n else:\n # last weight array is hidden nodes to output nodes\n self.weights.append(.5 * np.random.rand(self.num_output_nodes, self.num_hidden_nodes) - .25)\n\n self.biases = []\n for i in range(self.num_hidden_layers + 1):\n if i < self.num_hidden_layers:\n # for every hidden node there is a bias\n self.biases.append(0.5 * np.random.rand(self.num_hidden_nodes) - .25)\n else:\n # for the output node there is a bias as well\n self.biases.append(0.5 * np.random.rand(self.num_output_nodes) - .25)\n\n self.activation = np.vectorize(self.tanh, otypes=[float])", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n if (len(incoming.shape) > 2 and self.flatten_input) or (len(incoming.shape) > 3):\n # Flatten all but first dimension (e.g. 
flat seq_pos and features)\n X = tf.reshape(incoming, self.incoming_shape)\n else:\n X = incoming\n net = dot_product(X, self.W)\n if self.b is not None:\n net += self.b\n self.out = self.a(net)\n \n return self.out", "def forward(self, input):\r\n output = np.matmul(input, self.weights) + bias\r\n return output", "def compute_pred_network_output(self, model_idx, input_data):\n\n feed_dict = {}\n feed_dict[self.X_Minibatch] = input_data\n return self.session.run(self.pred_output[model_idx], feed_dict=feed_dict)", "def process_weights_and_bias(self, rnn_weights, rnn_props):\n # from code of tensorflow GRU cell, it can be known that shape of hidden_kernel(or candidate_kernel)\n # is (input_size+hidden_unit, hidden_unit)\n hidden_size = rnn_weights[\"hidden_kernel\"].value.shape[1]\n input_size = rnn_weights[\"hidden_kernel\"].value.shape[0] - hidden_size\n weight_dtype = rnn_weights[\"hidden_kernel\"].dtype\n bias_dtype = rnn_weights[\"hidden_bias\"].dtype\n # below code will use same notation as ONNX document\n # z means update gate, r means reset gate, h means hidden gate;\n # at this time weights of gate include input and state, will split it next\n r_kernel, z_kernel = np.split(rnn_weights[\"gate_kernel\"].value, [hidden_size], axis=1)\n h_kernel = rnn_weights[\"hidden_kernel\"].value\n r_bias, z_bias = np.split(rnn_weights[\"gate_bias\"].value, [hidden_size], axis=0)\n h_bias = rnn_weights[\"hidden_bias\"].value\n # ONNX GRU split weights of input and state, so have to split *_kernel\n input_r_kernel, state_r_kernel = np.split(r_kernel, [input_size], axis=0)\n input_z_kernel, state_z_kernel = np.split(z_kernel, [input_size], axis=0)\n input_h_kernel, state_h_kernel = np.split(h_kernel, [input_size], axis=0)\n W_zrh = np.concatenate((input_z_kernel, input_r_kernel, input_h_kernel), axis=1)\n R_zrh = np.concatenate((state_z_kernel, state_r_kernel, state_h_kernel), axis=1)\n # transpose weight matrix\n W_zrh = np.transpose(np.expand_dims(W_zrh, axis=0), axes=(0, 2, 1))\n R_zrh = np.transpose(np.expand_dims(R_zrh, axis=0), axes=(0, 2, 1))\n W_zrh = W_zrh.astype(weight_dtype)\n R_zrh = R_zrh.astype(weight_dtype)\n assert W_zrh.shape == (1, 3*hidden_size, input_size)\n assert R_zrh.shape == (1, 3*hidden_size, hidden_size)\n Wb_zrh = np.concatenate((z_bias, r_bias, h_bias), axis=0)\n # tf don't have bias for state, so use 0 instead\n zero = np.zeros_like(z_bias)\n Rb_zrh = np.concatenate((zero, zero, zero), axis=0)\n B_zrh = np.concatenate((Wb_zrh, Rb_zrh), axis=0)\n B_zrh = np.expand_dims(B_zrh, axis=0)\n B_zrh = B_zrh.astype(bias_dtype)\n assert B_zrh.shape == (1, 6*hidden_size)\n # create const ONNX node\n w_name = utils.make_name(\"W\")\n w_node = self.g.make_const(w_name, W_zrh, skip_conversion=True)\n\n r_name = utils.make_name(\"R\")\n r_node = self.g.make_const(r_name, R_zrh, skip_conversion=True)\n\n b_name = utils.make_name(\"B\")\n b_node = self.g.make_const(b_name, B_zrh, skip_conversion=True)\n\n rnn_props.input_size = input_size\n rnn_props.hidden_size = hidden_size\n rnn_props.onnx_input_ids[\"W\"] = w_node.output[0]\n rnn_props.onnx_input_ids[\"R\"] = r_node.output[0]\n rnn_props.onnx_input_ids[\"B\"] = b_node.output[0]", "def predict(w, b, X):\n\n m = X.shape[1]\n Y_prediction = np.zeros((1,m))\n w = w.reshape(X.shape[0], 1)\n\n A = sigmoid(np.dot(w.T, X) + b)\n\n for i in range(A.shape[1]):\n\n print (i)\n Y_prediction[0, i] = 1 if A[0, i] > 0.5 else 0\n\n return Y_prediction" ]
[ "0.67609805", "0.6741717", "0.6619007", "0.65938884", "0.650701", "0.65016365", "0.6475759", "0.6448658", "0.63928056", "0.63902205", "0.63838184", "0.63790214", "0.63790214", "0.6343709", "0.6316035", "0.6315198", "0.6305553", "0.62861043", "0.628232", "0.62760216", "0.62695944", "0.6269293", "0.6243012", "0.62320924", "0.6231806", "0.62314355", "0.6226472", "0.62252444", "0.62252444", "0.6205548", "0.62053794", "0.61991143", "0.6156547", "0.6144094", "0.61234975", "0.61204594", "0.61183774", "0.6095704", "0.6091777", "0.60849553", "0.6079997", "0.60758", "0.6072787", "0.6062241", "0.6061752", "0.6058371", "0.60547584", "0.6052505", "0.60502565", "0.60451907", "0.60451865", "0.6044144", "0.6037798", "0.6023052", "0.6016201", "0.60146344", "0.6004657", "0.6001072", "0.60008234", "0.5997987", "0.59939295", "0.59897685", "0.5980111", "0.5976778", "0.59752274", "0.5961007", "0.59608203", "0.59365773", "0.593204", "0.5927388", "0.5927167", "0.5921265", "0.5918528", "0.59160477", "0.59125775", "0.5912122", "0.59109896", "0.5909023", "0.5905325", "0.5903963", "0.58968806", "0.589083", "0.5883564", "0.58784366", "0.58771694", "0.58711475", "0.58702904", "0.58678705", "0.58664787", "0.5865622", "0.58655477", "0.58609295", "0.5859534", "0.58570296", "0.5850452", "0.58484465", "0.5845988", "0.58337307", "0.58327186", "0.5826011", "0.5824844" ]
0.0
-1
returns the bounds associated with the input to this layer
def get_bounds(self, channel_indx=None): if channel_indx is None: upper_bound = self.upper_bound.reshape(self.batch_size, -1) lower_bound = self.lower_bound.reshape(self.batch_size, -1) else: upper_bound = self.upper_bound[:, channel_indx, :].reshape( self.batch_size, -1 ) lower_bound = self.lower_bound[:, channel_indx, :].reshape( self.batch_size, -1 ) return upper_bound, lower_bound
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bounds(self):\n return self.kernel.bounds", "def input_bounds(self):\n return self.__input_bounds", "def bounds(self):\n return self.GetBounds()", "def bounds(self):\n return self._bounds", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def input_bounds(self):\n if self._input_bounds:\n return dict(self._input_bounds)\n return None", "def input_bounds(self):\n if self._input_bounds:\n return dict(self._input_bounds)\n return None", "def bounds(self):\n return self._bboxes[0][0] #TODO: merge all coverages", "def get_bounds_parameters(self):\n bounds = []\n bounds += self.var_noise.bounds\n bounds += self.mean.bounds\n bounds += self.kernel.get_bounds_parameters()\n\n return bounds", "def bounds(self):\n return self._bboxes[0][0] #TODO: merge all coverages", "def get_bounds(self):\n raise Exception(\"Non-implemented base class method.\")", "def bounds(self):\n if self.change_dimensionality:\n return [self._bounds[0]] * self.N\n else:\n return self._bounds", "def bounds(self):\n return self.xmin, self.xmax, self.ymin, self.ymax", "def get_bounds(self):\n return self._geometry.bounds", "def input_bounds(self):\n return self._min_input, self._max_input", "def bounds(self): # -> tuple[()]:\n ...", "def bounds(self):\n return self.substrates.bounds", "def get_params_bounds(self) -> np.array:\n pass", "def get_bounds():\n return [0.00], [1.00]", "def bounds(self):\n return self.min_col, self.min_row, self.max_col, self.max_row", "def bounds(self) -> typing.List[float]:\n raise NotImplementedError()", "def bounds(self):\n return (\n self.x, self.y,\n self.x, self.y\n )", "def get_bounds(self):\n return ([self.t_min] * self.dim,[self.t_max] * self.dim)", "def get_bounds():\n bounds = [\n (0.1, 0.5), # Omega_m\n (0.05, 0.15) # beta\n ]\n return np.array(bounds)", "def get_bounds(self):\n log.debug(str(inspect.stack()[1][3]) + \"--> OC.get_bounds()\")\n\n # TODO: Move the operation out of here.\n\n xmin = Inf\n ymin = Inf\n xmax = -Inf\n ymax = -Inf\n\n # for obj in self.object_list:\n for obj in self.get_list():\n try:\n gxmin, gymin, gxmax, gymax = obj.bounds()\n xmin = min([xmin, gxmin])\n ymin = min([ymin, gymin])\n xmax = max([xmax, gxmax])\n ymax = max([ymax, gymax])\n except Exception as e:\n log.warning(\"DEV WARNING: Tried to get bounds of empty geometry. 
%s\" % str(e))\n\n return [xmin, ymin, xmax, ymax]", "def fetchbounds(self):\n pnts = [x for x in [self.out_start, self.start, self.in_start, \\\n self.in_end, self.end, self.out_end] \\\n if x is not None]\n return min(pnts), max(pnts)", "def condition_bounds(self) -> Tuple[float, float]:\n raise NotImplementedError", "def optimization_bounds(self, topology):\n bounds_low = np.zeros(self.number_of_parameters())\n bounds_up = np.zeros(self.number_of_parameters())\n\n for pkey, parameter in self.parameters.items():\n bounds_low[pkey] = parameter.bound_low(topology)\n bounds_up[pkey] = parameter.bound_up(topology)\n\n return bounds_low, bounds_up", "def bounds(self, pos):", "def get_bounds(self, parameter_name=None):\n if parameter_name is None:\n return [self.get_bounds(p)\n for p in self.shape_parameters]\n if parameter_name in self.shape_parameters.keys():\n bounds = []\n for ll in self.likelihood_list:\n if parameter_name in ll.shape_parameters.keys():\n bounds.append(ll.get_bounds(parameter_name))\n bounds = np.array(bounds)\n ret= np.max(bounds[:,0]), np.min(bounds[:,1])\n if ret[1] <= ret[0]:\n raise InvalidParameterSpecification(\"lower bound %s higher than upper bound!\" % parameter_name)\n return ret\n\n elif parameter_name.endswith('_rate_multiplier'):\n return 0, float('inf')\n else:\n raise InvalidParameter(\"Non-existing parameter %s\" % parameter_name)", "def bounds(self) -> tuple[float, float, float, float]:\n transform = self.transform\n a, b, c, d, e, f, _, _, _ = transform\n if b == d == 0:\n xs = (c, c + a * self.width)\n ys = (f, f + e * self.height)\n else: # rotated\n c0x, c0y = c, f\n c1x, c1y = transform * (0, self.height)\n c2x, c2y = transform * (self.width, self.height)\n c3x, c3y = transform * (self.width, 0)\n xs = (c0x, c1x, c2x, c3x)\n ys = (c0y, c1y, c2y, c3y)\n return min(xs), min(ys), max(xs), max(ys)", "def bounds(self) -> Tensor:\n return torch.cat([self.mins, self.mins + self.ranges], dim=-2)", "def bounds(self):\n b = []\n\n for dim in self.dimensions:\n if dim.size == 1:\n b.append(dim.bounds)\n else:\n b.extend(dim.bounds)\n\n return b", "def get_bounds(self, parameter_name=None):\n if parameter_name is None:\n return [self.get_bounds(p) for p in self.shape_parameters.keys()]\n elif parameter_name in list(self.__likelihood.rate_parameters.keys()) + list(self.__likelihood.shape_parameters.keys()):\n return self.__likelihood.get_bounds(parameter_name)\n # in the newly added parameters\n else:\n anchor_settings = list(self.shape_parameters[parameter_name][0].keys())\n return min(anchor_settings), max(anchor_settings)", "def internal_bounds(self) -> tuple[float, float, float, float]:\n xres, yres = self.res\n w, s, e, n = self.bounds\n y0, y1 = (n, s) if yres < 0 else (s, n)\n x0, x1 = (e, w) if xres < 0 else (w, e)\n return x0, y0, x1, y1", "def bounds(self):\n \n return self.osmdb.bounds()", "def bounds(self):\n return [(2, None)]", "def get_bounds(self):\n x_max = self.data['x'].max()\n y_max = self.data['y'].max()\n z_max = self.data['z'].max()\n print(\"x={}; y={}; z={}\".format(x_max, y_max, z_max))\n return (x_max, y_max, z_max)", "def bounds(self) -> Box:\n raise NotImplementedError()", "def bounds(self):\n frame_ = self.to_frame().total_bounds.flatten().tolist()\n return BBox(\n left=frame_[0], bottom=frame_[1], right=frame_[2], top=frame_[3]\n )", "def get_raw_bounds(self) -> [Vector, Vector]:\n\t\tverts = np.array([v.co for mesh in self._meshes for v in mesh.data.vertices])\n\t\tbbox_min = Vector([*np.min(verts, axis=0)])\n\t\tbbox_max = 
Vector([*np.max(verts, axis=0)])\n\t\treturn bbox_min, bbox_max", "def getBounds(self, nStates, nParams):\n raise NotImplementedError(\n \"bounds have not been implemented for this Experiment\")", "def Bounds(self):\n assert self.points is not None\n\n if self.points.shape[1] == 3:\n bounds = np.array([[np.min(self.points[:,0]),\n np.min(self.points[:,1]),\n np.min(self.points[:,2])],\n [np.max(self.points[:,0]),\n np.max(self.points[:,1]),\n np.max(self.points[:,2])]])\n makezero(bounds)\n return bounds\n elif self.points.shape[1] == 2:\n bounds = np.array([[np.min(self.points[:,0]),\n np.min(self.points[:,1])],\n [np.max(self.points[:,0]),\n np.max(self.points[:,1])]])\n makezero(bounds)\n return bounds\n elif self.points.shape[1] == 1:\n bounds = np.array([[np.min(self.points[:,0])],\n [np.max(self.points[:,0])]])\n makezero(bounds)\n return bounds\n else:\n raise ValueError(\"Invalid dimension for mesh coordinates\")", "def getbounds(self):\n return pygame.Rect(self.rect)", "def bounds(self):\n x,y,z = self._arr\n try:\n return Bounds(x.min(), y.min(), z.min(),\n x.max(), y.max(), z.max())\n except ValueError:\n raise simulocloud.exceptions.EmptyPointCloud(\n \"len 0 PointCloud has no Bounds\")", "def get_bounds(self):\n bottom_right = np.asarray([self.coords[k][0] for k in range(self.dim)])\n upper_left = np.asarray([self.coords[k][-1] for k in range(self.dim)])\n return bottom_right, upper_left", "def param_bounds(self) -> Optional[Sequence[Tuple[float, float]]]:\n return [(-1.0, 1.0)] * len(list(self.params()))", "def domain_bounds(self):\n return self._xmin, self._xmax, self._ymin, self._ymax, self._zmin, self._zmax", "def get_bounds(self, parameter_name=None):\n if parameter_name is None:\n return [self.get_bounds(p) for p in self.shape_parameters.keys()]\n if parameter_name in self.shape_parameters:\n anchor_settings = list(self.shape_parameters[parameter_name][0].keys())\n return min(anchor_settings), max(anchor_settings)\n elif parameter_name.endswith('_rate_multiplier'):\n for source_name, allow_negative in zip(self.source_name_list,self.source_allowed_negative):\n if parameter_name.startswith(source_name) and allow_negative==True:\n return float('-inf'), float('inf')\n return 0, float('inf')\n else:\n raise InvalidParameter(\"Non-existing parameter %s\" % parameter_name)", "def force_bounds(self):\n return self._min_force, self._max_force", "def compute_bounds(self, space):\n bounds = np.zeros((len(space), 2))\n\n for idx, param in enumerate(space):\n\n if TYPE[param[\"type\"]] is TYPE.FLOAT or \\\n TYPE[param[\"type\"]] is TYPE.INTEGER:\n bounds[idx] = (param[\"min\"], param[\"max\"])\n\n elif TYPE[param[\"type\"]] is TYPE.DISCRETE or \\\n TYPE[param[\"type\"]] is TYPE.DISCRETE:\n bounds[idx] = (0, len(param['values']))\n\n return bounds", "def get_bounds(self):\r\n left, bottom, front = 10000, 10000, 10000\r\n right, top, back = -10000, -10000, -10000\r\n for b in self.buf:\r\n for v in b.vertices:\r\n if v[0] < left:\r\n left = v[0]\r\n if v[0] > right:\r\n right = v[0]\r\n if v[1] < bottom:\r\n bottom = v[1]\r\n if v[1] > top:\r\n top = v[1]\r\n if v[2] < front:\r\n front = v[2]\r\n if v[2] > back:\r\n back = v[2]\r\n\r\n return (left, bottom, front, right, top, back)", "def get_bounds(self):\n\n northing=self.f.variables['y']\n easting=self.f.variables['x']\n\n lat1,lon1 = utm.to_latlon(np.min(easting),np.min(northing),11,northern=True)\n lat2,lon2 = utm.to_latlon(np.max(easting),np.max(northing),11,northern=True)\n\n return (lon1,lon2,lat1,lat2)", "def GetBounds(self, 
p_int, p_int_1, p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...):\n ...", "def get_bounding_box(self):\n if len(self.polygons) == 0:\n return None\n return numpy.array(((min(pts[:, 0].min() for pts in self.polygons),\n min(pts[:, 1].min() for pts in self.polygons)),\n (max(pts[:, 0].max() for pts in self.polygons),\n max(pts[:, 1].max() for pts in self.polygons))))", "def get_bounds(geo_data):\n return geo_data[\"geometry\"].bounds", "def bounds(self) -> BoundsLike:\n the_bounds = np.array([np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf])\n\n def _update_bounds(bounds):\n def update_axis(ax):\n if bounds[ax * 2] < the_bounds[ax * 2]:\n the_bounds[ax * 2] = bounds[ax * 2]\n if bounds[ax * 2 + 1] > the_bounds[ax * 2 + 1]:\n the_bounds[ax * 2 + 1] = bounds[ax * 2 + 1]\n\n for ax in range(3):\n update_axis(ax)\n return\n\n for actor in self._actors.values():\n if isinstance(actor, (_vtk.vtkCubeAxesActor, _vtk.vtkLightActor)):\n continue\n if (\n hasattr(actor, 'GetBounds')\n and actor.GetBounds() is not None\n and id(actor) != id(self.bounding_box_actor)\n ):\n _update_bounds(actor.GetBounds())\n\n if np.any(np.abs(the_bounds)):\n the_bounds[the_bounds == np.inf] = -1.0\n the_bounds[the_bounds == -np.inf] = 1.0\n\n return cast(BoundsLike, tuple(the_bounds))", "def uv_bounds(self):\n umin, umax, vmin, vmax = breptools_UVBounds(self.topods_shape())\n bounds = Box(np.array([umin, vmin]))\n bounds.encompass_point(np.array([umax, vmax]))\n return bounds", "def _get_bound(self):\n\n if self.totensor:\n max_ = 1.0\n min_ = 0.0\n else:\n max_ = 255.0\n min_ = 0.0\n\n upper = (max_ - torch.tensor(self.img_norm['mean'])) / torch.tensor(self.img_norm['std'])\n lower = min_ - torch.tensor(self.img_norm['mean']) / torch.tensor(self.img_norm['std'])\n \n return upper, lower", "def bounds(self) -> devices.PrimaryBounds:\n if self._bounds is None:\n print(self.__class__.bounds.__doc__)\n raise SilSubProblemError(\n \"The *bounds* property has not been set (see above).\"\n )\n return self._bounds", "def bounding_box(self):\n if self._owcs.pixel_bounds is None:\n if self._owcs.pixel_shape is not None:\n nx, ny = self._owcs.pixel_shape\n elif self._owcs.array_shape is not None:\n ny, nx = self._owcs.array_shape\n else:\n return None\n\n return ((-0.5, nx - 0.5), (-0.5, ny - 0.5))\n\n else:\n return self._owcs.pixel_bounds", "def bounding_box(self):\n if self._owcs.pixel_bounds is None:\n if self._owcs.pixel_shape is not None:\n nx, ny = self._owcs.pixel_shape\n elif self._owcs.array_shape is not None:\n ny, nx = self._owcs.array_shape\n else:\n return None\n\n return ((-0.5, nx - 0.5), (-0.5, ny - 0.5))\n\n else:\n return self._owcs.pixel_bounds", "def eigs_bounds(self) -> Tuple[float, float]:\n raise NotImplementedError", "def total_bounds(self):\n if not self.empty:\n return tuple(self.numba_rtree._bounds_tree[0, :])\n else:\n return tuple((np.nan,) * self.numba_rtree._bounds_tree.shape[1])", "def getBounds(self):\n return GRectangle(getX(), getY(), frameWidth, frameHeight)", "def _getBounds(self, request):\n start = _getBound(request.args, \"start\")\n stop = _getBound(request.args, \"stop\", self._collection.pageSize)\n return start, stop", "def get_bounds(self):\n occupied_locations = self.board.keys()\n min_x = min(p[0] for p in occupied_locations)\n max_x = max(p[0] for p in occupied_locations)\n min_y = min(p[1] for p in occupied_locations)\n max_y = max(p[1] for p in occupied_locations)\n return ((min_x, max_x), (min_y, max_y))", "def boundingRect(self):\n return 
self.rect().adjusted(-2, -2, 2, 2)", "def get_bounding_box(self):\n lon, lat = self.coordinates\n\n ll = (np.min(lon),np.min(lat))\n ul = (np.min(lon),np.max(lat))\n ur = (np.max(lon),np.max(lat))\n lr = (np.max(lon),np.min(lat))\n\n return (ll, ul, ur, lr)", "def get_bbox(self):\n return (self.get_handler().PROCESSING_LIMIT_WEST,\n self.get_handler().PROCESSING_LIMIT_SOUTH,\n self.get_handler().PROCESSING_LIMIT_EAST,\n self.get_handler().PROCESSING_LIMIT_NORTH\n )", "def fd_bounds(self):\n return self._fd_bounds", "def parameter_bounds(self):\n for name, bound in self.named_parameter_bounds():\n yield bound", "def _get_one_bound(self, param_name):\n return getattr(self, '__' + param_name + '_bounds')", "def getBounds(self, srs=None):\n if srs not in self._bounds:\n gt = self._getGeoTransform()\n nativeSrs = self.getProj4String()\n if not nativeSrs:\n self._bounds[srs] = None\n return\n bounds = {\n 'll': {\n 'x': gt[0] + self.sourceSizeY * gt[2],\n 'y': gt[3] + self.sourceSizeY * gt[5],\n },\n 'ul': {\n 'x': gt[0],\n 'y': gt[3],\n },\n 'lr': {\n 'x': gt[0] + self.sourceSizeX * gt[1] + self.sourceSizeY * gt[2],\n 'y': gt[3] + self.sourceSizeX * gt[4] + self.sourceSizeY * gt[5],\n },\n 'ur': {\n 'x': gt[0] + self.sourceSizeX * gt[1],\n 'y': gt[3] + self.sourceSizeX * gt[4],\n },\n 'srs': nativeSrs,\n }\n # Make sure geographic coordinates do not exceed their limits\n if self._proj4Proj(nativeSrs).crs.is_geographic and srs:\n try:\n self._proj4Proj(srs)(0, 90, errcheck=True)\n yBound = 90.0\n except RuntimeError:\n yBound = 89.999999\n keys = ('ll', 'ul', 'lr', 'ur')\n for key in keys:\n bounds[key]['y'] = max(min(bounds[key]['y'], yBound), -yBound)\n while any(bounds[key]['x'] > 180 for key in keys):\n for key in keys:\n bounds[key]['x'] -= 360\n while any(bounds[key]['x'] < -180 for key in keys):\n for key in keys:\n bounds[key]['x'] += 360\n if any(bounds[key]['x'] >= 180 for key in keys):\n bounds['ul']['x'] = bounds['ll']['x'] = -180\n bounds['ur']['x'] = bounds['lr']['x'] = 180\n if srs and srs != nativeSrs:\n inProj = self._proj4Proj(nativeSrs)\n outProj = self._proj4Proj(srs)\n keys = ('ll', 'ul', 'lr', 'ur')\n pts = pyproj.Transformer.from_proj(inProj, outProj, always_xy=True).itransform([\n (bounds[key]['x'], bounds[key]['y']) for key in keys])\n for idx, pt in enumerate(pts):\n key = keys[idx]\n bounds[key]['x'] = pt[0]\n bounds[key]['y'] = pt[1]\n bounds['srs'] = srs.decode() if isinstance(srs, bytes) else srs\n bounds['xmin'] = min(bounds['ll']['x'], bounds['ul']['x'],\n bounds['lr']['x'], bounds['ur']['x'])\n bounds['xmax'] = max(bounds['ll']['x'], bounds['ul']['x'],\n bounds['lr']['x'], bounds['ur']['x'])\n bounds['ymin'] = min(bounds['ll']['y'], bounds['ul']['y'],\n bounds['lr']['y'], bounds['ur']['y'])\n bounds['ymax'] = max(bounds['ll']['y'], bounds['ul']['y'],\n bounds['lr']['y'], bounds['ur']['y'])\n self._bounds[srs] = bounds\n return self._bounds[srs]", "def _compute_bounds(self, axis, view):\n return None", "def canvas_bounds(self) -> utils.BoxRegion:", "def _init_optimizer_bounds(self):\n bounds = []\n for filt in self.filters:\n if filt.optimize_fc:\n bounds.append((np.log10(filt.min_fc), np.log10(filt.max_fc)))\n if filt.optimize_q:\n bounds.append((filt.min_q, filt.max_q))\n if filt.optimize_gain:\n bounds.append((filt.min_gain, filt.max_gain))\n return bounds", "def calc_bounds(roi: np.ndarray) -> Dict[int, BoundInfo]:\n try:\n min_bounds, max_bounds = calc_bounds(roi)\n return {\n num: BoundInfo(lower=lower, upper=upper)\n for num, (lower, upper) in 
enumerate(zip(min_bounds, max_bounds))\n if num != 0 and upper[0] != -1\n }\n except KeyError:\n bound_info = {}\n points = np.nonzero(roi)\n comp_num = roi[points]\n point_dict = defaultdict(list)\n for num, point in zip(comp_num, np.transpose(points)):\n point_dict[num].append(point)\n for num, points_for_num in point_dict.items():\n lower = np.min(points_for_num, 0)\n upper = np.max(points_for_num, 0)\n bound_info[num] = BoundInfo(lower=lower, upper=upper)\n return bound_info", "def get_object_bounds(self):\n if len(self._object_bounds) == 0:\n # Nothing plotted yet\n return -.01, .01, -.01, .01\n xmins, xmaxs, ymins, ymaxs = np.array(self._object_bounds).T\n xmax = max(xmaxs.max(), xmins.max())\n xmin = min(xmins.min(), xmaxs.min())\n ymax = max(ymaxs.max(), ymins.max())\n ymin = min(ymins.min(), ymaxs.min())\n return xmin, xmax, ymin, ymax", "def _get_bounds(x, y, size):\n x = np.array(np.atleast_1d(x))\n y = np.array(np.atleast_1d(y))\n\n lower_x = np.rint(x - size[0]/2)\n lower_y = np.rint(y - size[1]/2)\n\n return np.stack((np.stack((lower_x, lower_x + size[0]), axis=1),\n np.stack((lower_y, lower_y + size[1]), axis=1)), axis=1).astype(int)", "def get_bounds(self, crs=\"default\"):\n\n if crs == \"default\":\n crs = podpac.settings[\"DEFAULT_CRS\"]\n\n bounds = {}\n for coords in self.find_coordinates():\n ct = coords.transform(crs)\n for dim, (lo, hi) in ct.bounds.items():\n if dim not in bounds:\n bounds[dim] = (lo, hi)\n else:\n bounds[dim] = (min(lo, bounds[dim][0]), max(hi, bounds[dim][1]))\n\n return bounds, crs", "def bounds(*tile):\n tile = _parse_tile_arg(*tile)\n xtile, ytile, zoom, provider_bounds = tile\n a = ul(xtile, ytile, zoom, provider_bounds)\n b = ul(xtile + 1, ytile + 1, zoom, provider_bounds)\n return Bbox(a[0], b[1], b[0], a[1])", "def bbox(self):\n lower = (self.x.min(), self.y.min())\n upper = (self.x.max(), self.y.max())\n return (lower, upper)", "def draw_bounds():\n\n pass", "def bounds(self, start=None, finish=None):\n lower = start if start is not None else self.limits[0]\n upper = finish if finish is not None else self.limits[1]\n\n lower = lower + self.offsets[0]\n upper = upper + self.offsets[1]\n\n return (lower, upper)", "def as_bounds(self) -> Dict[str, float]:\n return {\n \"left\": self.x,\n \"top\": self.y,\n \"right\": self.x + self.width,\n \"bottom\": self.y + self.height,\n }", "def GetBounds(self, vtkAMRBox, , , p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...):\n ...", "def get_soft_bounds(self):\n if self.bounds is None:\n hl,hu=(None,None)\n else:\n hl,hu=self.bounds\n\n if self._softbounds is None:\n sl,su=(None,None)\n else:\n sl,su=self._softbounds\n\n \n if sl is None: l = hl\n else: l = sl\n\n if su is None: u = hu\n else: u = su\n\n return (l,u)", "def get_bound(box_list):\n box_xyxy_list = []\n for box in box_list:\n box_xyxy = xywh2xyxy(box)\n box_xyxy_list.append(box_xyxy)\n\n box_xyxy_list = np.array(box_xyxy_list)\n x1max, y1max, x2max, y2max = np.amax(box_xyxy_list, axis=0)\n x1min, y1min, x2min, y2min = np.amin(box_xyxy_list, axis=0)\n\n boundbox = xyxy2xywh([x1min, y1min, x2max, y2max])\n return boundbox", "def get_bounding_box(self, poly=None):\n\n use_poly = poly if poly else self.res_poly\n\n # TODO: Test to comply with future values.\n # Updates the bounds\n if self.bounds_changed:\n # Gets the minimum and maximum value of each bounds.\n self.xmin = float('inf')\n self.ymin = float('inf')\n self.xmax = float('-inf')\n self.ymax = float('-inf')\n\n for points in use_poly:\n x = points[0] - 
self.x\n y = points[1] - self.y\n\n if x < self.xmin:\n self.xmin = x\n if x > self.xmax:\n self.xmax = x\n if y < self.ymin:\n self.ymin = y\n if y > self.ymax:\n self.ymax = y\n\n # Set bounds changed to be false\n self.bounds_changed = False\n \n return [self.xmin + self.x,\n self.ymin + self.y,\n self.xmax + self.x,\n self.ymax + self.y]", "def bounds(self, axis, view=None):\n if view is None:\n view = self\n if axis not in self._vshare.bounds:\n self._vshare.bounds[axis] = self._compute_bounds(axis, view)\n return self._vshare.bounds[axis]", "def getBoundingBox(self):\n lX, lY = self.lX(), self.lY()\n return min(lX), min(lY), max(lX), max(lY)", "def get_bounds(p_state, idx_image=-1, idx_chain=-1):\n _min = (3*ctypes.c_float)()\n _max = (3*ctypes.c_float)()\n _Get_Bounds(ctypes.c_void_p(p_state), _min, _max,\n ctypes.c_int(idx_image), ctypes.c_int(idx_chain))\n return [_min[i] for i in range(3)], [_max[i] for i in range(3)]" ]
[ "0.8187093", "0.8182944", "0.7970464", "0.7969971", "0.7886397", "0.7886397", "0.7886397", "0.7886397", "0.7886397", "0.7886397", "0.7886397", "0.7886397", "0.78816897", "0.78816897", "0.78687114", "0.7821428", "0.77718407", "0.77343", "0.7731461", "0.7682162", "0.7611129", "0.7540622", "0.74872375", "0.74587536", "0.7418074", "0.7354644", "0.73538256", "0.73181725", "0.72986424", "0.725611", "0.7229051", "0.72100526", "0.71837676", "0.7179594", "0.71304065", "0.7105563", "0.7093498", "0.70596933", "0.70514995", "0.70404536", "0.70325524", "0.70232576", "0.6992838", "0.6968438", "0.6962102", "0.6961736", "0.69474226", "0.69445246", "0.69396555", "0.6900796", "0.68907595", "0.68820643", "0.6854982", "0.6771278", "0.6760466", "0.67541724", "0.66727334", "0.6671289", "0.6662446", "0.66535443", "0.66385126", "0.662892", "0.6621611", "0.6613608", "0.6608833", "0.660066", "0.65964127", "0.65943223", "0.65943223", "0.6564338", "0.65272874", "0.65181834", "0.6508763", "0.6495298", "0.6482775", "0.6459343", "0.6454756", "0.640271", "0.6402511", "0.6397707", "0.638", "0.6370048", "0.6361892", "0.6360125", "0.6358608", "0.635202", "0.63435286", "0.6337705", "0.6333184", "0.63274384", "0.63127303", "0.63109046", "0.62964976", "0.62699485", "0.6260705", "0.6258114", "0.6233396", "0.6229828", "0.6227297", "0.6200642" ]
0.6802181
53
routine used to test the current pooling implementation to make sure there is no discrepancy between cvxpy and the original pytorch layer
def _test(self): self.pytorch_layer.eval() pytorch_layer = copy.deepcopy(self.pytorch_layer).cpu() image_w_h = int(self.input_size ** 0.5) input_image = torch.rand(1, self.n_in_channels, image_w_h, image_w_h) output_tensor = pytorch_layer(input_image)[0] for channel in range(self.n_in_channels): current_channel = input_image[0, channel].squeeze().flatten().cpu().numpy() normalized_data = (current_channel - self.running_mean[channel]) / np.sqrt( self.running_var[channel] + self.epsilon ) if self.affine: output_numpy = (self.weights[channel] * normalized_data) + self.bias[ channel ] else: output_numpy = normalized_data assert np.isclose( output_numpy, output_tensor[channel].detach().flatten().cpu().numpy(), atol=1e-6, ).all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_pool_consistency(self) -> None:\n x = Constant(\n 'const1',\n Float32(),\n np.zeros([1, 3, 3, 3])\n )\n input_ops = {'X': cast(Operator, x)}\n\n MaxPool(\n 'max_pool1',\n [1, 2, 2, 3],\n Float32(),\n input_ops,\n kernel_shape=[3, 3],\n pads=[1, 1, 1, 1],\n strides=[2, 2]\n )\n\n print(\"Consistency test for pooling operator passed!\")", "def check_sample_correctishness_bc01(f):\n\n batch_size = 5\n rows = 32\n cols = 30\n channels = 3\n pool_rows = 2\n pool_cols = 3\n\n rng = np.random.RandomState([2012, 9, 26])\n zv = rng.randn(batch_size, channels, rows,\n cols).astype(config.floatX) * 2. - 3.\n top_down_v = rng.randn(batch_size, channels, rows / pool_rows,\n cols / pool_cols).astype(config.floatX)\n\n z_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n z_th.tag.test_value = zv\n z_th.name = 'z_th'\n\n top_down_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n top_down_th.tag.test_value = top_down_v\n top_down_th.name = 'top_down_th'\n\n theano_rng = MRG_RandomStreams(rng.randint(2147462579))\n p_th, h_th, p_sth, h_sth = f(z_th, (pool_rows, pool_cols), top_down_th,\n theano_rng)\n\n prob_func = function([z_th, top_down_th], [p_th, h_th])\n pv, hv = prob_func(zv, top_down_v)\n\n sample_func = function([z_th, top_down_th], [p_sth, h_sth])\n\n acc_p = 0. * pv\n acc_h = 0. * hv\n\n # make sure the test gets good coverage, ie, that it includes many\n # different activation probs for both detector and pooling layer\n buckets = 10\n bucket_width = 1. / float(buckets)\n for i in xrange(buckets):\n lower_lim = i * bucket_width\n upper_lim = (i+1) * bucket_width\n\n assert np.any((pv >= lower_lim) * (pv < upper_lim))\n assert np.any((hv >= lower_lim) * (hv < upper_lim))\n\n assert upper_lim == 1.\n\n for i in xrange(10000):\n ps, hs = sample_func(zv, top_down_v)\n\n assert ps.shape == pv.shape\n assert hs.shape == hv.shape\n\n acc_p += ps\n acc_h += hs\n\n est_p = acc_p / float(i+1)\n est_h = acc_h / float(i+1)\n\n pd = np.abs(est_p-pv)\n hd = np.abs(est_h-hv)\n\n \"\"\"\n # plot maps of the estimation error, this is to see if it has some\n # spatial pattern this is useful for detecting bugs like not handling\n # the border correctly, etc.\n from pylearn2.gui.patch_viewer import PatchViewer\n\n pv = PatchViewer((pd.shape[0],pd.shape[3]),(pd.shape[1],pd.shape[2]),\n is_color = False)\n for i in xrange(pd.shape[0]):\n for j in xrange(pd.shape[3]):\n pv.add_patch( (pd[i,:,:,j] / pd.max() )* 2.0 - 1.0, rescale = False)\n pv.show()\n\n pv = PatchViewer((hd.shape[0],hd.shape[3]), (hd.shape[1],hd.shape[2]),\n is_color = False)\n for i in xrange(hd.shape[0]):\n for j in xrange(hd.shape[3]):\n pv.add_patch( (hd[i,:,:,j] / hd.max() )* 2.0 - 1.0, rescale = False)\n pv.show()\n \"\"\"\n\n \"\"\"\n plot expectation to estimate versus error in estimation\n expect bigger errors for values closer to 0.5\n\n from matplotlib import pyplot as plt\n\n #nelem = reduce( lambda x, y : x*y, pd.shape)\n #plt.scatter( pv.reshape(nelem), pd.reshape(nelem))\n #plt.show()\n\n nelem = reduce( lambda x, y : x*y, hd.shape)\n plt.scatter( hv.reshape(nelem), hd.reshape(nelem))\n plt.show()\n \"\"\"\n\n # don't really know how tight this should be\n # but you can try to pose an equivalent problem\n # and implement it in another way\n # using a numpy implementation in softmax_acc.py\n # I got a max error of .17\n assert max(pd.max(), hd.max()) < .17\n\n # Do exhaustive checks on just the last sample\n assert np.all((ps == 0) + (ps == 1))\n 
assert np.all((hs == 0) + (hs == 1))\n\n for k in xrange(batch_size):\n for i in xrange(ps.shape[2]):\n for j in xrange(ps.shape[3]):\n for l in xrange(channels):\n p = ps[k, l, i, j]\n h = hs[k, l, i*pool_rows:(i+1)*pool_rows,\n j*pool_cols:(j+1)*pool_cols]\n assert h.shape == (pool_rows, pool_cols)\n assert p == h.max()\n assert h.sum() <= 1\n\n \"\"\" If you made it to here, it's correctish\n (cant tell if samples are perfectly \"correct\") \"\"\"", "def check_sample_correctishness_channelwise(f):\n\n batch_size = 27\n pool_size = 4\n n = pool_size * 21\n\n rng = np.random.RandomState([2012, 9, 26])\n zv = rng.randn(batch_size, n).astype(config.floatX) * 3.5 - 5.\n top_down_v = rng.randn(batch_size, n / pool_size).astype(config.floatX)\n\n z_th = T.matrix()\n z_th.tag.test_value = zv\n z_th.name = 'z_th'\n\n top_down_th = T.matrix()\n top_down_th.tag.test_value = top_down_v\n top_down_th.name = 'top_down_th'\n\n theano_rng = MRG_RandomStreams(rng.randint(2147462579))\n p_th, h_th, p_sth, h_sth = f(z_th, pool_size, top_down_th, theano_rng)\n\n prob_func = function([z_th, top_down_th], [p_th, h_th])\n pv, hv = prob_func(zv, top_down_v)\n\n sample_func = function([z_th, top_down_th], [p_sth, h_sth])\n\n acc_p = 0. * pv\n acc_h = 0. * hv\n\n # make sure the test gets good coverage, ie, that it includes\n # many different activation probs for both detector and pooling layer\n buckets = 10\n bucket_width = 1. / float(buckets)\n print(pv.min(), pv.max())\n print(hv.min(), hv.max())\n for i in xrange(buckets):\n lower_lim = i * bucket_width\n upper_lim = (i+1) * bucket_width\n print(lower_lim, upper_lim)\n\n assert np.any((pv >= lower_lim) * (pv < upper_lim))\n assert np.any((hv >= lower_lim) * (hv < upper_lim))\n\n assert upper_lim == 1.\n\n for i in xrange(10000):\n ps, hs = sample_func(zv, top_down_v)\n\n assert ps.shape == pv.shape\n assert hs.shape == hv.shape\n\n acc_p += ps\n acc_h += hs\n\n est_p = acc_p / float(i+1)\n est_h = acc_h / float(i+1)\n\n pd = np.abs(est_p-pv)\n hd = np.abs(est_h-hv)\n\n \"\"\"\n # plot maps of the estimation error, this is to see if it has some\n # spatial pattern this is useful for detecting bugs like not handling\n # the border correctly, etc.\n # from pylearn2.gui.patch_viewer import PatchViewer\n\n pv = PatchViewer((pd.shape[0],pd.shape[3]),(pd.shape[1],pd.shape[2]),\n is_color = False)\n for i in xrange(pd.shape[0]):\n for j in xrange(pd.shape[3]):\n pv.add_patch( (pd[i,:,:,j] / pd.max() )* 2.0 - 1.0, rescale = False)\n pv.show()\n\n pv = PatchViewer((hd.shape[0],hd.shape[3]),(hd.shape[1],hd.shape[2]),\n is_color = False)\n for i in xrange(hd.shape[0]):\n for j in xrange(hd.shape[3]):\n pv.add_patch( (hd[i,:,:,j] / hd.max() )* 2.0 - 1.0, rescale = False)\n pv.show()\n \"\"\"\n\n \"\"\"\n plot expectation to estimate versus error in estimation\n expect bigger errors for values closer to 0.5\n\n from matplotlib import pyplot as plt\n\n #nelem = reduce( lambda x, y : x*y, pd.shape)\n #plt.scatter( pv.reshape(nelem), pd.reshape(nelem))\n #plt.show()\n\n nelem = reduce( lambda x, y : x*y, hd.shape)\n plt.scatter( hv.reshape(nelem), hd.reshape(nelem))\n plt.show()\n \"\"\"\n\n # don't really know how tight this should be\n # but you can try to pose an equivalent problem\n # and implement it in another way\n # using a numpy implementation in softmax_acc.py\n # I got a max error of .17\n assert max(pd.max(), hd.max()) < .17\n\n # Do exhaustive checks on just the last sample\n assert np.all((ps == 0) + (ps == 1))\n assert np.all((hs == 0) + (hs == 1))\n\n for k 
in xrange(batch_size):\n for i in xrange(ps.shape[1]):\n p = ps[k, i]\n h = hs[k, i*pool_size:(i+1)*pool_size]\n assert h.shape == (pool_size,)\n assert p == h.max()\n assert h.sum() <= 1\n\n \"\"\" If you made it to here, it's correctish\n (cant tell if samples are perfectly \"correct\") \"\"\"", "def check_correctness_bc01(f):\n\n rng = np.random.RandomState([2012, 7, 19])\n batch_size = 5\n rows = 32\n cols = 30\n channels = 3\n pool_rows = 2\n pool_cols = 3\n zv = rng.randn(batch_size, rows, cols,\n channels).astype(config.floatX) * 1. - 1.5\n top_down_v = rng.randn(batch_size, rows / pool_rows, cols / pool_cols,\n channels).astype(config.floatX)\n\n p_np, h_np = max_pool_python(zv, (pool_rows, pool_cols), top_down_v)\n\n z_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n z_th.name = 'z_th'\n zr = z_th.dimshuffle(0, 3, 1, 2)\n\n top_down_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n top_down_th.name = 'top_down_th'\n top_down_r = top_down_th.dimshuffle(0, 3, 1, 2)\n\n p_th, h_th = f(zr, (pool_rows, pool_cols), top_down_r)\n\n func = function([z_th, top_down_th], [p_th.dimshuffle(0, 2, 3, 1),\n h_th.dimshuffle(0, 2, 3, 1)])\n\n pv, hv = func(zv, top_down_v)\n\n assert p_np.shape == pv.shape\n assert h_np.shape == hv.shape\n if not np.allclose(h_np, hv):\n print((h_np.min(), h_np.max()))\n print((hv.min(), hv.max()))\n assert False\n if not np.allclose(p_np, pv):\n diff = abs(p_np - pv)\n print('max diff ', diff.max())\n print('min diff ', diff.min())\n print('ave diff ', diff.mean())\n assert False", "def reset_pooling_layer(self):\n self._semantic_decoder.reset_pooling_layer()\n if self._instance_decoder is not None:\n self._instance_decoder.reset_pooling_layer()", "def test_pooling(self):\n for width in range(2, 5):\n for width2 in range(1, width):\n matrix_size = (4, 5, width)\n matrix = get_random_test_tensor(size=matrix_size)\n pool_size = width2\n for stride in range(1, width2):\n for padding in range(2):\n reference = torch.nn.functional.avg_pool2d(\n matrix.unsqueeze(0), pool_size,\n stride=stride, padding=padding\n )\n\n encrypted_matrix = SharedTensor(matrix)\n encrypted_pool = encrypted_matrix.avg_pool2d(\n pool_size, stride=stride, padding=padding)\n self._check(\n encrypted_pool, reference[0], 'avg_pool2d failed')", "def reset_pooling_layer(self):\n self._aspp.reset_pooling_layer()", "def _testVisually(self):\n num_rows = 6\n num_cols = 6\n tensor_shape = (1, num_rows, num_cols, 1)\n pseudo_random = False\n for overlapping in True, False:\n print(\"-\" * 70)\n print(\"Testing FractionalMaxPool with overlapping = {}\".format(\n overlapping))\n rand_mat = self._PRNG.randint(10, size=tensor_shape)\n pooling_ratio = [1, math.sqrt(2), math.sqrt(2), 1]\n with self.cached_session():\n p, r, c = nn_ops.fractional_max_pool_v2(\n rand_mat,\n pooling_ratio,\n pseudo_random,\n overlapping,\n seed=self._SEED)\n tensor_output, row_seq, col_seq = self.evaluate([p, r, c])\n expected_result = self._GetExpectedFractionalMaxPoolResult(rand_mat,\n row_seq,\n col_seq,\n overlapping)\n print(\"row sequence:\")\n print(row_seq)\n print(\"column sequence:\")\n print(col_seq)\n\n print(\"Input:\")\n # Print input with pooling region marked.\n for i in range(num_rows):\n row_to_print = []\n for j in range(num_cols):\n if j in col_seq:\n row_to_print.append(\"|\")\n row_to_print.append(str(rand_mat[0, i, j, 0]))\n row_to_print.append(\"|\")\n if i in row_seq:\n print(\"-\" * 2 * len(row_to_print))\n print(\" 
\".join(row_to_print))\n print(\"-\" * 2 * len(row_to_print))\n\n print(\"Output from FractionalMaxPool:\")\n print(tensor_output[0, :, :, 0])\n print(\"Expected result:\")\n print(expected_result[0, :, :, 0])", "def __init__(self):\n #conv1\n n = inp_width*inp_height\n #poczatkowe wagi sieci sa ustalane losowo z rozkladu normalnego. Umieszczane sa one na liscie matryc wag\n self.Weights = [np.random.randn(layers[0][1],inp_channels,layers[0][2],layers[0][2])/np.sqrt(n)]\n out_Size = inp_width - layers[0][2] + 1 #zmienna zawiera rozmiar wyjscia danej warstwy\n #inicjalizacja progow \n self.Biases = [initBias*np.ones( layers[0][1] )]\n #przypisanie parametrow warstwie poolingu\n self.poolParams = [(layers[1][1], layers[1][2])]\n out_Size = out_Size/2 \n #conv 2\n n = out_Size*out_Size*layers[0][1]\n self.Weights.append(np.random.randn(layers[2][1],layers[0][1],layers[2][2],layers[2][2])/np.sqrt(n))\n out_Size = out_Size - layers[2][2]+1\n self.Biases.append(initBias*np.ones(layers[2][1]))\n #pool 2\n self.poolParams.append((layers[3][1],layers[3][2]))\n out_Size = out_Size/2 \n #conv 3\n n = out_Size*out_Size*layers[2][1]\n self.Weights.append(np.random.randn(layers[4][1],layers[2][1],out_Size,out_Size)/np.sqrt(n))\n out_Size = 1\n self.Biases.append(initBias*np.ones(layers[4][1]))\n #fully connected 1\n n = layers[4][1]\n self.Weights.append(np.random.randn(layers[5][1],layers[4][1])/np.sqrt(n))\n self.Biases.append(initBias*np.ones(layers[5][1]))\n #fully connected 2\n n = layers[5][1]\n self.Weights.append(np.random.randn(layers[6][1],layers[5][1])/np.sqrt(n))\n self.Biases.append(initBias*np.ones(layers[6][1]))\n\n self.Weights = np.asarray(self.Weights)\n self.Biases = np.asarray(self.Biases)\n \n delta_W = []\n delta_B = []\n for i in range(5):\n delta_W.append(np.zeros(self.Weights[i].shape))\n delta_B.append(np.zeros(self.Biases[i].shape))\n self.delta_W = np.asarray(delta_W)\n self.delta_B = np.asarray(delta_B)", "def testDivisiblePoolingRatio(self):\n pseudo_random = True\n overlapping = True\n num_batches = 3\n num_channels = 3\n num_rows = 30\n num_cols = 50\n tensor_shape = (num_batches, num_rows, num_cols, num_channels)\n # random tensor with value in [-500.0, 500.0)\n rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500\n self._ValidateFractionalMaxPoolResult(rand_mat, [1, 2, 2, 1], pseudo_random,\n overlapping)", "def check_correctness_channelwise(f):\n\n rng = np.random.RandomState([2012, 7, 19])\n batch_size = 5\n pool_size = 4\n n = 3 * pool_size\n zv = rng.randn(batch_size, n).astype(config.floatX) * 1. 
- 1.5\n top_down_v = rng.randn(batch_size, n / pool_size).astype(config.floatX)\n\n p_np, h_np = max_pool_channels_python(zv, pool_size, top_down_v)\n\n z_th = T.matrix()\n z_th.name = 'z_th'\n\n top_down_th = T.matrix()\n top_down_th.name = 'top_down_th'\n\n p_th, h_th = f(z_th, pool_size, top_down_th)\n\n func = function([z_th, top_down_th], [p_th, h_th])\n\n pv, hv = func(zv, top_down_v)\n\n assert p_np.shape == pv.shape\n assert h_np.shape == hv.shape\n if not np.allclose(h_np, hv):\n print((h_np.min(), h_np.max()))\n print((hv.min(), hv.max()))\n assert False\n if not np.allclose(p_np, pv):\n diff = abs(p_np - pv)\n print('max diff ', diff.max())\n print('min diff ', diff.min())\n print('ave diff ', diff.mean())\n assert False", "def build_dc_classifier():\n # return nn.Sequential(\n # Unflatten(Batch_size, 1, 28, 28),\n # nn.Conv2d(1, 32, kernel_size=5, stride=1),\n # nn.LeakyReLU(negative_slope=0.01),\n # nn.MaxPool2d(2, stride=2),\n # nn.Conv2d(32, 64, kernel_size=5, stride=1),\n # nn.LeakyReLU(negative_slope=0.01),\n # nn.MaxPool2d(kernel_size=2, stride=2),\n # Flatten(),\n # nn.Linear(4 * 4 * 64, 4 * 4 * 64),\n # nn.LeakyReLU(negative_slope=0.01),\n # nn.Linear(4 * 4 * 64, 1)\n # )\n\n return nn.Sequential(\n Unflatten(Batch_size, 1, 128, 128), #28,28,32 #128,128,16\n nn.Conv2d(1, 16,kernel_size=5, stride=1), #24,24,32 #124,124,16\n nn.LeakyReLU(negative_slope=0.01),\n nn.MaxPool2d(2, stride=2), #12,12,32 #62,62,16\n nn.Conv2d(16, 32,kernel_size=5, stride=1), # 8, 8,64 #58,58,32\n nn.LeakyReLU(negative_slope=0.01),\n nn.MaxPool2d(kernel_size=2, stride=2), # 4, 4,64 #29,29,32\n nn.Conv2d(32, 64, kernel_size=5, stride=1), #25,25,64\n nn.LeakyReLU(negative_slope=0.01),\n nn.MaxPool2d(kernel_size=2, stride=2), #12,12,64\n nn.Conv2d(64, 128, kernel_size=5, stride=1), # 8, 8,128\n nn.LeakyReLU(negative_slope=0.01),\n nn.MaxPool2d(kernel_size=2, stride=2), # 4, 4,128\n Flatten(),\n nn.Linear(4*4*128, 4*4*128), # 4*4*64 # 4*4*128\n nn.LeakyReLU(negative_slope=0.01),\n nn.Linear(4*4*128,1) # 4*4*64 # 4*4*128\n )", "def __init__(self, target_real_label=1.0, target_fake_label=0.0):\n super(GANLocalLoss, self).__init__()\n # self.pooling = nn.MaxPool2d(kernel_size=4, stride=2, padding=1)\n self.adaptivepooling = nn.AdaptiveAvgPool2d(64)", "def __init__(self, num_layers, num_mlp_layers, input_dim, hidden_dim,\n output_dim, final_dropout, learn_eps, graph_pooling_type,\n neighbor_pooling_type):\n super(GIN, self).__init__()\n self.num_layers = num_layers\n self.learn_eps = learn_eps\n\n # List of MLPs\n self.ginlayers = torch.nn.ModuleList()\n self.batch_norms = torch.nn.ModuleList()\n\n for layer in range(self.num_layers):\n if layer == 0:\n mlp = MLP(num_mlp_layers, input_dim, hidden_dim, hidden_dim)\n else:\n mlp = MLP(num_mlp_layers, hidden_dim, hidden_dim, hidden_dim)\n\n self.ginlayers.append(\n GINConv(ApplyNodeFunc(mlp), neighbor_pooling_type, 0, self.learn_eps))\n self.batch_norms.append(nn.BatchNorm1d(hidden_dim))\n\n # Linear function for graph poolings of output of each layer\n # which maps the output of different layers into a prediction score\n self.linears_prediction = torch.nn.ModuleList()\n\n for layer in range(num_layers):\n if layer == 0:\n self.linears_prediction.append(\n nn.Linear(input_dim, output_dim))\n else:\n self.linears_prediction.append(\n nn.Linear(hidden_dim, output_dim))\n\n self.drop = nn.Dropout(final_dropout)\n\n if graph_pooling_type == 'sum':\n self.pool = SumPooling()\n elif graph_pooling_type == 'mean':\n self.pool = AvgPooling()\n elif 
graph_pooling_type == 'max':\n self.pool = MaxPooling()\n else:\n raise NotImplementedError", "def __init__(self):\n torch.nn.Module.__init__(self)\n ######################### Convolution and pooling layers of VGG-16.\n self.features = torchvision.models.vgg16(pretrained=True).features # fine tune?\n self.features = torch.nn.Sequential(*list(self.features.children())\n [:-22]) # Remove pool2 and rest, lack of computational resource\n # No grad for convVGG\n # for param in self.features.parameters():\n # param.requires_grad = False\n\n #################### Channel Grouping Net\n # self.fc1_ = torch.nn.Linear(128, 128*16)#lack of resource\n # self.fc2_ = torch.nn.Linear(128, 128*16)\n # self.fc3_ = torch.nn.Linear(128, 128*16)\n #\n # torch.nn.init.kaiming_normal_(self.fc1_.weight.data, nonlinearity='relu')\n # if self.fc1_.bias is not None:\n # torch.nn.init.constant_(self.fc1_.bias.data, val=0) # fc层的bias进行constant初始化\n # torch.nn.init.kaiming_normal_(self.fc2_.weight.data, nonlinearity='relu')\n # if self.fc2_.bias is not None:\n # torch.nn.init.constant_(self.fc2_.bias.data, val=0) # fc层的bias进行constant初始化\n # torch.nn.init.kaiming_normal_(self.fc3_.weight.data, nonlinearity='relu')\n # if self.fc3_.bias is not None:\n # torch.nn.init.constant_(self.fc3_.bias.data, val=0) # fc层的bias进行constant初始化\n\n self.fc1 = torch.nn.Linear(128*28*28, 128)\n self.fc2 = torch.nn.Linear(128*28*28, 128)\n self.fc3 = torch.nn.Linear(128*28*28, 128)\n\n\n torch.nn.init.kaiming_normal_(self.fc1.weight.data, nonlinearity='relu')\n if self.fc1.bias is not None:\n torch.nn.init.constant_(self.fc1.bias.data, val=0) # fc层的bias进行constant初始化\n torch.nn.init.kaiming_normal_(self.fc2.weight.data, nonlinearity='relu')\n if self.fc2.bias is not None:\n torch.nn.init.constant_(self.fc2.bias.data, val=0) # fc层的bias进行constant初始化\n torch.nn.init.kaiming_normal_(self.fc3.weight.data, nonlinearity='relu')\n if self.fc3.bias is not None:\n torch.nn.init.constant_(self.fc3.bias.data, val=0) # fc层的bias进行constant初始化\n\n self.layerNorm=nn.LayerNorm([224,224])\n\n # global grad for hook\n self.image_reconstruction = None\n self.register_hooks()\n self.GradWeight=1e-1\n\n # ################### STN input N*3*448*448\n # self.localization = [\n # nn.Sequential(\n # nn.MaxPool2d(4,stride=4),#112\n # nn.ReLU(True),\n #\n # nn.Conv2d(3, 32, kernel_size=5,stride=1,padding=2), # 112\n # nn.MaxPool2d(2, stride=2), # 56\n # nn.ReLU(True),\n #\n # nn.Conv2d(32, 48, kernel_size=3,stride=1,padding=1),\n # nn.MaxPool2d(2, stride=2), # 56/2=28\n # nn.ReLU(True),\n #\n # nn.Conv2d(48, 64, kernel_size=3, stride=1, padding=1),\n # nn.MaxPool2d(2, stride=2), # 28/2=14\n # nn.ReLU(True) #output 64*14*14\n # ).cuda(),\n # nn.Sequential(\n # nn.MaxPool2d(4, stride=4), # 112\n # nn.ReLU(True),\n #\n # nn.Conv2d(3, 32, kernel_size=5, stride=1, padding=2), # 112\n # nn.MaxPool2d(2, stride=2), # 56\n # nn.ReLU(True),\n #\n # nn.Conv2d(32, 48, kernel_size=3, stride=1, padding=1),\n # nn.MaxPool2d(2, stride=2), # 56/2=28\n # nn.ReLU(True),\n #\n # nn.Conv2d(48, 64, kernel_size=3, stride=1, padding=1),\n # nn.MaxPool2d(2, stride=2), # 28/2=14\n # nn.ReLU(True) # output 64*14*14\n # ).cuda(),\n # nn.Sequential(\n # nn.MaxPool2d(4, stride=4), # 112\n # nn.ReLU(True),\n #\n # nn.Conv2d(3, 32, kernel_size=5, stride=1, padding=2), # 112\n # nn.MaxPool2d(2, stride=2), # 56\n # nn.ReLU(True),\n #\n # nn.Conv2d(32, 48, kernel_size=3, stride=1, padding=1),\n # nn.MaxPool2d(2, stride=2), # 56/2=28\n # nn.ReLU(True),\n #\n # nn.Conv2d(48, 64, kernel_size=3, stride=1, 
padding=1),\n # nn.MaxPool2d(2, stride=2), # 28/2=14\n # nn.ReLU(True) # output 64*14*14\n # ).cuda()\n # ]\n # # Regressor for the 3 * 2 affine matrix\n # self.fc_loc = [\n # nn.Sequential(\n # nn.Linear(64 * 14 * 14, 32),\n # nn.ReLU(True),\n # nn.Linear(32, 3 * 2)\n # ).cuda(),\n # nn.Sequential(\n # nn.Linear(64 * 14 * 14, 32),\n # nn.ReLU(True),\n # nn.Linear(32, 3 * 2)\n # ).cuda(),\n # nn.Sequential(\n # nn.Linear(64 * 14 * 14, 32),\n # nn.ReLU(True),\n # nn.Linear(32, 3 * 2)\n # ).cuda()\n # ]\n # # Initialize the weights/bias with identity transformation\n # for fc_locx in self.fc_loc:\n # fc_locx[2].weight.data.zero_()\n # fc_locx[2].bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float))\n\n ########################Bilinear CNN output 256 channels\n self.bcnnConv_1=torch.nn.Sequential(*list(torchvision.models.vgg16(pretrained=True).features.children())\n [:-1]) # Remove pool3 and rest.\n self.bcnnConv_2 = torch.nn.Sequential(*list(torchvision.models.vgg16(pretrained=True).features.children())\n [:-1]) # Remove pool3 and rest.\n self.bcnnConv_3 = torch.nn.Sequential(*list(torchvision.models.vgg16(pretrained=True).features.children())\n [:-1]) # Remove pool3 and rest.\n #BCNN Linear classifier.\n self.bfc1 = torch.nn.Linear(512*512, 200)\n self.bfc2 = torch.nn.Linear(512*512, 200)\n self.bfc3 = torch.nn.Linear(512*512, 200)\n torch.nn.init.kaiming_normal_(self.bfc1.weight.data) # 何凯明初始化\n if self.bfc1.bias is not None:\n torch.nn.init.constant_(self.bfc1.bias.data, val=0) # fc层的bias进行constant初始化\n torch.nn.init.kaiming_normal_(self.bfc2.weight.data) # 何凯明初始化\n if self.bfc2.bias is not None:\n torch.nn.init.constant_(self.bfc2.bias.data, val=0) # fc层的bias进行constant初始化\n torch.nn.init.kaiming_normal_(self.bfc3.weight.data) # 何凯明初始化\n if self.bfc3.bias is not None:\n torch.nn.init.constant_(self.bfc3.bias.data, val=0) # fc层的bias进行constant初始化\n\n # self.CBP1 = CompactBilinearPooling(512, 512, 50000)\n # self.CBP2 = CompactBilinearPooling(512, 512, 50000)\n # self.CBP3 = CompactBilinearPooling(512, 512, 50000)", "def discard_pool ( self ):\n return self.pop_pool() is not None", "def __init__(self, options):\r\n nn.Module.__init__(self)\r\n # Convolution and pooling layers of VGG-16.\r\n self.basemodel = torchvision.models.resnet18(pretrained=True)\r\n self.options = options\r\n\r\n #label\r\n self.label_primary = nn.Linear(options['primary_dim'], options['proj_dim'])\r\n self.label_dual = nn.Linear(options['dual_dim'], options['proj_dim'])\r\n\r\n #classifer/regressor\r\n self.fc_primary = nn.Linear(512 + options['proj_dim'], options['primary_dim'])\r\n self.fc_dual = nn.Linear(512 + options['proj_dim'], options['dual_dim'])\r\n\r\n\r\n if self.options['fc'] == True:\r\n # Freeze all previous layers.\r\n for param in self.basemodel.parameters():\r\n param.requires_grad = False\r\n # Initialize the fc layers.\r\n nn.init.kaiming_normal_(self.fc_primary.weight.data)\r\n if self.fc_primary.bias is not None:\r\n nn.init.constant_(self.fc_primary.bias.data, val=0)\r\n\r\n nn.init.kaiming_normal_(self.fc_dual.weight.data)\r\n if self.fc_dual.bias is not None:\r\n nn.init.constant_(self.fc_dual.bias.data, val=0)\r\n\r\n nn.init.kaiming_normal_(self.label_primary.weight.data)\r\n if self.label_primary.bias is not None:\r\n nn.init.constant_(self.label_primary.bias.data, val=0)\r\n\r\n nn.init.kaiming_normal_(self.label_dual.weight.data)\r\n if self.label_dual.bias is not None:\r\n nn.init.constant_(self.label_dual.bias.data, val=0)\r\n\r\n\r\n else:\r\n for param in 
self.basemodel.conv1.parameters():\r\n param.requires_grad = False\r\n for param in self.basemodel.bn1.parameters():\r\n param.requires_grad = False\r\n for param in self.basemodel.layer1.parameters():\r\n param.requires_grad = False\r\n #for param in self.basemodel.layer2.parameters():\r\n # param.requires_grad = False\r\n #for param in self.basemodel.layer3.parameters():\r\n # param.requires_grad = False\r", "def clPooling(self, size, stride=(1, 1), mask=(3, 3), maxPool=True):", "def test_thread_pool(self):\n self.assertIdentical(self.tx_client._pool, self.pool)", "def __init__(self):\n\n super(ConvModule, self).__init__()\n\n self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=5, stride=[1, 2])\n self.conv1_bn = nn.BatchNorm2d(64)\n self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=5, stride=[1, 2])\n self.conv2_bn = nn.BatchNorm2d(128)\n self.pool1 = nn.MaxPool2d(kernel_size=4, stride=2)\n self.dropout0 = nn.Dropout(p=0.4)\n\n self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=[1, 2])\n self.conv3_bn = nn.BatchNorm2d(256)\n self.conv4 = nn.Conv2d(in_channels=256, out_channels=64, kernel_size=3, stride=[1, 2])\n self.conv4_bn = nn.BatchNorm2d(64)\n self.pool2 = nn.MaxPool2d(kernel_size=4, stride=2)\n #\n # self.conv5 = nn.Conv2d(in_channels=128, out_channels=64, kernel_size=3, stride=[1, 2])\n # self.conv5_bn = nn.BatchNorm2d(64)\n # self.pool3 = nn.MaxPool2d(kernel_size=3, stride=[1, 2])", "def testAllInputOptions(self):\n num_batches = 5\n num_channels = 3\n num_rows = 20\n num_cols = 30\n for pseudo_random in True, False:\n for overlapping in True, False:\n tensor_shape = (num_batches, num_rows, num_cols, num_channels)\n # random tensor with value in [-500.0, 500.0)\n rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500\n self._ValidateFractionalMaxPoolResult(\n rand_mat, [1, math.sqrt(3), math.sqrt(2), 1], pseudo_random,\n overlapping)", "def unpooling(self):\n return self.__unpooling", "def pool_snps(parent1, parent2):", "def __init__(self, n_channels_list, bn_momentum=0.01, activation='relu'):\n super(MultiResolutionLayer, self).__init__()\n self.n_branches = len(n_channels_list)\n self.fuse_layers = nn.ModuleList()\n for branch_i in range(self.n_branches):\n layer = nn.ModuleList()\n for branch_j in range(self.n_branches):\n if branch_i < branch_j:\n # resolution of branch i is greater than branch_j\n # branch_j will be upsample with nearest resize\n layer.append(nn.Sequential(\n nn.Conv2d(in_channels=n_channels_list[branch_j], out_channels=n_channels_list[branch_i],\n kernel_size=1, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(n_channels_list[branch_i], momentum=bn_momentum))\n )\n\n elif branch_i == branch_j:\n # branch i is branch_j\n layer.append(None)\n else:\n # branch_i > branch_j\n # resolution of branch i is greater than branch_j\n # needed to be downsample(stride 2 convolution) branch_i - branch_j times\n downsample_conv = []\n for k in range(branch_i - branch_j):\n if k == branch_i - branch_j - 1:\n downsample_conv.append(\n nn.Sequential(\n nn.Conv2d(\n in_channels=n_channels_list[branch_j],\n out_channels=n_channels_list[branch_i],\n kernel_size=3, stride=2, padding=1,\n bias=False),\n nn.BatchNorm2d(n_channels_list[branch_i], momentum=bn_momentum)))\n else:\n downsample_conv.append(\n nn.Sequential(\n nn.Conv2d(in_channels=n_channels_list[branch_j],\n out_channels=n_channels_list[branch_j],\n kernel_size=3, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(n_channels_list[branch_j], 
momentum=bn_momentum)))\n\n layer.append(nn.Sequential(*downsample_conv))\n self.fuse_layers.append(layer)\n pass", "def mlp_pool(vecs, names, hid_dim):\n if len(names) == 1:\n if names[0] == \"prefix_att\":\n pool = vecs[-1] #no raw\n elif names[0] == \"cross_att\":\n pool = vecs[-1] #no raw\n elif names[0] == \"concat_att\":\n #no raw\n if len(vecs) == 6:\n pool = fluid.layers.concat(vecs[3:5], axis=1)\n else: \n pool = fluid.layers.concat(vecs[2:4], axis=1)\n else:\n pool = fluid.layers.concat(vecs, axis=1)\n #pool = vecs[0] + vecs[1] + ...\n mlp_vec = fluid.layers.fc(input=pool, size=hid_dim * 2, act=\"leaky_relu\",\n param_attr=fluid.ParamAttr(name='%s_fc_weight' % names[0]),\n bias_attr=fluid.ParamAttr(name='%s_fc_bias' % names[0]))\n else:\n pools = []\n for idx, v in enumerate(vecs):\n vec = fluid.layers.fc(input=v, size=hid_dim, act=\"leaky_relu\",\n param_attr=fluid.ParamAttr(name='%s_fc_weight' % names[idx]),\n bias_attr=fluid.ParamAttr(name='%s_fc_bias' % names[idx]))\n pools.append(vec)\n if len(pools) > 2 and len(pools) % 2 == 0:\n merge_pools = []\n for idx in range(len(pools) / 2):\n v = fluid.layers.concat([pools[idx], pools[idx + len(pools) / 2]], axis=1)\n vec = fluid.layers.fc(input=v, size=hid_dim, act=\"leaky_relu\",\n param_attr=fluid.ParamAttr(name='%s_fc_weight' % names[idx].split('_')[0]),\n bias_attr=fluid.ParamAttr(name='%s_fc_bias' % names[idx].split('_')[0]))\n merge_pools.append(vec)\n pools = merge_pools\n\n mlp_vec = fluid.layers.concat(pools, axis=1)\n return mlp_vec", "def __len__(self):\n return len (self.pool)", "def _is_global_pooling(self, input_shape):\n output_shape = self.compute_output_shape(input_shape).as_list()\n return output_shape[1] == 1 and output_shape[2] == 1", "def __init__(self, depth=7, feature_size=512, use_eql=True, gpu_parallelize=False):\r\n from torch.nn import ModuleList\r\n from CustomLayers import DisGeneralConvBlock, DisFinalBlock, _equalized_conv2d\r\n from torch.nn import Conv2d\r\n\r\n super().__init__()\r\n\r\n assert feature_size != 0 and ((feature_size & (feature_size - 1)) == 0), \\\r\n \"latent size not a power of 2\"\r\n if depth >= 4:\r\n assert feature_size >= np.power(2, depth - 4), \\\r\n \"feature size cannot be produced\"\r\n\r\n # create state of the object\r\n self.gpu_parallelize = gpu_parallelize\r\n self.use_eql = use_eql\r\n self.depth = depth\r\n self.feature_size = feature_size\r\n\r\n # create the fromRGB layers for various inputs:\r\n if self.use_eql:\r\n def from_rgb(out_channels):\r\n return _equalized_conv2d(1, out_channels, (1, 1), bias=True)\r\n else:\r\n def from_rgb(out_channels):\r\n return Conv2d(1, out_channels, (1, 1), bias=True)\r\n\r\n self.rgb_to_features = ModuleList()\r\n self.final_converter = from_rgb(self.feature_size // 2)\r\n\r\n # create a module list of the other required general convolution blocks\r\n self.layers = ModuleList()\r\n self.final_block = DisFinalBlock(self.feature_size, use_eql=self.use_eql)\r\n\r\n # create the remaining layers\r\n for i in range(self.depth - 1):\r\n if i > 2:\r\n layer = DisGeneralConvBlock(\r\n int(self.feature_size // np.power(2, i - 2)),\r\n int(self.feature_size // np.power(2, i - 2)),\r\n use_eql=self.use_eql\r\n )\r\n rgb = from_rgb(int(self.feature_size // np.power(2, i - 1)))\r\n else:\r\n layer = DisGeneralConvBlock(self.feature_size, self.feature_size // 2,\r\n use_eql=self.use_eql)\r\n rgb = from_rgb(self.feature_size // 2)\r\n\r\n self.layers.append(layer)\r\n self.rgb_to_features.append(rgb)\r\n\r\n # just replace the last 
converter\r\n self.rgb_to_features[self.depth - 2] = \\\r\n from_rgb(self.feature_size // np.power(2, i - 2))\r\n\r\n # parallelize the modules from the module-lists if asked to:\r\n if self.gpu_parallelize:\r\n for i in range(len(self.layers)):\r\n self.layers[i] = torch.nn.DataParallel(self.layers[i])\r\n self.rgb_to_features[i] = torch.nn.DataParallel(\r\n self.rgb_to_features[i])\r\n\r\n # Note that since the FinalBlock contains the StdDev layer,\r\n # it cannot be parallelized so easily. It will have to be parallelized\r\n # from the Lower level (from CustomLayers). This much parallelism\r\n # seems enough for me.\r", "def __init__(self, output_layer_idx=23, min_val=-1.0, max_val=1.0):\n sequence = OrderedDict({\n 'layer0': nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1),\n 'layer1': nn.ReLU(inplace=True),\n 'layer2': nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),\n 'layer3': nn.ReLU(inplace=True),\n 'layer4': nn.MaxPool2d(kernel_size=2, stride=2),\n 'layer5': nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),\n 'layer6': nn.ReLU(inplace=True),\n 'layer7': nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1),\n 'layer8': nn.ReLU(inplace=True),\n 'layer9': nn.MaxPool2d(kernel_size=2, stride=2),\n 'layer10': nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1),\n 'layer11': nn.ReLU(inplace=True),\n 'layer12': nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),\n 'layer13': nn.ReLU(inplace=True),\n 'layer14': nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),\n 'layer15': nn.ReLU(inplace=True),\n 'layer16': nn.MaxPool2d(kernel_size=2, stride=2),\n 'layer17': nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1),\n 'layer18': nn.ReLU(inplace=True),\n 'layer19': nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),\n 'layer20': nn.ReLU(inplace=True),\n 'layer21': nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),\n 'layer22': nn.ReLU(inplace=True),\n 'layer23': nn.MaxPool2d(kernel_size=2, stride=2),\n 'layer24': nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),\n 'layer25': nn.ReLU(inplace=True),\n 'layer26': nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),\n 'layer27': nn.ReLU(inplace=True),\n 'layer28': nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),\n 'layer29': nn.ReLU(inplace=True),\n 'layer30': nn.MaxPool2d(kernel_size=2, stride=2),\n })\n self.output_layer_idx = output_layer_idx\n self.min_val = min_val\n self.max_val = max_val\n self.mean = torch.from_numpy(np.array(_MEAN_STATS)).view(1, 3, 1, 1)\n self.mean = self.mean.type(torch.FloatTensor)\n super().__init__(sequence)", "def get_spooled(self):\r\n return True", "def test_default_pool_deterministic(self):\n strategy_pool = strategy_selection.generate_default_strategy_pool(\n strategy_list=strategy.AFL_STRATEGY_LIST, use_generator=True)\n\n self.assertTrue(\n strategy_pool.do_strategy(strategy.CORPUS_MUTATION_RADAMSA_STRATEGY))\n self.assertTrue(strategy_pool.do_strategy(strategy.CORPUS_SUBSET_STRATEGY))", "def testDifferentTensorShapes(self):\n pseudo_random = True\n overlapping = True\n for num_batches in [1, 3]:\n for num_channels in [1, 3]:\n for num_rows in [10, 20, 50]:\n for num_cols in [10, 20, 50]:\n tensor_shape = (num_batches, num_rows, num_cols, num_channels)\n # random tensor with value in [-500.0, 500.0)\n rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500\n self._ValidateFractionalMaxPoolResult(\n rand_mat, [1, math.sqrt(3), math.sqrt(2), 1], pseudo_random,\n overlapping)", "def __init__(\n self,\n spatial_dims: int,\n in_channels: 
int,\n out_channels: int,\n kernel_size: int,\n stride: int,\n image_size: list[int],\n expand_ratio: int,\n se_ratio: float | None,\n id_skip: bool | None = True,\n norm: str | tuple = (\"batch\", {\"eps\": 1e-3, \"momentum\": 0.01}),\n drop_connect_rate: float | None = 0.2,\n ) -> None:\n super().__init__()\n\n # select the type of N-Dimensional layers to use\n # these are based on spatial dims and selected from MONAI factories\n conv_type = Conv[\"conv\", spatial_dims]\n adaptivepool_type = Pool[\"adaptiveavg\", spatial_dims]\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.id_skip = id_skip\n self.stride = stride\n self.expand_ratio = expand_ratio\n self.drop_connect_rate = drop_connect_rate\n\n if (se_ratio is not None) and (0.0 < se_ratio <= 1.0):\n self.has_se = True\n self.se_ratio = se_ratio\n else:\n self.has_se = False\n\n # Expansion phase (Inverted Bottleneck)\n inp = in_channels # number of input channels\n oup = in_channels * expand_ratio # number of output channels\n if self.expand_ratio != 1:\n self._expand_conv = conv_type(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)\n self._expand_conv_padding = _make_same_padder(self._expand_conv, image_size)\n\n self._bn0 = get_norm_layer(name=norm, spatial_dims=spatial_dims, channels=oup)\n else:\n # need to have the following to fix JIT error:\n # \"Module 'MBConvBlock' has no attribute '_expand_conv'\"\n\n # FIXME: find a better way to bypass JIT error\n self._expand_conv = nn.Identity()\n self._expand_conv_padding = nn.Identity()\n self._bn0 = nn.Identity()\n\n # Depthwise convolution phase\n self._depthwise_conv = conv_type(\n in_channels=oup,\n out_channels=oup,\n groups=oup, # groups makes it depthwise\n kernel_size=kernel_size,\n stride=self.stride,\n bias=False,\n )\n self._depthwise_conv_padding = _make_same_padder(self._depthwise_conv, image_size)\n self._bn1 = get_norm_layer(name=norm, spatial_dims=spatial_dims, channels=oup)\n image_size = _calculate_output_image_size(image_size, self.stride)\n\n # Squeeze and Excitation layer, if desired\n if self.has_se:\n self._se_adaptpool = adaptivepool_type(1)\n num_squeezed_channels = max(1, int(in_channels * self.se_ratio))\n self._se_reduce = conv_type(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1)\n self._se_reduce_padding = _make_same_padder(self._se_reduce, [1] * spatial_dims)\n self._se_expand = conv_type(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1)\n self._se_expand_padding = _make_same_padder(self._se_expand, [1] * spatial_dims)\n\n # Pointwise convolution phase\n final_oup = out_channels\n self._project_conv = conv_type(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)\n self._project_conv_padding = _make_same_padder(self._project_conv, image_size)\n self._bn2 = get_norm_layer(name=norm, spatial_dims=spatial_dims, channels=final_oup)\n\n # swish activation to use - using memory efficient swish by default\n # can be switched to normal swish using self.set_swish() function call\n self._swish = Act[\"memswish\"](inplace=True)", "def poolCritical(self):\n idle = len(self.pool) - self.__working\n return idle <= 0", "def pooling(self):\n return self.__pooling", "def __call__(self, prediction, fg_mask, image):\n # prediction = self.pooling(prediction)\n # fg_mask = self.pooling(fg_mask)\n N, C, H, W = prediction.size()\n bg = prediction*(1-fg_mask)\n fg = prediction*fg_mask\n\n\n fg_patch = fg.view(N,C,-1).permute(0,2,1)\n bg_patch = bg.view(N,C,-1)\n\n fg_patch_mu = 
torch.mean(fg_patch, dim=2, keepdim=True)\n bg_patch_mu = torch.mean(bg_patch, dim=1, keepdim=True)\n fg_bg_local_conv = torch.matmul((fg_patch-fg_patch_mu), (bg_patch-bg_patch_mu))/(C-1)\n\n bg_distribution_std = (torch.var(bg_patch, dim=1, keepdim=True) + 1e-8).sqrt()\n fg_distribution_std = (torch.var(fg_patch, dim=2, keepdim=True) + 1e-8).sqrt()\n fg_bg_r = fg_bg_local_conv.div(torch.matmul(fg_distribution_std,bg_distribution_std)+1e-8)\n fg_bg_r = fg_bg_r.abs()\n # fg_bg_r[fg_bg_r<0.7] = 0\n\n pixel_count = H*W\n # # bg_patch_one = bg.unsqueeze(1).repeat(1, pixel_count, 1, 1, 1)\n # # fg_patch_one = fg.view(N,C,-1).permute(0,2,1).unsqueeze(-1).unsqueeze(-1).expand_as(bg_patch_one)\n # bg_patch_one = bg.permute(0,2,1,3).permute(0,1,3,2).unsqueeze(1)\n # fg_patch_one = fg.view(N,C,-1).permute(0,2,1).unsqueeze(-2).unsqueeze(-2)\n # fg_bg_L1 = (fg_patch_one-bg_patch_one).pow(2).mean(dim=-1)\n # fg_bg_L1_drop_fg = fg_bg_L1*(1-fg_mask)\n\n # fg_mask_channel = fg_mask.view(N, -1, 1, 1).expand_as(fg_bg_L1)\n # fg_bg_L1_only_fg = fg_bg_L1_drop_fg*fg_mask_channel\n\n # # fg_bg_local_conv[fg_bg_local_conv<0] = 0\n # # fg_bg_local_conv = torch.softmax(fg_bg_local_conv, dim=2)\n # # local_loss = fg_bg_L1_only_fg.view(N, pixel_count, pixel_count)*fg_bg_local_conv.permute(0,2,1).detach()\n # local_loss = fg_bg_L1_only_fg.view(N, pixel_count, -1)*fg_bg_r\n # fg_mask_sum = fg_mask.view(N, -1).sum(dim=1)\n\n C1 = 0.01**2\n image = self.adaptivepooling(image)\n # image = F.adaptive_avg_pool2d(image, 32)\n # print(image.size())\n image_fg = image*fg_mask\n image_bg = image*(1-fg_mask)\n image_fg_mu = image_fg.mean(dim=1)\n image_bg_mu = image_bg.mean(dim=1)\n image_fg_patch_one = image_fg_mu.view(N, -1,1)\n image_bg_patch_one = image_bg_mu.view(N, -1,1)\n image_fg_patch_one_sq = image_fg_patch_one.pow(2)\n image_bg_patch_one_sq = image_bg_patch_one.pow(2)\n\n luminance = torch.matmul(image_fg_patch_one, image_bg_patch_one.permute(0,2,1)+C1).div(image_fg_patch_one_sq+image_bg_patch_one_sq+C1)\n # image_bg_patch_one = image_bg.permute(0,2,1,3).permute(0,1,3,2).unsqueeze(1)\n # image_fg_patch_one = image_fg.view(N,image_fg.size(1),-1).permute(0,2,1).unsqueeze(-2).unsqueeze(-2)\n # fg_bg_L1 = (image_fg_patch_one-image_bg_patch_one).pow(2).mean(dim=-1)\n fg_bg_loss = luminance\n \n fg_bg_loss_drop_fg = fg_bg_loss*(1-fg_mask.view(N,1, -1))\n fg_mask_channel = fg_mask.view(N, -1, 1).expand_as(fg_bg_loss)\n fg_bg_loss_only_fg = fg_bg_loss_drop_fg*fg_mask_channel\n local_loss = fg_bg_loss_only_fg*fg_bg_r.detach()\n\n local_loss = local_loss.mean()\n loss = local_loss\n # if target_is_real:\n # loss = local_loss # self.relu(1-prediction.mean())\n # else:\n # loss = -local_loss # self.relu(1+prediction.mean())\n return loss", "def __init__(self, image_shape, z_dim, num_blocks, dropout=False,\n subsampling=True, embedding=128):\n super().__init__()\n\n self.image_shape = image_shape\n self.z_dim = z_dim\n self.num_blocks = num_blocks\n\n self.layers = nn.ModuleList()\n\n channels = self.image_shape[2]\n shape_x = self.image_shape[0]\n shape_y = self.image_shape[1]\n\n if subsampling:\n assert shape_x % (2 ** num_blocks) == 0, \\\n 'Image is not evenly divisible by max pooling layer'\n assert shape_y % (2 ** num_blocks) == 0, \\\n 'Image is not evenly divisible by max pooling layer'\n\n for i in range(num_blocks):\n self.layers.append(\n nn.Conv2d(channels, channels * 4, 3, padding=1))\n self.layers.append(nn.ReLU())\n self.layers.append(nn.MaxPool2d(2, 2))\n\n channels = channels * 4\n shape_x = int(shape_x / 2)\n 
shape_y = int(shape_y / 2)\n\n self.linear_input = channels * shape_x * shape_y\n self.linear = nn.Linear(channels * shape_x * shape_y, z_dim)\n\n else:\n block_shape = [8, 4, 3]\n block_strides = [4, 2, 1]\n filters = [16, 32, 64]\n for i in range(num_blocks):\n self.layers.append(\n nn.Conv2d(channels, filters[i], block_shape[i],\n stride=block_strides[i]))\n self.layers.append(nn.ReLU())\n\n channels = filters[i]\n # calculation taken from https://pytorch.org/docs/stable\n # nn.html#torch.nn.Conv2d\n shape_x = int(((shape_x - (block_shape[i] - 1) - 1) /\n block_strides[i]) + 1)\n shape_y = int(((shape_y - (block_shape[i] - 1) - 1) /\n block_strides[i]) + 1)\n\n self.linear_input = int(channels * shape_x * shape_y)\n self.linear = nn.Linear(self.linear_input, embedding)", "def __init__(self):\n super(Backbone, self).__init__()\n\n # input size: (128, 282, 282)\n # Block 1:\n # relu + 4 conv + bn\n self.conv1 = torch.nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=0)\n self.conv2 = torch.nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0)\n self.conv3 = torch.nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0)\n self.conv4 = torch.nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0)\n\n self.bn1 = torch.nn.BatchNorm2d(64)\n self.bn2 = torch.nn.BatchNorm2d(64)\n self.bn3 = torch.nn.BatchNorm2d(64)\n self.bn4 = torch.nn.BatchNorm2d(64)\n\n # Block 2:\n # relu + 6 conv + stride 2 + bn\n self.conv5 = torch.nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=0)\n self.conv6 = torch.nn.Conv2d(128, 128, kernel_size=3, stride=2, padding=0)\n self.conv7 = torch.nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=0)\n self.conv8 = torch.nn.Conv2d(64, 32, kernel_size=3, stride=1, padding=0)\n self.conv9 = torch.nn.Conv2d(32, 16, kernel_size=3, stride=1, padding=0)\n self.conv10 = torch.nn.Conv2d(16, 8, kernel_size=3, stride=1, padding=0)\n\n self.bn5 = torch.nn.BatchNorm2d(128)\n self.bn6 = torch.nn.BatchNorm2d(128)\n self.bn7 = torch.nn.BatchNorm2d(64)\n self.bn8 = torch.nn.BatchNorm2d(32)\n self.bn9 = torch.nn.BatchNorm2d(16)\n self.bn10 = torch.nn.BatchNorm2d(8)\n\n # Block 3:\n # 2 fully connected with drop out.\n\n self.fc1 = torch.nn.Linear( 8 * 59 * 59, 32)\n self.fc1_bn = torch.nn.BatchNorm1d(32)\n self.fc_out = torch.nn.Linear(32, 3)", "def test_reversible_block(self):\n for implementation in [0, 1]:\n # same convolution test\n Gm = torch.nn.Conv2d(10 // 2, 10 // 2, (3,3), padding=1)\n dims = (2,10,8,8)\n\n Xdata = np.random.random(dims).astype(np.float32)\n\n X = Variable(torch.from_numpy(Xdata))\n Xshape = X.shape\n rb = revop.ReversibleBlock(Gm, implementation=implementation)\n Y = rb(X)\n X.data.set_()\n self.assertTrue(len(X.data.shape) == 0)\n Y.backward(torch.ones_like(Y))\n\n self.assertTrue(Y.shape == Xshape)\n self.assertTrue(X.data.numpy().shape == Xdata.shape)\n self.assertTrue(np.isclose(X.data.numpy(), Xdata, atol=1e-06).all())", "def test_default_pool_deterministic(self):\n strategy_pool = strategy_selection.generate_default_strategy_pool(\n strategy_list=strategy.LIBFUZZER_STRATEGY_LIST, use_generator=True)\n\n self.assertTrue(\n strategy_pool.do_strategy(strategy.CORPUS_MUTATION_RADAMSA_STRATEGY))\n self.assertTrue(strategy_pool.do_strategy(strategy.CORPUS_SUBSET_STRATEGY))\n self.assertTrue(\n strategy_pool.do_strategy(strategy.RANDOM_MAX_LENGTH_STRATEGY))\n self.assertTrue(strategy_pool.do_strategy(strategy.VALUE_PROFILE_STRATEGY))\n self.assertTrue(strategy_pool.do_strategy(strategy.FORK_STRATEGY))", "def test_pool_dependence(self):\n ok_(id(CSVarPool.pool) == 
id(VarPool.pool), 'mem address of pools should be the same')\n\n var = 'foo'\n app = 'test'\n VarPool(app=app).set(var, 'bar')\n eq_(CSVarPool.get(var, app=app), 'bar')\n CSVarPool(app=app).set(var, 'foo')\n eq_(VarPool.get(var, app=app), 'foo')", "def test_vc(integrate_activation_vc, max_ch_index, sim_map_max, L, c, h, w):\n # get activation for each seperated vc\n integrate_activation_vc = integrate_activation_vc.view(-1, L, c) # n, L, c\n sim_map_max = sim_map_max.view(-1, L, 1, h, w) # n, L, 1, h, w\n max_ch_index = max_ch_index.view(-1, L) # n, L\n\n all_activation_vcs = []\n\n for i in range(len(integrate_activation_vc)):\n integrate_activation_vc_i = integrate_activation_vc[i] # L, c\n all_activation_vcs.append(integrate_activation_vc_i) \n max_ch_index_i = max_ch_index[i] # L,\n unique_vc_i = torch.unique(max_ch_index_i) # k,\n print(f\"unique_vc_i {unique_vc_i.shape}\")\n for vc_i in unique_vc_i:\n index_vc_i = torch.where(max_ch_index_i != vc_i)[0] # num of miss hit for that vc out of L\n activate_vc_i = integrate_activation_vc_i.clone() # L, c\n activate_vc_i[index_vc_i, :] = 0.\n all_activation_vcs.append(activate_vc_i)\n \n all_activation_vcs = torch.cat(all_activation_vcs, dim=0) # [(nk+n) * L, c]\n \n # decompose each mask \n all_masks = []\n for i in range(len(sim_map_max)):\n sim_map_max_i = sim_map_max[i] # L, 1, h, w\n all_masks.append(sim_map_max_i) \n max_ch_index_i = max_ch_index[i] # L,\n unique_vc_i = torch.unique(max_ch_index_i) # k,\n for vc_i in unique_vc_i:\n index_vc_i = torch.where(max_ch_index_i != vc_i)[0] # num of miss hit for that vc out of L\n vc_sim_map_max_i = sim_map_max_i.clone() # L, 1, h, w\n vc_sim_map_max_i[index_vc_i] = 0.\n all_masks.append(vc_sim_map_max_i)\n all_masks = torch.cat(all_masks, dim=0) # [(nk+n) * L, 1, h, w]\n\n assert all_masks.shape[0] == all_activation_vcs.shape[0], f\"all_masks.shape[0] {all_masks.shape[0]} != all_activation_vcs.shape[0] {all_activation_vcs.shape[0]}\"\n\n return all_activation_vcs, all_masks", "def __init__(self,\n num_class=2,\n layer_nums=(3, 5, 5),\n layer_strides=(2, 2, 2),\n num_filters=(128, 128, 256),\n upsample_strides=(1, 2, 4),\n num_upsample_filters=(256, 256, 256),\n num_input_features=128,\n num_anchor_per_loc=2,\n use_groupnorm=False,\n num_groups=32,\n box_code_size=7,\n num_direction_bins=2):\n super(RPN, self).__init__()\n self._num_anchor_per_loc = num_anchor_per_loc\n self._box_code_size=box_code_size\n self._num_class=num_class\n self._num_direction_bins=num_direction_bins\n assert len(layer_nums) == 3\n assert len(layer_strides) == len(layer_nums)\n assert len(num_filters) == len(layer_nums)\n assert len(upsample_strides) == len(layer_nums)\n assert len(num_upsample_filters) == len(layer_nums)\n upsample_strides=[int(i) for i in upsample_strides]\n\n factors = []\n for i in range(len(layer_nums)):\n assert int(np.prod(\n layer_strides[:i + 1])) % upsample_strides[i] == 0\n factors.append(\n np.prod(layer_strides[:i + 1]) // upsample_strides[i])\n assert all([x == factors[0] for x in factors])\n\n # note that when stride > 1, conv2d with same padding isn't\n # equal to pad-conv2d. 
we should use pad-conv2d.\n block2_input_filters = num_filters[0]\n if use_groupnorm:\n BatchNorm2d = change_default_args(\n num_groups=num_groups, eps=1e-3)(GroupNorm)\n else:\n BatchNorm2d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm2d)\n\n self.block1 = Sequential(\n nn.ZeroPad2d(1),\n nn.Conv2d(num_input_features, num_filters[0], 3,\n stride=layer_strides[0],bias=False),\n BatchNorm2d(num_filters[0]),\n nn.ReLU(),)\n for i in range(layer_nums[0]):\n self.block1.add(\n nn.Conv2d(num_filters[0], num_filters[0], 3,padding=1,bias=False))\n self.block1.add(BatchNorm2d(num_filters[0]))\n self.block1.add(nn.ReLU())\n self.deconv1 = Sequential(\n nn.ConvTranspose2d(num_filters[0],num_upsample_filters[0],\n upsample_strides[0],stride=upsample_strides[0],bias=False),\n BatchNorm2d(num_upsample_filters[0]),\n nn.ReLU(),)\n self.block2 = Sequential(\n nn.ZeroPad2d(1),\n nn.Conv2d(block2_input_filters,num_filters[1],3,\n stride=layer_strides[1],bias=False),\n BatchNorm2d(num_filters[1]),\n nn.ReLU(),)\n for i in range(layer_nums[1]):\n self.block2.add(\n nn.Conv2d(num_filters[1], num_filters[1], 3, padding=1,bias=False))\n self.block2.add(BatchNorm2d(num_filters[1]))\n self.block2.add(nn.ReLU())\n self.deconv2 = Sequential(\n nn.ConvTranspose2d(num_filters[1],num_upsample_filters[1],\n upsample_strides[1],stride=upsample_strides[1],bias=False),\n BatchNorm2d(num_upsample_filters[1]),\n nn.ReLU(),)\n self.block3 = Sequential(\n nn.ZeroPad2d(1),\n nn.Conv2d(num_filters[1], num_filters[2], 3, stride=layer_strides[2],bias=False),\n BatchNorm2d(num_filters[2]),\n nn.ReLU(),)\n for i in range(layer_nums[2]):\n self.block3.add(nn.Conv2d(num_filters[2], num_filters[2], 3, padding=1,bias=False))\n self.block3.add(BatchNorm2d(num_filters[2]))\n self.block3.add(nn.ReLU())\n self.deconv3 = Sequential(\n nn.ConvTranspose2d(\n num_filters[2],num_upsample_filters[2],\n upsample_strides[2],stride=upsample_strides[2],bias=False),\n BatchNorm2d(num_upsample_filters[2]),\n nn.ReLU(),)\n\n num_cls = num_anchor_per_loc * num_class\n self.conv_cls = nn.Conv2d(sum(num_upsample_filters), num_cls, 1)\n self.conv_box = nn.Conv2d(sum(num_upsample_filters), num_anchor_per_loc * box_code_size, 1)\n self.conv_dir_cls = nn.Conv2d(sum(num_upsample_filters),num_anchor_per_loc * num_direction_bins, 1)", "def __init__(self):\n super(Discriminator, self).__init__()\n\n self.conv = nn.Sequential(\n # conv1\n nn.Conv2d(in_channels=3, out_channels=196, kernel_size=3, padding=1, stride=1),\n nn.LayerNorm(normalized_shape=[32, 32]),\n nn.LeakyReLU(),\n\n # conv2\n nn.Conv2d(in_channels=196, out_channels=196, kernel_size=3, padding=1, stride=2),\n nn.LayerNorm(normalized_shape=[16, 16]),\n nn.LeakyReLU(),\n\n # conv3\n nn.Conv2d(in_channels=196, out_channels=196, kernel_size=3, padding=1, stride=1),\n nn.LayerNorm(normalized_shape=[16, 16]),\n nn.LeakyReLU(),\n\n # conv4\n nn.Conv2d(in_channels=196, out_channels=196, kernel_size=3, padding=1, stride=2),\n nn.LayerNorm(normalized_shape=[8, 8]),\n nn.LeakyReLU(),\n\n # conv5\n nn.Conv2d(in_channels=196, out_channels=196, kernel_size=3, padding=1, stride=1),\n nn.LayerNorm(normalized_shape=[8, 8]),\n nn.LeakyReLU(),\n\n # conv6\n nn.Conv2d(in_channels=196, out_channels=196, kernel_size=3, padding=1, stride=1),\n nn.LayerNorm(normalized_shape=[8, 8]),\n nn.LeakyReLU(),\n\n # conv7\n nn.Conv2d(in_channels=196, out_channels=196, kernel_size=3, padding=1, stride=1),\n nn.LayerNorm(normalized_shape=[8, 8]),\n nn.LeakyReLU(),\n\n # conv8\n nn.Conv2d(in_channels=196, 
out_channels=196, kernel_size=3, padding=1, stride=2),\n nn.LayerNorm(normalized_shape=[4, 4]),\n nn.LeakyReLU(),\n\n nn.MaxPool2d(kernel_size=4, stride=4, padding=0)\n )\n\n self.fc1 = nn.Linear(196, 1)\n self.fc10 = nn.Linear(196, 10)", "def additional_cloning_checks(self):\n pass", "def test_pool():\n pop = iter([ 'a', 'b', 'c', 'd', 'e' ])\n pop = ops.pool(pop, size=3)\n\n assert(len(pop) == 3)\n assert(pop == [ 'a', 'b', 'c' ])", "def __init__(self, in_channels=4, out1_channels=3, out2_channels=1, bn=True):\n super(SRResNet_RGBY, self).__init__()\n \n self.bn = bn\n self.conv_input = nn.Conv2d(in_channels=in_channels, out_channels=64, kernel_size=9, stride=1, padding=9//2, bias=False)\n #self.relu = nn.LeakyReLU(0.2, inplace=True)\n self.relu = nn.PReLU(num_parameters=1, init=0.2)\n \n self.residual = self.make_layer(_Residual_Block, bn, 16)\n\n self.conv_mid = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False)\n if self.bn:\n self.bn_mid = nn.BatchNorm2d(64)\n\n self.upscale4x = nn.Sequential(\n nn.Conv2d(in_channels=64, out_channels=256, kernel_size=3, stride=1, padding=1, bias=False),\n nn.PixelShuffle(2),\n #nn.LeakyReLU(0.2, inplace=True),\n nn.PReLU(num_parameters=1, init=0.2),\n nn.Conv2d(in_channels=64, out_channels=256, kernel_size=3, stride=1, padding=1, bias=False),\n nn.PixelShuffle(2),\n #nn.LeakyReLU(0.2, inplace=True),\n nn.PReLU(num_parameters=1, init=0.2),\n )\n\n self.conv_output = nn.Conv2d(in_channels=64, out_channels=out1_channels, kernel_size=9, stride=1, padding=4, bias=False)\n self.conv_output2 = nn.Conv2d(in_channels=out1_channels, out_channels=out2_channels, kernel_size=1, stride=1, padding=0, bias=False)\n \n # init the weight of conv2d\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n #init.orthogonal(m.weight, math.sqrt(2))\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n if m.bias is not None:\n m.bias.data.zero_()", "def check_pool(self):\n if self.conn.queue_len() < MAX_PROXIES:\n return True\n return False", "def train(trial_num, image_num, filter_num, filter_size, input_size, channel_num, pooling_rate, left_upper_padding, right_lower_padding):\n\n input_batch_num = 1\n batch_num = 2\n\n init_filters = np.array(np.random.normal(size=filter_num * channel_num *\n filter_size*filter_size), dtype=\"float32\")\n #init_filters = np.array([1.0] * filter_num * channel_num * filter_size * filter_size, dtype=\"float32\")\n init_filters = 0.01 * init_filters.reshape(filter_num, channel_num*filter_size*filter_size)\n\n init_hbias = np.array([-0.1] * filter_num, dtype=\"float32\").reshape(filter_num, 1)\n\n init_vbias = np.array([0.0] * channel_num, dtype=\"float32\").reshape(channel_num, 1)\n\n libnvcrbm = __import__(\"nvcrbm\")\n cur_filters = libnvcrbm.init(filter_num, filter_size, \n input_batch_num, input_size, channel_num,\n pooling_rate, left_upper_padding, right_lower_padding,\n init_filters, init_hbias, init_vbias)\n\n imgs = cPickle.load(open(\"../data/kyoto_large_train.pkl\", \"r\"))\n img_size = imgs[0].shape[0]\n\n for trial_idx in xrange(trial_num):\n for img_idx in xrange(image_num):\n for batch_idx in xrange(batch_num):\n row_idx = np.arange(0, input_size) + np.random.random_integers(img_size - 2 * filter_size - input_size) + filter_size - 1\n col_idx = np.arange(0, input_size) + np.random.random_integers(img_size - 2 * filter_size - input_size) + filter_size - 1\n #row_idx = np.arange(0, input_size) + 200\n #col_idx = np.arange(0, input_size) + 200\n\n batch_data = imgs[img_idx][row_idx][:,col_idx]\n batch_data = batch_data - batch_data.mean()\n batch_data = np.asarray(batch_data.reshape(1, input_size * input_size), dtype=\"float32\")\n \n libnvcrbm.run_batch(trial_idx, img_idx, batch_idx, batch_data)\n\n libnvcrbm.print_result()\n cur_filters = libnvcrbm.get_gpu_filters()\n dump_filter_image(cur_filters, \"../data/kyoto/filters/trial_%d.png\" % trial_idx)\n\n first_layer = {}\n first_layer[\"filters\"] = cur_filters\n first_layer[\"bias\"] = libnvcrbm.get_gpu_hbias()\n cPickle.dump(first_layer, open(\"../data/first_layer.dat\", \"w+\"))", "def testLargePoolingRatio(self):\n pseudo_random = True\n overlapping = True\n num_batches = 3\n num_channels = 3\n num_rows = 30\n num_cols = 50\n tensor_shape = (num_batches, num_rows, num_cols, num_channels)\n for row_ratio in [math.sqrt(11), math.sqrt(37)]:\n for col_ratio in [math.sqrt(11), math.sqrt(27)]:\n # random tensor with value in [-500.0, 500.0)\n rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500\n self._ValidateFractionalMaxPoolResult(rand_mat,\n [1, row_ratio, col_ratio, 1],\n pseudo_random, overlapping)", "def __init__(self):\n super(CenterExtractor, self).__init__()\n self.pool = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)", "def test_layer_instantiation(self):\n model = self.get_layer(in_channels=2, out_channels=3, kernel_size=4)\n\n # Assert the number of elements of the weights.\n tile_weights, tile_biases = model.analog_tile.get_weights()\n\n self.assertEqual(tile_weights.numel(), 2*3*4*4*4)\n if model.use_bias:\n self.assertEqual(tile_biases.numel(), 3)", "def test_layer_instantiation(self):\n model = self.get_layer(in_channels=2, out_channels=3, kernel_size=4)\n\n # Assert the number of elements of the weights.\n tile_weights, tile_biases = 
model.analog_tile.get_weights()\n\n self.assertEqual(tile_weights.numel(), 2*3*4*4)\n if model.use_bias:\n self.assertEqual(tile_biases.numel(), 3)", "def test_layer_instantiation(self):\n model = self.get_layer(in_channels=2, out_channels=3, kernel_size=4)\n\n # Assert the number of elements of the weights.\n tile_weights, tile_biases = model.analog_tile.get_weights()\n\n self.assertEqual(tile_weights.numel(), 2*3*4)\n if model.use_bias:\n self.assertEqual(tile_biases.numel(), 3)", "def test_sparsity(config):\n total_zeros = 0\n total_nonzeros = 0\n\n print ('<===sparsity type is {}'.format(config.sparsity_type))\n print ('<===layers to be pruned are {}'.format(config._prune_ratios))\n if config.masked_progressive and (config.sparsity_type == 'filter' or config.sparsity_type =='column'or config.sparsity_type == \"bn_filter\" ):\n ### test both column and row sparsity\n print (\"***********checking column sparsity*************\")\n for name,W in config.model.named_parameters():\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy()\n shape = W.shape\n W2d = W.reshape(shape[0],-1)\n column_l2_norm = LA.norm(W2d,2,axis=0)\n zero_column = np.sum(column_l2_norm == 0)\n nonzero_column = np.sum(column_l2_norm !=0)\n\n print (\"column sparsity of layer {} is {}\".format(name,zero_column/(zero_column+nonzero_column)))\n print (\"***********checking filter sparsity*************\") \n for name,W in config.model.named_parameters():\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy()\n shape = W.shape\n W2d = W.reshape(shape[0],-1)\n row_l2_norm = LA.norm(W2d,2,axis=1)\n zero_row = np.sum(row_l2_norm == 0)\n nonzero_row = np.sum(row_l2_norm !=0)\n print (\"filter sparsity of layer {} is {}\".format(name,zero_row/(zero_row+nonzero_row)))\n print (\"************checking overall sparsity in conv layers*************\")\n for name,W in config.model.named_parameters():\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy() \n total_zeros +=np.sum(W==0)\n total_nonzeros +=np.sum(W!=0)\n print ('only consider conv layers, compression rate is {}'.format((total_zeros+total_nonzeros)/total_nonzeros))\n return\n \n if config.sparsity_type == \"irregular\":\n for name,W in config.model.named_parameters():\n if 'bias' in name:\n continue\n W = W.cpu().detach().numpy()\n zeros = np.sum(W==0)\n total_zeros+=zeros\n nonzeros = np.sum(W!=0)\n total_nonzeros+=nonzeros\n print (\"sparsity at layer {} is {}\".format(name,zeros/(zeros+nonzeros)))\n total_weight_number = total_zeros+total_nonzeros\n print ('overal compression rate is {}'.format(total_weight_number/total_nonzeros))\n elif config.sparsity_type == \"column\":\n for name,W in config.model.named_parameters():\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy()\n shape = W.shape\n W2d = W.reshape(shape[0],-1)\n column_l2_norm = LA.norm(W2d,2,axis=0)\n zero_column = np.sum(column_l2_norm == 0)\n nonzero_column = np.sum(column_l2_norm !=0)\n total_zeros +=np.sum(W==0)\n total_nonzeros +=np.sum(W!=0)\n print (\"column sparsity of layer {} is {}\".format(name,zero_column/(zero_column+nonzero_column)))\n print ('only consider conv layers, compression rate is {}'.format((total_zeros+total_nonzeros)/total_nonzeros)) \n elif config.sparsity_type == \"filter\":\n print ('inside if')\n print (config.prune_ratios)\n for name,W in config.model.named_parameters():\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy()\n shape = W.shape\n W2d = 
W.reshape(shape[0],-1)\n row_l2_norm = LA.norm(W2d,2,axis=1)\n zero_row = np.sum(row_l2_norm == 0)\n nonzero_row = np.sum(row_l2_norm !=0)\n total_zeros +=np.sum(W==0)\n total_nonzeros +=np.sum(W!=0)\n print (\"filter sparsity of layer {} is {}\".format(name,zero_row/(zero_row+nonzero_row)))\n print ('only consider conv layers, compression rate is {}'.format((total_zeros+total_nonzeros)/total_nonzeros))\n elif config.sparsity_type == \"bn_filter\":\n print ('inside bn_filter')\n print (config.prune_ratios)\n for i,(name,W) in enumerate(config.model.named_parameters()):\n if name not in config.prune_ratios:\n continue\n W = W.cpu().detach().numpy()\n zeros = np.sum(W==0)\n nonzeros = np.sum(W!=0)\n print (\"sparsity at layer {} is {}\".format(name,zeros/(zeros+nonzeros)))", "def test_reinitialization(conv_model):\n pruned = condense.torch.PruningAgent(conv_model, condense.optimizer.sparsity_functions.Constant(0.7), apply_mask=False)\n pre_pruning = [p.clone().detach().numpy() for p in pruned.model.parameters()]\n\n # search\n with condense.torch.TicketSearch(pruned):\n masks = pruned.model.train(gen, 10)\n\n after_pruning = [p.clone().detach().numpy() for p in pruned.model.parameters()]\n ticket_mask = [m.detach().numpy() for m in pruned.mask.values()]\n\n # reinit tests\n for pre, after, mask in zip(pre_pruning, after_pruning, pruned.mask.values()):\n assert (pre * mask.numpy() == after).all(), 'lottery ticket search params were not reinitialized correctly'\n\n # training on model\n pruned.model.train(gen, 20)\n\n # check if mask changed\n for old, p in zip(ticket_mask, pruned.model.parameters()):\n assert (old == pruned.mask[p].numpy()).all(), 'mask changed during training'\n\n # mask was considered during training\n for param in pruned.to_prune:\n assert ((param.detach().numpy() != 0) == pruned.mask[param].detach().numpy()).all()", "def PXRCmodel(isize, nc, conv_init, ndf=128, bn=True, se=False):\n \n def squeeze_excite_block(tensor, ratio=16):\n \n init = tensor\n filters = init._keras_shape[3]\n se_shape = (1, 1, filters)\n\n se = GlobalAveragePooling2D()(init)\n se = Reshape(se_shape)(se)\n se = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(se)\n se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)\n\n x = multiply([init, se])\n return x\n \n x = inputs = Input(shape=(isize, isize, nc))\n x = Conv2D(filters=ndf, kernel_size=4, strides=1, use_bias=False,\n padding = \"same\", kernel_initializer = conv_init)(x)\n x = Conv2D(filters=ndf, kernel_size=4, strides=2, use_bias=False,\n padding = \"same\", kernel_initializer = conv_init)(x) \n x = LeakyReLU(alpha=0.2)(x)\n \n \n x = Conv2D(filters=ndf*2, kernel_size=4, strides=2, use_bias=False,\n padding = \"same\", kernel_initializer = conv_init)(x)\n x = Conv2D(filters=ndf*2, kernel_size=4, strides=1, use_bias=False,\n padding = \"same\", kernel_initializer = conv_init)(x)\n x = LeakyReLU(alpha=0.2)(x)\n \n\n x = Conv2D(filters=ndf*4, kernel_size=4, strides=2, use_bias=False,\n padding = \"same\", kernel_initializer = conv_init)(x)\n x = Conv2D(filters=ndf*4, kernel_size=4, strides=1, use_bias=False,\n padding = \"same\", kernel_initializer = conv_init)(x)\n x = LeakyReLU(alpha=0.2)(x)\n \n \n x = Conv2D(filters=ndf*8, kernel_size=4, strides=2, use_bias=False,\n padding = \"same\", kernel_initializer = conv_init)(x)\n x = Conv2D(filters=ndf*8, kernel_size=4, strides=1, use_bias=False,\n padding = \"same\", kernel_initializer = conv_init)(x)\n x = 
LeakyReLU(alpha=0.2)(x)\n \n \n y = Conv2D(filters=256, kernel_size=(3, 3), padding='same')(x)\n y = Conv2D(filters=256, kernel_size=(3, 3), padding='same')(y)\n y = Conv2D(filters=256, kernel_size=(3, 3), padding='same')(y)\n \n if (bn==True):\n y = BatchNormalization()(y)\n \n y = LeakyReLU()(y)\n y = MaxPool2D()(y)\n y = LeakyReLU()(y)\n \n ###########\n \n y = Conv2D(filters=128, kernel_size=(3, 3), padding='same')(y)\n y = Conv2D(filters=128, kernel_size=(3, 3), padding='same')(y)\n y = Conv2D(filters=128, kernel_size=(3, 3), padding='same')(y)\n \n if (se==True):\n y = squeeze_excite_block(y)\n \n if (bn==True):\n y = BatchNormalization()(y)\n \n y = LeakyReLU()(y)\n y = MaxPool2D()(y)\n y = LeakyReLU()(y)\n \n \n y = GlobalAveragePooling2D()(y)\n predictions = Dense(2, activation='softmax')(y)\n \n return Model(inputs=inputs, outputs=predictions)", "def _pool_op(self, in_obj, pool_axes):\n manual_pad = collections.OrderedDict([(ax.name, (0, 0)) for ax in in_obj.axes])\n pad_int, extra_pad = self._get_pad_int(pool_axes)\n manual_pad.update(extra_pad)\n if any((pad != (0, 0)) for pad in manual_pad.values()):\n in_obj = ng.pad(in_obj, manual_pad.values())\n output_axes = self._output_axes(in_obj,\n pad_int)\n poolparams = make_poolparams(self.pool_type,\n self.pool_shape,\n self.strides,\n pad_int)\n return ng.pooling(poolparams,\n in_obj,\n axes=output_axes)", "def __init__(self, depth=7, latent_size=512, use_eql=True, gpu_parallelize=False):\r\n from torch.nn import ModuleList, Conv2d\r\n from CustomLayers import GenGeneralConvBlock, GenInitialBlock, _equalized_conv2d\r\n\r\n super().__init__()\r\n\r\n assert latent_size != 0 and ((latent_size & (latent_size - 1)) == 0), \\\r\n \"latent size not a power of 2\"\r\n if depth >= 4:\r\n assert latent_size >= np.power(2, depth - 4), \"latent size will diminish to zero\"\r\n\r\n # state of the generator:\r\n self.use_eql = use_eql\r\n self.depth = depth\r\n self.latent_size = latent_size\r\n\r\n # register the modules required for the Generator Below ...\r\n # create the ToRGB layers for various outputs:\r\n if self.use_eql:\r\n def to_rgb(in_channels):\r\n return _equalized_conv2d(in_channels, 1, (1, 1), bias=True)\r\n else:\r\n def to_rgb(in_channels):\r\n return Conv2d(in_channels, 1, (1, 1), bias=True)\r\n\r\n # create a module list of the other required general convolution blocks\r\n self.layers = ModuleList([GenInitialBlock(self.latent_size, use_eql=self.use_eql)])\r\n self.rgb_converters = ModuleList([to_rgb(self.latent_size)])\r\n\r\n # create the remaining layers\r\n for i in range(self.depth - 1):\r\n if i <= 2:\r\n layer = GenGeneralConvBlock(self.latent_size, self.latent_size,\r\n use_eql=self.use_eql)\r\n rgb = to_rgb(self.latent_size)\r\n else:\r\n layer = GenGeneralConvBlock(\r\n int(self.latent_size // np.power(2, i - 3)),\r\n int(self.latent_size // np.power(2, i - 2)),\r\n use_eql=self.use_eql\r\n )\r\n rgb = to_rgb(int(self.latent_size // np.power(2, i - 2)))\r\n self.layers.append(layer)\r\n self.rgb_converters.append(rgb)", "def EmbeddingLayers_pooling(pretrained=False, progress=True, **kwargs):\n kwargs['groups'] = 32\n kwargs['width_per_group'] = 4\n # return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,\n # **kwargs) \n return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],\n pretrained, progress, **kwargs)", "def _crop_pool_layer(bottom, rois, max_pool=True):\n # code modified from\n # https://github.com/ruotianluo/pytorch-faster-rcnn\n # implement it using stn\n # box to affine\n # input 
(x1,y1,x2,y2)\n rois = rois.detach()\n batch_size = bottom.size(0)\n D = bottom.size(1)\n H = bottom.size(2)\n W = bottom.size(3)\n roi_per_batch = rois.size(0) / batch_size\n x1 = rois[:, 1::4] / 16.0\n y1 = rois[:, 2::4] / 16.0\n x2 = rois[:, 3::4] / 16.0\n y2 = rois[:, 4::4] / 16.0\n\n height = bottom.size(2)\n width = bottom.size(3)\n\n # affine theta\n zero = Variable(rois.data.new(rois.size(0), 1).zero_())\n theta = torch.cat([ \\\n (x2 - x1) / (width - 1),\n zero,\n (x1 + x2 - width + 1) / (width - 1),\n zero,\n (y2 - y1) / (height - 1),\n (y1 + y2 - height + 1) / (height - 1)], 1).view(-1, 2, 3)\n\n if max_pool:\n pre_pool_size = cfg.POOLING_SIZE * 2\n grid = F.affine_grid(theta, torch.Size((rois.size(0), 1, pre_pool_size, pre_pool_size)))\n bottom = bottom.view(1, batch_size, D, H, W).contiguous().expand(roi_per_batch, batch_size, D, H, W) \\\n .contiguous().view(-1, D, H, W)\n crops = F.grid_sample(bottom, grid)\n crops = F.max_pool2d(crops, 2, 2)\n else:\n grid = F.affine_grid(theta, torch.Size((rois.size(0), 1, cfg.POOLING_SIZE, cfg.POOLING_SIZE)))\n bottom = bottom.view(1, batch_size, D, H, W).contiguous().expand(roi_per_batch, batch_size, D, H, W) \\\n .contiguous().view(-1, D, H, W)\n crops = F.grid_sample(bottom, grid)\n\n return crops, grid", "def __init__(self,\n inplanes,\n planes,\n stride=1,\n dilation=1,\n downsample=None,\n style='pytorch',\n with_cp=False,\n conv_cfg=None,\n norm_cfg=dict(type='BN')):\n super(Bottleneck, self).__init__()\n assert style in ['pytorch', 'caffe']\n self.inplanes = inplanes\n self.planes = planes\n self.stride = stride\n self.dilation = dilation\n self.style = style\n self.with_cp = with_cp\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n\n if self.style == 'pytorch':\n self.conv1_stride = 1\n self.conv2_stride = stride\n else:\n self.conv1_stride = stride\n self.conv2_stride = 1\n\n self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)\n self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)\n self.norm3_name, norm3 = build_norm_layer(\n norm_cfg, planes * self.expansion, postfix=3)\n\n self.conv1 = build_conv_layer(\n conv_cfg,\n inplanes,\n planes,\n kernel_size=1,\n stride=self.conv1_stride,\n bias=False)\n self.add_module(self.norm1_name, norm1)\n\n self.conv2 = build_conv_layer(\n conv_cfg,\n planes,\n planes,\n kernel_size=3,\n stride=self.conv2_stride,\n padding=dilation,\n dilation=dilation,\n bias=False)\n\n self.add_module(self.norm2_name, norm2)\n self.conv3 = build_conv_layer(\n conv_cfg,\n planes,\n planes * self.expansion,\n kernel_size=1,\n bias=False)\n self.add_module(self.norm3_name, norm3)\n\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample", "def pooler_layer(self):\n return self._pooler_layer", "def max_pool_forward_naive(x, pool_param):\n out = None\n #############################################################################\n # TODO: Implement the max pooling forward pass #\n #############################################################################\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n cache = (x, pool_param)\n return out, cache", "def DCLoss(img, opt):\n maxpool = nn.MaxPool3d((3, opt.patch_size, opt.patch_size), stride=1, padding=(0, opt.patch_size//2, opt.patch_size//2))\n dc = maxpool(1-img[:, None, :, :, :])\n \n target = torch.FloatTensor(dc.shape).zero_().cuda(opt.gpu_ids[0])\n \n loss = 
L1Loss(reduction='sum')(dc, target)\n return -loss", "def test_multiple_requantize_offload():\n\n def create_model():\n ifm = relay.var(\"input\", shape=(1, 3, 3, 4), dtype=\"int8\")\n cast = relay.cast(ifm, dtype=\"int32\")\n mean = relay.mean(cast, axis=1, keepdims=True)\n requantize = relay.qnn.op.requantize(\n mean,\n input_scale=relay.const(1.0, dtype=\"float32\"),\n input_zero_point=relay.const(0, dtype=\"int32\"),\n output_scale=relay.const(1.0, dtype=\"float32\"),\n output_zero_point=relay.const(0, dtype=\"int32\"),\n )\n requantize = relay.qnn.op.requantize(\n requantize,\n input_scale=relay.const(1.0, dtype=\"float32\"),\n input_zero_point=relay.const(0, dtype=\"int32\"),\n output_scale=relay.const(1.0, dtype=\"float32\"),\n output_zero_point=relay.const(0, dtype=\"int32\"),\n )\n return tvm.IRModule.from_expr(relay.Function([ifm], requantize))\n\n def verify(ext_func):\n # If mean operation and separate requantize were offloaded correctly,\n # there should only be a pooling operation followed by an identity\n # operation leagalized.\n op = ext_func.body\n assert op.op.name == \"contrib.ethosu.identity\"\n op = op.args[0]\n assert ext_func.body.args[0].op.name == \"contrib.ethosu.pooling\"\n op = op.args[0]\n assert isinstance(op, relay.Var)\n\n mod = create_model()\n mod = ethosu.partition_for_ethosu(mod)\n mod = legalize.LegalizeEthosU()(mod)\n verify(mod[\"tvmgen_default_ethos_u_main_0\"])", "def __init__(self, pool_size):\n \n self.pool_size=pool_size;", "def quantize_whole_model(net, bits=8):\n cluster_centers = []\n assert isinstance(net, nn.Module)\n layer_ind = 0\n for n, m in net.named_modules():\n if isinstance(m, PrunedConv):\n \"\"\"\n Apply quantization for the PrunedConv layer.\n --------------Your Code---------------------\n \"\"\"\n\n # Cluster the Weights\n num_centroid = pow(2, bits)\n\n all_weight = m.conv.weight.data.cpu().detach().numpy()\n weight_shape = all_weight.shape\n \n all_weight = all_weight.reshape(-1,1)\n k_init = np.linspace(all_weight.min(), all_weight.max(), num_centroid)\n\n kmeans = KMeans(n_clusters=num_centroid, init=k_init.reshape(-1, 1), n_init=1).fit(all_weight)\n\n # Generate Code Book\n cluster_center = kmeans.cluster_centers_.flatten()\n\n # Quantize\n indexs = kmeans.predict(all_weight)\n indexs = indexs.reshape(weight_shape)\n \n vmap = np.vectorize(lambda x:cluster_center[x])\n m.conv.weight.data = torch.from_numpy(vmap(indexs)).to(device)\n \n _cluster_center = [ \"{0:b}\".format(x).zfill(bits) for x in range(len(cluster_center)) ]\n cluster_centers.append(_cluster_center)\n\n layer_ind += 1\n print(\"Complete %d layers quantization...\" %layer_ind)\n \n elif isinstance(m, PruneLinear):\n \"\"\"\n Apply quantization for the PrunedLinear layer.\n --------------Your Code---------------------\n \"\"\"\n \n # Cluster the Weights\n num_centroid = pow(2, bits)\n \n all_weight = m.linear.weight.data.cpu().detach().numpy()\n weight_shape = all_weight.shape\n\n all_weight = all_weight.reshape(-1,1)\n k_init = np.linspace(all_weight.min(), all_weight.max(), num_centroid)\n \n kmeans = KMeans(n_clusters=num_centroid, init=k_init.reshape(-1, 1), n_init=1).fit(all_weight)\n \n # Generate Code Book\n cluster_center = kmeans.cluster_centers_.flatten()\n\n # Quantize\n indexs = kmeans.predict(all_weight)\n indexs = indexs.reshape(weight_shape)\n\n vmap = np.vectorize(lambda x:cluster_center[x])\n m.linear.weight.data = torch.from_numpy(vmap(indexs)).to(device)\n \n _cluster_center = [ \"{0:b}\".format(x).zfill(bits) for x in 
range(len(cluster_center)) ]\n cluster_centers.append(_cluster_center)\n \n layer_ind += 1\n print(\"Complete %d layers quantization...\" %layer_ind)\n \n return np.array(cluster_centers)", "def test_ipu_cpu_match(recompute_checkpoint, embedding_serialization_factor):\n import warnings\n\n warnings.filterwarnings(\"ignore\", category=torch.jit.TracerWarning)\n\n # Config\n args = \"\"\"\n --config unit_test\n --lr-schedule constant\n --layers-per-ipu 0 3\n --vocab-size 30400\n --micro-batch-size 10\n --device-iterations 1\n --gradient-accumulation 10\n --enable-half-partials False\n --optimizer AdamW\n --learning-rate 0.001\n \"\"\".split()\n config = BertConfig(**(vars(parse_bert_args(args))))\n config.hidden_dropout_prob = 0.0\n config.attention_probs_dropout_prob = 0.0\n config.recompute_checkpoint_every_layer = recompute_checkpoint\n config.embedding_serialization_factor = embedding_serialization_factor\n\n # Models and options\n opts = get_options(config)\n opts.outputMode(poptorch.OutputMode.Final)\n model_cpu = PipelinedBertForPretraining(config).train()\n model_ipu = PipelinedBertForPretraining(config).parallelize().train()\n model_ipu.load_state_dict(model_cpu.state_dict())\n\n # Check that copy was successful\n assert model_ipu is not model_cpu\n for name, tensor1 in model_cpu.state_dict().items():\n tensor2 = model_ipu.state_dict()[name]\n assert torch.all(tensor1 == tensor2)\n\n optimizer_cpu = torch.optim.AdamW(model_cpu.parameters(), lr=0.001)\n optimizer_ipu = poptorch.optim.AdamW(model_ipu.parameters(), lr=0.001, loss_scaling=1.0)\n poptorch_model = poptorch.trainingModel(model_ipu, opts, optimizer=optimizer_ipu)\n\n # Input\n tokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\")\n inputs = tokenizer(\n \"Hello, my dog is cute Hello, my dog is cute Hello, my dog is cute Hello, my dog is cute Hello, my dog is cute yo\"\n \"Hello, my dog is cute Hello, my dog is cute Hello, my dog is cute Hello, my dog is cute Hello, my dog is cute yo\"\n \"Hello, my dog is cute Hello, my dog is cute Hello, my dog is cute Hello, my dog is cute Hello, my dog is cute yo\"\n \"Hello, my dog is cute Hello, my dog is cute Hello, my dog is cute Hello, my dog is cute Hello, my dog is cute\",\n return_tensors=\"pt\",\n )\n inputs[\"labels\"] = torch.randint(0, config.vocab_size, [1, config.mask_tokens], dtype=torch.long)\n inputs[\"next_sentence_label\"] = torch.randint(0, 1, [1], dtype=torch.long)\n inputs[\"masked_lm_positions\"] = torch.randint(0, config.sequence_length, [1, config.mask_tokens], dtype=torch.long)\n\n batch_size = config.micro_batch_size\n\n batch = (\n inputs[\"input_ids\"].repeat(batch_size, 1),\n inputs[\"attention_mask\"].repeat(batch_size, 1),\n inputs[\"token_type_ids\"].repeat(batch_size, 1),\n inputs[\"masked_lm_positions\"].repeat(batch_size, 1),\n inputs[\"labels\"].repeat(batch_size, 1),\n inputs[\"next_sentence_label\"].repeat(batch_size, 1),\n )\n\n batch_cpu = (\n inputs[\"input_ids\"].repeat(1, 1),\n inputs[\"attention_mask\"].repeat(1, 1),\n inputs[\"token_type_ids\"].repeat(1, 1),\n inputs[\"masked_lm_positions\"].repeat(1, 1),\n inputs[\"labels\"].repeat(1, 1),\n inputs[\"next_sentence_label\"].repeat(1, 1),\n )\n\n # Training Loop\n for step in range(10):\n # Step CPU model\n optimizer_cpu.zero_grad()\n for b in range(batch_size):\n cpu_output = model_cpu(*batch_cpu)\n cpu_loss = cpu_output[0]\n cpu_loss.div(batch_size).backward()\n optimizer_cpu.step()\n\n # Step IPU Model\n ipu_output = poptorch_model(*batch)\n ipu_loss = ipu_output[0]\n\n with 
torch.no_grad():\n print(f\"CPU Loss: {cpu_loss}, IPU Loss: {ipu_loss}\")\n # Check the losses are approximately equal\n assert np.allclose(cpu_loss.numpy(), ipu_loss.numpy(), atol=1e-6)", "def __init__(self):\n super(Encoder2, self).__init__()\n self.lblocks = nn.ModuleList(\n [\n nn.Sequential(\n nn.MaxPool1d(kernel_size=2, stride=2),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n ),\n nn.Sequential(\n nn.MaxPool1d(kernel_size=2, stride=2),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n ),\n nn.Sequential(\n nn.MaxPool1d(kernel_size=2, stride=2),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n ),\n nn.Sequential(\n nn.MaxPool1d(kernel_size=2, stride=2),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n ),\n nn.Sequential(\n nn.MaxPool1d(kernel_size=2, stride=2),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n ),\n ]\n )\n\n self.blocks = nn.ModuleList(\n [\n nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n ),\n ]\n )", "def create(self):\n \n \"\"\" A solo prepressing reduction network in the head \"\"\"\n print(\"pre_reduction\")\n with tf.name_scope('pre_reduction'):\n conv1 = NW.conv(self.X, 7, 7, 64, 2, 2, name='conv1')\n pool1 = NW.max_pool(conv1, 3, 3, 2, 2, name='pool1')\n norm1 = NW.lrn(pool1, 2, 2e-05, 0.75, name='norm1')\n reduction2 = NW.conv(norm1, 1, 1, 64, 1, 1, name='reduction2')\n conv2 = NW.conv(reduction2, 3, 3, 192, 1, 1,name='conv2')\n norm2 = NW.lrn(conv2, 2, 2e-05, 0.75, name='norm2')\n pool2 = NW.max_pool(norm2, 3, 3, 2, 2, name='pool2')\n \n \"\"\" 1st inception layer group \"\"\"\n print(\"icp1\")\n with tf.name_scope('icp1'):\n # branch 0\n icp1_out0 = NW.conv(pool2, 1, 1, 64, 1, 1, name='icp1_out0')\n # branch 1\n icp1_reduction1 = NW.conv(pool2, 1, 1, 96, 1, 1, name='icp1_reduction1')\n icp1_out1 = NW.conv(icp1_reduction1, 3, 3, 128, 1, 1, name='icp1_out1')\n # branch 2\n icp1_reduction2 = NW.conv(pool2, 1, 1, 16, 1, 1, name='icp1_reduction2')\n icp1_out2 = NW.conv(icp1_reduction2, 5, 5, 32, 1, 1, name='icp1_out2')\n # branch 3\n icp1_pool = NW.max_pool(pool2, 3, 3, 1, 1, 
name='icp1_pool')\n icp1_out3 = NW.conv(icp1_pool, 1, 1, 32, 1, 1, name='icp1_out3')\n # concat\n icp2_in = NW.concat([icp1_out0,\n icp1_out1,\n icp1_out2,\n icp1_out3], 3, 'icp2_in')\n\n \"\"\" 2nd inception layer group \"\"\"\n print(\"icp2\")\n with tf.name_scope('icp2'):\n # branch 0\n icp2_out0 = NW.conv(icp2_in, 1, 1, 128, 1, 1, name='icp2_out0')\n # branch 1\n icp2_reduction1 = NW.conv(icp2_in, 1, 1, 128, 1, 1, name='icp2_reduction1')\n icp2_out1 = NW.conv(icp2_reduction1, 3, 3, 192, 1, 1, name='icp2_out1')\n # branch 2\n icp2_reduction2 = NW.conv(icp2_in, 1, 1, 32, 1, 1, name='icp2_reduction2')\n icp2_out2 = NW.conv(icp2_reduction2, 5, 5, 96, 1, 1, name='icp2_out2')\n # branch 3\n icp2_pool = NW.max_pool(icp2_in, 3, 3, 1, 1, name='icp2_pool')\n icp2_out3 = NW.conv(icp2_pool, 1, 1, 64, 1, 1, name='icp2_out3')\n # concat\n icp2_out = NW.concat([icp2_out0,\n icp2_out1,\n icp2_out2,\n icp2_out3], 3, 'icp2_out')\n \n \"\"\" 3rd inception layer group \"\"\"\n print(\"icp3\")\n with tf.name_scope('icp3'):\n icp3_in = NW.max_pool(icp2_out, 3, 3, 2, 2, name='icp3_in')\n # branch 0\n icp3_out0 = NW.conv(icp3_in, 1, 1, 192, 1, 1, name='icp3_out0')\n # branch 1\n icp3_reduction1 = NW.conv(icp3_in, 1, 1, 96, 1, 1, name='icp3_reduction1')\n icp3_out1 = NW.conv(icp3_reduction1, 3, 3, 208, 1, 1, name='icp3_out1')\n # branch 2\n icp3_reduction2 = NW.conv(icp3_in, 1, 1, 16, 1, 1, name='icp3_reduction2')\n icp3_out2 = NW.conv(icp3_reduction2, 5, 5, 48, 1, 1, name='icp3_out2')\n # branch 3\n icp3_pool = NW.max_pool(icp3_in, 3, 3, 1, 1, name='icp3_pool')\n icp3_out3 = NW.conv(icp3_pool, 1, 1, 64, 1, 1, name='icp3_out3')\n # concat\n icp3_out = NW.concat([icp3_out0,\n icp3_out1,\n icp3_out2,\n icp3_out3], 3, 'icp3_out')\n \n \"\"\" 1st classify branch \"\"\"\n with tf.name_scope('cls1'):\n cls1_pool = NW.avg_pool(icp3_out, 5, 5, 3, 3, padding='VALID', name='cls1_pool')\n cls1_reduction_pose = NW.conv(cls1_pool, 1, 1, 128, 1, 1, name='cls1_reduction_pose')\n cls1_fc1_pose = NW.fc(cls1_reduction_pose, 1024, name='cls1_fc1_pose')\n cls1_fc_pose_xy = NW.fc(cls1_fc1_pose, 2, relu=False, name='cls1_fc_pose_xy')\n cls1_fc_pose_ab = NW.fc(cls1_fc1_pose, 2, relu=False, name='cls1_fc_pose_ab')\n self.layers[\"cls1_fc_pose_xy\"] = cls1_fc_pose_xy\n self.layers[\"cls1_fc_pose_ab\"] = cls1_fc_pose_ab\n \n \"\"\" 4st inception layer group \"\"\"\n print(\"icp4\")\n with tf.name_scope('icp4'):\n # branch 0\n icp4_out0 = NW.conv(icp3_out, 1, 1, 160, 1, 1, name='icp4_out0')\n # branch 1\n icp4_reduction1 = NW.conv(icp3_out, 1, 1, 112, 1, 1, name='icp4_reduction1')\n icp4_out1 = NW.conv(icp4_reduction1, 3, 3, 224, 1, 1, name='icp4_out1')\n # branch 2\n icp4_reduction2 = NW.conv(icp3_out, 1, 1, 24, 1, 1, name='icp4_reduction2')\n icp4_out2 = NW.conv(icp4_reduction2, 5, 5, 64, 1, 1, name='icp4_out2')\n # branch 3\n icp4_pool = NW.max_pool(icp3_out, 3, 3, 1, 1, name='icp4_pool')\n icp4_out3 = NW.conv(icp4_pool, 1, 1, 64, 1, 1, name='icp4_out3')\n # concat\n icp4_out = NW.concat([icp4_out0,\n icp4_out1,\n icp4_out2,\n icp4_out3],3, name='icp4_out')\n\n \"\"\" 5st inception layer group \"\"\"\n print(\"icp5\")\n with tf.name_scope('icp5'):\n # branch 0\n icp5_out0 = NW.conv(icp4_out, 1, 1, 128, 1, 1, name='icp5_out0')\n # branch 1\n icp5_reduction1 = NW.conv(icp4_out, 1, 1, 128, 1, 1, name='icp5_reduction1')\n icp5_out1 = NW.conv(icp5_reduction1, 3, 3, 256, 1, 1, name='icp5_out1')\n # branch 2\n icp5_reduction2 = NW.conv(icp4_out,1, 1, 24, 1, 1, name='icp5_reduction2')\n icp5_out2 = NW.conv(icp5_reduction2, 5, 5, 64, 1, 1, 
name='icp5_out2')\n # branch 3\n icp5_pool = NW.max_pool(icp4_out,3, 3, 1, 1, name='icp5_pool')\n icp5_out3 = NW.conv(icp5_pool, 1, 1, 64, 1, 1, name='icp5_out3')\n # concat\n icp5_out = NW.concat([icp5_out0, \n icp5_out1, \n icp5_out2, \n icp5_out3], 3, name='icp5_out')\n \n \"\"\" 6st inception layer group \"\"\"\n print(\"icp6\")\n with tf.name_scope('icp6'):\n # branch 0\n icp6_out0 = NW.conv(icp5_out, 1, 1, 112, 1, 1, name='icp6_out0')\n # branch 1\n icp6_reduction1 = NW.conv(icp5_out, 1, 1, 144, 1, 1, name='icp6_reduction1')\n icp6_out1 = NW.conv(icp6_reduction1, 3, 3, 288, 1, 1, name='icp6_out1')\n # branch 2\n icp6_reduction2 = NW.conv(icp5_out, 1, 1, 32, 1, 1, name='icp6_reduction2')\n icp6_out2 = NW.conv(icp6_reduction2, 5, 5, 64, 1, 1, name='icp6_out2')\n # branch 3\n icp6_pool = NW.max_pool(icp5_out,3, 3, 1, 1, name='icp6_pool')\n icp6_out3 = NW.conv(icp6_pool, 1, 1, 64, 1, 1, name='icp6_out3')\n # concat\n icp6_out = NW.concat([icp6_out0,\n icp6_out1,\n icp6_out2,\n icp6_out3], 3, name='icp6_out')\n\n \"\"\" 2nd classify branch \"\"\"\n with tf.name_scope('cls2'):\n cls2_pool = NW.avg_pool(icp6_out, 5, 5, 3, 3, padding='VALID', name='cls2_pool')\n cls2_reduction_pose = NW.conv(cls2_pool, 1, 1, 128, 1, 1, name='cls2_reduction_pose')\n cls2_fc1 = NW.fc(cls2_reduction_pose, 1024, name='cls2_fc1')\n cls2_fc_pose_xy = NW.fc(cls2_fc1, 2, relu=False, name='cls2_fc_pose_xy')\n cls2_fc_pose_ab = NW.fc(cls2_fc1, 2, relu=False, name='cls2_fc_pose_ab')\n self.layers[\"cls2_fc_pose_xy\"] = cls2_fc_pose_xy\n self.layers[\"cls2_fc_pose_ab\"] = cls2_fc_pose_ab\n\n \"\"\" 7st inception layer group \"\"\"\n print(\"icp7\")\n with tf.name_scope('icp7'):\n # branch 0\n icp7_out0 = NW.conv(icp6_out, 1, 1, 256, 1, 1, name='icp7_out0')\n # branch 1\n icp7_reduction1 = NW.conv(icp6_out, 1, 1, 160, 1, 1, name='icp7_reduction1')\n icp7_out1 = NW.conv(icp7_reduction1, 3, 3, 320, 1, 1, name='icp7_out1')\n # branch 2\n icp7_reduction2 = NW.conv(icp6_out, 1, 1, 32, 1, 1, name='icp7_reduction2')\n icp7_out2 = NW.conv(icp7_reduction2, 5, 5, 128, 1, 1, name='icp7_out2')\n # branch 3\n icp7_pool = NW.max_pool(icp6_out, 3, 3, 1, 1, name='icp7_pool')\n icp7_out3 = NW.conv(icp7_pool, 1, 1, 128, 1, 1, name='icp7_out3')\n # concat\n icp7_out = NW.concat([icp7_out0,\n icp7_out1,\n icp7_out2,\n icp7_out3], 3, name='icp7_out')\n\n \"\"\" 8st inception layer group \"\"\"\n print(\"icp8\")\n with tf.name_scope('icp8'):\n icp8_in = NW.max_pool(icp7_out, 3, 3, 2, 2, name='icp8_in')\n # branch 0\n icp8_out0 = NW.conv(icp8_in, 1, 1, 256, 1, 1, name='icp8_out0')\n # branch 1\n icp8_reduction1 = NW.conv(icp8_in, 1, 1, 160, 1, 1, name='icp8_reduction1')\n icp8_out1 = NW.conv(icp8_reduction1, 3, 3, 320, 1, 1, name='icp8_out1')\n # branch 2\n icp8_reduction2 = NW.conv(icp8_in, 1, 1, 32, 1, 1, name='icp8_reduction2')\n icp8_out2 = NW.conv(icp8_reduction2, 5, 5, 128, 1, 1, name='icp8_out2')\n # branch 3\n icp8_pool = NW.max_pool(icp8_in, 3, 3, 1, 1, name='icp8_pool')\n icp8_out3 = NW.conv(icp8_pool, 1, 1, 128, 1, 1, name='icp8_out3')\n # concat\n icp8_out = NW.concat([icp8_out0,\n icp8_out1,\n icp8_out2,\n icp8_out3], 3, name='icp8_out')\n \n \"\"\" 9st inception layer group \"\"\"\n print(\"icp9\")\n with tf.name_scope('icp9'):\n # branch 0\n icp9_out0 = NW.conv(icp8_out, 1, 1, 384, 1, 1, name='icp9_out0')\n # branch 1\n icp9_reduction1 = NW.conv(icp8_out, 1, 1, 192, 1, 1, name='icp9_reduction1')\n icp9_out1 = NW.conv(icp9_reduction1, 3, 3, 384, 1, 1, name='icp9_out1')\n # branch 2\n icp9_reduction2 = NW.conv(icp8_out, 1, 1, 48, 
1, 1, name='icp9_reduction2')\n icp9_out2 = NW.conv(icp9_reduction2, 5, 5, 128, 1, 1, name='icp9_out2')\n # branch 3\n icp9_pool = NW.max_pool(icp8_out, 3, 3, 1, 1, name='icp9_pool')\n icp9_out3 = NW.conv(icp9_pool, 1, 1, 128, 1, 1, name='icp9_out3')\n # concat\n icp9_out = NW.concat([icp9_out0,\n icp9_out1,\n icp9_out2,\n icp9_out3], 3, name='icp9_out')\n\n \"\"\" 3rd classify branch \"\"\"\n with tf.name_scope('cls3'):\n cls3_pool = NW.avg_pool(icp9_out, 7, 7, 1, 1, padding='VALID', name='cls3_pool')\n cls3_fc1_pose = NW.fc(cls3_pool, 2048, name='cls3_fc1_pose')\n cls3_fc_pose_xy = NW.fc(cls3_fc1_pose, 2, relu=False, name='cls3_fc_pose_xy')\n cls3_fc_pose_ab = NW.fc(cls3_fc1_pose, 2, relu=False, name='cls3_fc_pose_ab')\n self.layers[\"cls3_fc_pose_xy\"] = cls3_fc_pose_xy\n self.layers[\"cls3_fc_pose_ab\"] = cls3_fc_pose_ab", "def __init__(self, ):\n super().__init__()\n channels = 3\n\n # Initial convolution block\n out_features = 64\n # encoder\n self.input = nn.Sequential(\n nn.ReflectionPad2d(channels),\n nn.Conv2d(3, out_features, (7, 7)),\n nn.InstanceNorm2d(out_features),\n nn.MaxPool2d(2),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(out_features, out_features * 2, (3, 3), stride=(1, 1), padding=(1, 1)),\n nn.LeakyReLU(0.2, inplace=True),\n nn.MaxPool2d(2),\n nn.Conv2d(out_features * 2, out_features * 4, (3, 3), stride=(1, 1), padding=(1, 1)),\n nn.LeakyReLU(0.2, inplace=True),\n )\n\n self.in0 = nn.InstanceNorm2d(256)\n self.block0 = blocks()\n self.block1 = blocks()\n self.block2 = blocks()\n self.block3 = blocks()\n self.block4 = blocks()\n self.block5 = blocks()\n self.block6 = blocks()\n self.block7 = blocks()\n\n self.out = nn.Sequential(\n\n nn.Upsample(scale_factor=2),\n nn.Conv2d(out_features * 4, out_features * 2, 3, stride=1, padding=1),\n nn.InstanceNorm2d(out_features * 2),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Upsample(scale_factor=2),\n nn.Conv2d(out_features * 2, out_features, 3, stride=1, padding=1),\n nn.InstanceNorm2d(out_features),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.ReflectionPad2d(channels),\n nn.Conv2d(out_features, channels, 7),\n nn.Tanh(),\n )", "def __init__(self, input_dim=(3,32,32), num_filters=[32], hidden_layers=[100], \r\n num_classes=10 ,filter_size=7, weight_scale=1e-3, reg=0, dropout=0, \r\n use_batch_norm=False, dtype=np.float32):\r\n self.params={}\r\n self.use_dropout = dropout > 0\r\n self.use_batch_norm = use_batch_norm\r\n self.conv_params = {'stride': 1, 'pad': (filter_size - 1) // 2}\r\n self.pool_params = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\r\n self.num_conv_layers = len(num_filters)\r\n self.num_hidden_layers = len(hidden_layers)\r\n self.bn_params = []\r\n self.dropout_params = []\r\n self.reg = reg\r\n\r\n # Initialize batch normalization parameters if necessary.\r\n num_layers = self.num_conv_layers + self.num_hidden_layers\r\n if self.use_batch_norm:\r\n for i in range(num_layers):\r\n self.bn_params.append({'mode':'train'})\r\n # Initialize dropout parameters if necessary\r\n if self.use_dropout:\r\n self.dropout_params = {'mode':'trian', 'p':dropout}\r\n\r\n C, H, W = input_dim\r\n channels, HH, WW = C, H, W\r\n # Initialize the parameters for the Convolutional network.\r\n for i in range(1, self.num_conv_layers+1):\r\n self.params['W{}'.format(i)] = np.random.randn(num_filters[i-1], \r\n channels, filter_size, \r\n filter_size) * weight_scale\r\n self.params['b{}'.format(i)] = np.zeros(num_filters[i-1])\r\n # Keeping track of the Height and Width of the image as we convolve\r\n # it through multiple 
layers. After pooling make sure the dimensions\r\n # make sense\r\n if (HH <= self.pool_params['pool_height']):\r\n raise Exception('The pool height and input height are equal'.\\\r\n format(self.pool_params['pool_height'], HH))\r\n else:\r\n HH = (HH - self.pool_params['pool_height']) / self.pool_params['stride'] + 1\r\n if (WW <= self.pool_params['pool_width']):\r\n raise Exception('The pool width and input width are equal'.\\\r\n format(self.params['pool_width'], WW))\r\n else:\r\n WW = (WW - self.pool_params['pool_width']) / self.pool_params['stride'] + 1\r\n\r\n\r\n # Updating the number of channels for the new input.\r\n channels = num_filters[i-1]\r\n # Initialize the parameters for the batch normalization if necessary.\r\n if self.use_batch_norm:\r\n self.params['gamma{}'.format(i)] = np.ones(channels)\r\n self.params['beta{}'.format(i)] = np.zeros(channels)\r\n\r\n # Initialize the parameters for the fully connected network.\r\n fc_input_dim = np.prod((HH, WW, channels))\r\n for i in range(1, self.num_hidden_layers+1):\r\n self.params['W{}'.format(i+self.num_conv_layers)] = np.random.randn(fc_input_dim, \r\n hidden_layers[i-1]) * weight_scale\r\n self.params['b{}'.format(i+self.num_conv_layers)] = np.zeros(hidden_layers[i-1])\r\n # Initialize the parameters for batch normalization if necessary.\r\n if self.use_batch_norm:\r\n self.params['gamma{}'.format(i+self.num_conv_layers)] = np.ones(hidden_layers[i-1])\r\n self.params['beta{}'.format(i+self.num_conv_layers)] = np.zeros(hidden_layers[i-1])\r\n fc_input_dim = hidden_layers[i-1]\r\n\r\n # Initialize the parameters for the last layer of the fully connected network.\r\n self.params['W{}'.format(i+self.num_conv_layers+1)] = np.random.randn(hidden_layers[i-1],\r\n num_classes) * weight_scale\r\n self.params['b{}'.format(i+self.num_conv_layers+1)] = np.zeros(num_classes)\r\n\r\n # Convert the dtype for the parameters of the model.\r\n for k, v in self.params.items():\r\n self.params[k] = v.astype(dtype)", "def testDifferentInputTensorShape(self):\n with self.cached_session() as sess:\n input_holder = array_ops.placeholder(dtypes.float32,\n [None, None, None, 3])\n pooling_ratio = [1, 1.5, 1.5, 1]\n pseudo_random = False\n overlapping = False\n p, r, c = nn_ops.fractional_max_pool_v2(\n input_holder,\n pooling_ratio,\n pseudo_random,\n overlapping,\n seed=self._SEED)\n # First run.\n input_a = np.zeros([3, 32, 32, 3])\n actual, row_seq, col_seq = sess.run([p, r, c], {input_holder: input_a})\n expected = self._GetExpectedFractionalMaxPoolResult(\n input_a, row_seq, col_seq, overlapping)\n self.assertSequenceEqual(expected.shape, actual.shape)\n # Second run.\n input_b = np.zeros([4, 45, 45, 3])\n actual, row_seq, col_seq = sess.run([p, r, c], {input_holder: input_b})\n expected = self._GetExpectedFractionalMaxPoolResult(\n input_b, row_seq, col_seq, overlapping)\n self.assertSequenceEqual(expected.shape, actual.shape)", "def __init__(self):\n super(Encoder3, self).__init__()\n self.lblocks = nn.ModuleList(\n [\n nn.Sequential(\n nn.MaxPool1d(kernel_size=2, stride=2),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n ),\n nn.Sequential(\n nn.MaxPool1d(kernel_size=2, stride=2),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n ),\n nn.Sequential(\n nn.MaxPool1d(kernel_size=2, stride=2),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n 
nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n ),\n ]\n )\n\n self.blocks = nn.ModuleList(\n [\n nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n ),\n ]\n )", "def __init__(self, pool_size, p=.5):\n self.pool_size = pool_size\n if self.pool_size > 0:\n self.num_images = 0\n self.images = []\n self.p = p", "def __init__(self):\n super(SCNN, self).__init__()\n\n # Linear classifier.\n self.inplanes = 128\n self._norm_layer = nn.BatchNorm2d\n self.dilation = 1\n self.groups = 1\n self.base_width = 64\n\n self.num_class = 125\n backbone = torchvision.models.resnet34(pretrained=True)\n self.shared_features = nn.Sequential(*list(backbone.children())[0:6])\n #self.realistic_head = nn.Sequential(*list(backbone.children())[6:8])\n # self.synthetic_head = nn.Sequential(nn.Conv2d(128, 128, 3, 2, 1), nn.BatchNorm2d(128), nn.ReLU(inplace=True),\n # nn.Conv2d(128, 128, 3, 1, 1), nn.BatchNorm2d(128), nn.ReLU(inplace=True),\n # nn.Conv2d(128, 256, 3, 2, 1), nn.BatchNorm2d(256), nn.ReLU(inplace=True),\n # nn.Conv2d(256, 256, 3, 1, 1), nn.BatchNorm2d(256), nn.ReLU(inplace=True))\n\n self.synthetic_head1 = self._make_layer(BasicBlock, 128, 1, stride=2, dilate=False)\n self.synthetic_head2 = self._make_layer(BasicBlock, 256, 1, stride=2, dilate=False)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.classifier = nn.Linear(256, self.num_class)\n\n for m in self.synthetic_head1.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n for m in self.synthetic_head2.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n weight_init(self.classifier)\n\n for param in self.shared_features.parameters():\n param.requires_grad = False", "def test_from_pytorch_training_classification(self):\n import torch.nn as nn\n import torch.nn.functional as F\n\n class CNN(nn.Module):\n def __init__(self):\n super(CNN, self).__init__()\n self.conv1 = nn.Conv2d(1, 16, kernel_size=5, padding=1)\n self.fc1 = nn.Linear(16 * 13 * 13, 100)\n self.fc2 = nn.Linear(100, 2)\n\n def forward(self, x):\n out = self.conv1(x)\n out = F.relu(out)\n out = F.max_pool2d(out, 2)\n out = out.view(-1, 16 * 13 * 13)\n out = self.fc1(out)\n out = F.relu(out)\n out = self.fc2(out)\n out = F.softmax(out, dim=1)\n return out\n\n model_plaintext = CNN()\n batch_size = 5\n x_orig = get_random_test_tensor(size=(batch_size, 1, 28, 28), is_float=True)\n y_orig = (\n get_random_test_tensor(size=(batch_size, 1), is_float=True).gt(0).long()\n )\n y_one_hot = onehot(y_orig, num_targets=2)\n\n # encrypt training sample:\n x_train = crypten.cryptensor(x_orig, requires_grad=True)\n 
y_train = crypten.cryptensor(y_one_hot)\n dummy_input = torch.empty((1, 1, 28, 28))\n\n for loss_name in [\"BCELoss\", \"CrossEntropyLoss\"]:\n # create encrypted model\n model = crypten.nn.from_pytorch(model_plaintext, dummy_input)\n model.train()\n model.encrypt()\n\n self._check_training(model, x_train, y_train, loss_name)\n\n self._check_model_export(model, x_train)", "def test_ModulatedDeformableConvolution():\n net = nn.HybridSequential()\n net.add(\n nn.DeformableConvolution(10, kernel_size=(3, 3), strides=1, padding=0),\n nn.DeformableConvolution(10, kernel_size=(1, 1), strides=1, padding=0),\n nn.DeformableConvolution(10, kernel_size=(5, 5), strides=1, padding=0),\n nn.DeformableConvolution(10, kernel_size=(3, 5), strides=1, padding=0),\n nn.DeformableConvolution(10, kernel_size=(5, 1), strides=1, padding=0, num_deformable_group=2),\n nn.DeformableConvolution(10, kernel_size=(3, 2), strides=1, padding=0, activation='relu',\n offset_use_bias=False, use_bias=False),\n nn.DeformableConvolution(10, kernel_size=(3, 2), strides=1, padding=0, activation='relu',\n offset_use_bias=False),\n nn.DeformableConvolution(10, kernel_size=(3, 2), strides=1, padding=0, activation='relu',\n use_bias=False),\n nn.DeformableConvolution(10, kernel_size=(3, 2), strides=1, padding=0, offset_use_bias=False, use_bias=False),\n nn.DeformableConvolution(10, kernel_size=(3, 2), strides=1, padding=0, offset_use_bias=False),\n nn.DeformableConvolution(12, kernel_size=(3, 2), strides=1, padding=0, use_bias=False),\n nn.DeformableConvolution(12, kernel_size=(3, 2), strides=1, padding=0, use_bias=False, num_deformable_group=4),\n )\n\n ctx = default_context()\n net.initialize(force_reinit=True, ctx=ctx)\n net.hybridize()\n\n x = mx.nd.random.uniform(shape=(8, 5, 30, 31), ctx=ctx)\n with mx.autograd.record():\n y = net(x)", "def __init__(self, dropout=0, input_dim=(3, 32, 32), num_filters=32, filter_size=7,\r\n hidden_dim=100, num_classes=10, weight_scale=1e-3, reg=0.0, \r\n use_batch_norm=False, dtype=np.float32):\r\n self.use_dropout = dropout > 0\r\n self.use_batch_norm = use_batch_norm\r\n self.params = {}\r\n self.reg = reg\r\n self.num_layers = 3\r\n self.dtype = dtype\r\n self.pool_height = 2\r\n self.pool_width = 2\r\n self.pool_stride = 2\r\n\r\n ############################################################################\r\n # TODO: Initialize weights and biases for the three-layer convolutional #\r\n # network. Weights should be initialized from a Gaussian with standard #\r\n # deviation equal to weight_scale; biases should be initialized to zero. #\r\n # All weights and biases should be stored in the dictionary self.params. #\r\n # Store weights and biases for the convolutional layer using the keys 'W1' #\r\n # and 'b1'; use keys 'W2' and 'b2' for the weights and biases of the #\r\n # hidden affine layer, and keys 'W3' and 'b3' for the weights and biases #\r\n # of the output affine layer. 
#\r\n ############################################################################\r\n # NUmber of channels\r\n C, H, W = input_dim\r\n self.params['W1'] = np.random.randn(num_filters, C, filter_size, filter_size) * weight_scale\r\n self.params['b1'] = np.zeros(num_filters)\r\n H_pool = (H - self.pool_height) / 2 + 1\r\n W_pool = (W - self.pool_width) / 2 + 1\r\n self.params['W2'] = np.random.randn(np.prod((num_filters, H_pool, W_pool)), hidden_dim) * weight_scale\r\n self.params['b2'] = np.zeros(hidden_dim)\r\n self.params['W3'] = np.random.randn(hidden_dim, num_classes) * weight_scale\r\n self.params['b3'] = np.zeros(num_classes)\r\n\r\n # Initialize the parameters for batch normalization if necessary\r\n if self.use_batch_norm:\r\n self.params['gamma1'] = np.ones(num_filters) \r\n self.params['beta1'] = np.zeros(num_filters)\r\n self.params['gamma2'] = np.ones(hidden_dim)\r\n self.params['beta2'] = np.zeros(hidden_dim)\r\n\r\n # Set dropout parameters if necessary\r\n self.dropout_param={}\r\n if self.use_dropout:\r\n self.dropout_param ={'mode':'train', 'p':dropout}\r\n\r\n self.bn_params = []\r\n if self.use_batch_norm:\r\n self.bn_params = [{'mode': 'train'} for i in range(self.num_layers - 1)]\r\n\r\n ############################################################################\r\n # END OF YOUR CODE #\r\n ############################################################################\r\n\r\n for k, v in self.params.items():\r\n self.params[k] = v.astype(dtype)", "def _init_layers(self):\n self.relu = nn.ReLU(inplace=True)\n self.rpn_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.rpn_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.rpn_cls = nn.Conv2d(\n self.feat_channels, self.num_anchors * 1, 3, padding=1)\n self.rpn_reg = nn.Conv2d(\n self.feat_channels, self.num_anchors * 4 * (self.reg_max + 1), 3, padding=1)\n self.rpn_iou = nn.Conv2d(\n self.feat_channels, self.num_anchors * 1, 3, padding=1)\n self.scales = nn.ModuleList(\n [Scale(1.0) for _ in self.anchor_generator.strides])\n\n ##############V2################\n conf_vector = [nn.Conv2d(self.num_anchors * 4 * self.total_dim, self.num_anchors * self.reg_channels, 1)]\n conf_vector += [self.relu]\n conf_vector += [nn.Conv2d(self.num_anchors * self.reg_channels, self.num_anchors, 1), nn.Sigmoid()]\n\n self.reg_conf = nn.Sequential(*conf_vector)\n ##############V2################", "def __init__(self, num_1d=None):\n super(Net, self).__init__()\n\n self.lconv1 = nn.Sequential(\n nn.Conv1d(4, 64, kernel_size=9, padding=4),\n nn.BatchNorm1d(64),\n nn.Conv1d(64, 64, kernel_size=9, padding=4),\n nn.BatchNorm1d(64),\n )\n\n self.conv1 = nn.Sequential(\n nn.Conv1d(64, 64, kernel_size=9, padding=4),\n nn.BatchNorm1d(64),\n nn.ReLU(inplace=True),\n nn.Conv1d(64, 64, kernel_size=9, padding=4),\n nn.BatchNorm1d(64),\n nn.ReLU(inplace=True),\n )\n\n self.lconv2 = nn.Sequential(\n nn.MaxPool1d(kernel_size=4, stride=4),\n nn.Conv1d(64, 96, kernel_size=9, padding=4),\n nn.BatchNorm1d(96),\n nn.Conv1d(96, 96, kernel_size=9, padding=4),\n nn.BatchNorm1d(96),\n )\n\n self.conv2 = nn.Sequential(\n nn.Conv1d(96, 96, kernel_size=9, padding=4),\n nn.BatchNorm1d(96),\n nn.ReLU(inplace=True),\n nn.Conv1d(96, 96, kernel_size=9, padding=4),\n nn.BatchNorm1d(96),\n nn.ReLU(inplace=True),\n )\n\n self.lconv3 = nn.Sequential(\n nn.MaxPool1d(kernel_size=4, stride=4),\n nn.Conv1d(96, 
128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n )\n\n self.conv3 = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n )\n\n self.lconv4 = nn.Sequential(\n nn.MaxPool1d(kernel_size=5, stride=5),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n )\n\n self.conv4 = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n )\n\n self.lconv5 = nn.Sequential(\n nn.MaxPool1d(kernel_size=5, stride=5),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n )\n\n self.conv5 = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n )\n\n self.lconv6 = nn.Sequential(\n nn.MaxPool1d(kernel_size=5, stride=5),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n )\n\n self.conv6 = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n )\n\n self.lconv7 = nn.Sequential(\n nn.MaxPool1d(kernel_size=2, stride=2),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n )\n\n self.conv7 = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n )\n\n self.lconvtwos = nn.ModuleList(\n [\n nn.Sequential(\n nn.Dropout(p=0.1),\n nn.Conv2d(128, 32, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n 
nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n ),\n ]\n )\n\n self.convtwos = nn.ModuleList(\n [\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), 
padding=16, dilation=16),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n ]\n )\n self.final = 
nn.Sequential(\n nn.Conv2d(64, 5, kernel_size=(1, 1), padding=0),\n nn.BatchNorm2d(5),\n nn.ReLU(inplace=True),\n nn.Conv2d(5, 1, kernel_size=(1, 1), padding=0),\n )\n if num_1d is not None:\n self.final_1d = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=1, padding=0),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, num_1d, kernel_size=1, padding=0),\n nn.Sigmoid(),\n )\n self.num_1d = num_1d", "def _passing_args_impl(self, pool_class_factory):\n DELTA = 12\n ITERATIONS = 100\n pool = pool_class_factory()\n\n pool.start(CoeffMultiplierWorker, {'coeff': DELTA})\n for i in range(ITERATIONS):\n pool.ventilate(message='Vent data {}'.format(i), value=i)\n\n all_results = [pool.get_results() for _ in range(ITERATIONS)]\n self.assertEqual({DELTA}, set(np.diff(sorted(all_results))))\n\n pool.stop()\n pool.join()", "def __init__(self, dropout_rate=0.0, in_channels=3):\n\n super(MaskNet, self).__init__()\n\n self.prep_block_1 = nn.Sequential(\n nn.Conv2d(in_channels=in_channels, out_channels=32, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(32),\n nn.Dropout(dropout_rate),\n )\n self.prep_block_2 = nn.Sequential(\n nn.Conv2d(in_channels=in_channels, out_channels=32, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(32),\n nn.Dropout(dropout_rate),\n )\n\n self.convblock1 = nn.Sequential(\n nn.Conv2d(in_channels=64, out_channels=32, kernel_size=1, padding=0),\n nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(64),\n nn.Dropout(dropout_rate),\n )\n\n self.convblock2 = nn.Sequential(\n nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(64),\n nn.Dropout(dropout_rate),\n )\n\n self.convblock3 = nn.Sequential(\n nn.Conv2d(in_channels=64, out_channels=1, kernel_size=1, padding=0),\n )", "def conv_relu_pool_forward_naive(x, w, b, conv_param, pool_param):\n\ta, conv_cache = conv_forward_naive(x, w, b, conv_param)\n\ts, relu_cache = relu_forward(a)\n\tout, pool_cache = max_pool_forward_naive(s, pool_param)\n\tcache = (conv_cache, relu_cache, pool_cache)\n\treturn out, cache", "def __init__(self, id, node_type=NodeType.HIDDEN, activation=F.relu, layer_type=nn.Conv2d,\n conv_window_size=3, conv_stride=1, max_pool_size=2):\n\n super(ModuleNEATNode, self).__init__(id, node_type)\n\n batch_norm_chance = 0.65 # chance that a new node will start with batch norm\n use_batch_norm = random.random() < batch_norm_chance\n\n dropout_chance = 0.2 # chance that a new node will start with drop out\n use_dropout = random.random() < dropout_chance\n\n max_pool_chance = 0.3 # chance that a new node will start with drop out\n use_max_pool = random.random() < max_pool_chance\n\n self.activation = Mutagen(F.relu, F.leaky_relu, torch.sigmoid, F.relu6,\n discreet_value=activation, name=\"activation function\",\n mutation_chance=0.15) # TODO try add in Selu, Elu\n\n conv_out_features = 25 + random.randint(0, 25)\n linear_out_features = 100 + random.randint(0, 100)\n\n linear_submutagens = \\\n {\n \"regularisation\": Mutagen(None, nn.BatchNorm1d,\n discreet_value=nn.BatchNorm1d if use_batch_norm else None,\n mutation_chance=0.15),\n\n \"dropout\": Mutagen(None, nn.Dropout, discreet_value=nn.Dropout if use_dropout else None, sub_mutagens=\n {\n nn.Dropout: {\n \"dropout_factor\": Mutagen(value_type=ValueType.CONTINUOUS, current_value=0.15, start_range=0,\n end_range=0.75)}\n }, mutation_chance=0.08),\n\n \"out_features\": Mutagen(value_type=ValueType.WHOLE_NUMBERS, 
current_value=linear_out_features,\n start_range=10,\n end_range=1024, name=\"num out features\", mutation_chance=0.22,\n distance_weighting=Props.LAYER_SIZE_COEFFICIENT if Config.allow_attribute_distance else 0)\n }\n\n conv_submutagens = {\n \"conv_window_size\": Mutagen(3, 5, 7, discreet_value=conv_window_size, mutation_chance=0.13),\n\n \"conv_stride\": Mutagen(value_type=ValueType.WHOLE_NUMBERS, current_value=conv_stride, start_range=1,\n end_range=5),\n\n \"reduction\": Mutagen(None, nn.MaxPool2d, discreet_value=nn.MaxPool2d if use_max_pool else None,\n sub_mutagens=\n {\n nn.MaxPool2d: {\"pool_size\": Mutagen(\n value_type=ValueType.WHOLE_NUMBERS, current_value=max_pool_size, start_range=2,\n end_range=5)}\n }, mutation_chance=0.15),\n\n \"regularisation\": Mutagen(None, nn.BatchNorm2d, discreet_value=nn.BatchNorm2d if use_batch_norm else None,\n mutation_chance=0.15),\n\n \"dropout\": Mutagen(None, nn.Dropout2d, discreet_value=nn.Dropout2d if use_dropout else None, sub_mutagens=\n {\n nn.Dropout2d: {\n \"dropout_factor\": Mutagen(value_type=ValueType.CONTINUOUS, current_value=0.1,\n start_range=0, end_range=0.75)}\n }, mutation_chance=0.08),\n\n \"out_features\": Mutagen(value_type=ValueType.WHOLE_NUMBERS, current_value=conv_out_features, start_range=1,\n end_range=100, name=\"num out features\", mutation_chance=0.22,\n distance_weighting=Props.LAYER_SIZE_COEFFICIENT if Config.allow_attribute_distance else 0)\n }\n\n if use_linears and not use_convs:\n self.layer_type = Mutagen(nn.Linear, discreet_value=nn.Linear,\n distance_weighting=Props.LAYER_TYPE_COEFFICIENT if Config.allow_attribute_distance else 0,\n sub_mutagens={nn.Linear: linear_submutagens}\n )\n if use_convs and not use_linears:\n self.layer_type = Mutagen(nn.Conv2d, discreet_value=nn.Conv2d,\n distance_weighting=Props.LAYER_TYPE_COEFFICIENT if Config.allow_attribute_distance else 0,\n sub_mutagens={nn.Conv2d: conv_submutagens})\n if use_convs and use_linears:\n self.layer_type = Mutagen(nn.Conv2d, nn.Linear, discreet_value=layer_type,\n distance_weighting=Props.LAYER_TYPE_COEFFICIENT if Config.allow_attribute_distance else 0,\n sub_mutagens={\n nn.Conv2d: conv_submutagens,\n nn.Linear: linear_submutagens\n }, name=\"deep layer type\", mutation_chance=0.08)", "def __init__(\n self,\n encoder,\n num_classes=20,\n shape_size=16,\n mask_conv=True,\n min_confidence=0.2,\n min_remain=0.5,\n mask_threshold=0.4,\n ):\n super(PaPs, self).__init__()\n self.encoder = encoder\n self.shape_size = shape_size\n self.num_classes = num_classes\n self.min_scale = 1 / shape_size\n self.register_buffer(\"min_confidence\", torch.tensor([min_confidence]))\n self.min_remain = min_remain\n self.mask_threshold = mask_threshold\n self.center_extractor = CenterExtractor()\n\n enc_dim = encoder.enc_dim\n stack_dim = encoder.stack_dim\n self.heatmap_conv = nn.Sequential(\n ConvLayer(nkernels=[enc_dim, 32, 1], last_relu=False, k=3, p=1,\n padding_mode=\"reflect\"),\n nn.Sigmoid(),\n )\n\n self.saliency_conv = ConvLayer(\n nkernels=[enc_dim, 32, 1], last_relu=False, k=3, p=1,\n padding_mode=\"reflect\"\n )\n\n self.shape_mlp = nn.Sequential(\n nn.Linear(stack_dim, stack_dim // 2),\n nn.BatchNorm1d(stack_dim // 2),\n nn.ReLU(),\n nn.Linear(stack_dim // 2, shape_size ** 2),\n )\n\n self.size_mlp = nn.Sequential(\n nn.Linear(stack_dim, stack_dim // 2),\n nn.BatchNorm1d(stack_dim // 2),\n nn.ReLU(),\n nn.Linear(stack_dim // 2, stack_dim // 4),\n nn.BatchNorm1d(stack_dim // 4),\n nn.ReLU(),\n nn.Linear(stack_dim // 4, 2),\n nn.Softplus(),\n )\n\n 
self.class_mlp = nn.Sequential(\n nn.Linear(stack_dim, stack_dim // 2),\n nn.BatchNorm1d(stack_dim // 2),\n nn.ReLU(),\n nn.Linear(stack_dim // 2, stack_dim // 4),\n nn.Linear(stack_dim // 4, num_classes),\n )\n\n if mask_conv:\n self.mask_cnn = nn.Sequential(\n nn.Conv2d(in_channels=1, out_channels=16, kernel_size=3, padding=1),\n nn.GroupNorm(num_channels=16, num_groups=1),\n nn.ReLU(),\n nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.Conv2d(in_channels=16, out_channels=1, kernel_size=3, padding=1),\n )\n else:\n self.mask_cnn = None", "def _init_layers(self):\n self.relu = nn.ReLU(inplace=True)\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=dict(type='DCN', deform_groups=1)\n if i == 0 and self.use_dcn else self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=dict(type='DCN', deform_groups=1)\n if i == 0 and self.use_dcn else self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.atss_cls = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * self.cls_out_channels,\n 3,\n padding=1)\n self.atss_reg = nn.Conv2d(\n self.feat_channels, self.num_base_priors * 4, 3, padding=1)\n self.atss_iou = nn.Conv2d(\n self.feat_channels, self.num_base_priors * 1, 3, padding=1)\n self.scales = nn.ModuleList(\n [Scale(1.0) for _ in self.prior_generator.strides])\n\n # we use the global list in loss\n self.cls_num_pos_samples_per_level = [\n 0. for _ in range(len(self.prior_generator.strides))\n ]\n self.reg_num_pos_samples_per_level = [\n 0. 
for _ in range(len(self.prior_generator.strides))\n ]", "def test_threshold_constructors(ndraw=1000, burnin=200):\n\n cls = threshold\n for const_info, rand in product(zip([gaussian_instance,\n logistic_instance,\n poisson_instance],\n [cls.gaussian,\n cls.logistic,\n cls.poisson]),\n ['gaussian', 'logistic', 'laplace']):\n\n inst, const = const_info\n X, Y = inst()[:2]\n W = np.ones(X.shape[1])\n\n n, p = X.shape\n active = np.zeros(p, np.bool)\n active[:int(p/2)] = True\n\n candidate = ~active\n candidate[-int(p/4):] = False\n\n conv1 = const(X, Y, W, active=active)\n conv1.fit()\n\n conv2 = const(X, Y, W, candidate=candidate)\n conv2.fit()\n \n conv3 = const(X, Y, W, candidate=candidate, active=active)\n conv3.fit()\n \n selected_features = np.zeros(p, np.bool)\n selected_features[:3] = True\n\n conv3.summary(selected_features,\n ndraw=ndraw,\n burnin=burnin)", "def __init__(self, pool_size):\n self.pool_size = pool_size\n if self.pool_size > 0: # create an empty pool\n self.num_imgs = 0\n self.images = []", "def BCLoss(img, patch_size):\n patch_size = 35\n dc = maxpool(img[:, None, :, :, :])\n \n target = Variable(torch.FloatTensor(dc.shape).zero_().cuda()+1) \n loss = L1Loss(reduction='sum')(dc, target)\n return loss", "def __init__(self):\n super(CNN, self).__init__()\n\n self.conv0 = nn.Conv2d(3, 3, kernel_size=5, stride=1, padding=0)\n nn.init.xavier_uniform_(self.conv0.weight)\n\n self.conv1 = nn.Conv2d(3, 30, kernel_size=5, stride=2, padding=0)\n self.conv1.weight = nn.Parameter(get_filters())\n\n self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)\n\n self.conv2 = nn.Conv2d(30, 16, kernel_size=3, stride=1, padding=0)\n nn.init.xavier_uniform_(self.conv2.weight)\n\n self.conv3 = nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=0)\n nn.init.xavier_uniform_(self.conv3.weight)\n\n self.conv4 = nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=0)\n nn.init.xavier_uniform_(self.conv4.weight)\n\n self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)\n\n self.conv5 = nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=0)\n nn.init.xavier_uniform_(self.conv5.weight)\n\n self.conv6 = nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=0)\n nn.init.xavier_uniform_(self.conv6.weight)\n\n self.conv7 = nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=0)\n nn.init.xavier_uniform_(self.conv7.weight)\n\n self.conv8 = nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=0)\n nn.init.xavier_uniform_(self.conv8.weight)\n\n self.fc = nn.Linear(16 * 5 * 5, 2)\n\n self.drop1 = nn.Dropout(p=0.5) # used only for the NC dataset", "def _make_conv_layers(self):\n conv = nn.Sequential(\n nn.Conv2d(in_channels=3, out_channels=64, kernel_size=7, stride=2, padding=1), # padding=3 so, output is 224.\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2),\n\n nn.Conv2d(64, 192, 3, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(192, 128, 1, padding=1), ## kernel size = 1 이므로 padding = 0(defalut)\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(128, 256, 3, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 256, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(512, 256, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1), \n 
nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(256, 512, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(256, 512, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 512, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(1024, 512, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(1024, 512, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1),\n nn.LeakyReLU(0.1, inplace=True)\n )\n return conv", "def test_autocreate_licensepool(self):\n identifier = self._identifier()\n assert [] == identifier.licensed_through\n provider = AlwaysSuccessfulCollectionCoverageProvider(\n self._default_collection\n )\n pool = provider.license_pool(identifier)\n assert [pool] == identifier.licensed_through\n assert pool.data_source == provider.data_source\n assert pool.identifier == identifier\n assert pool.collection == provider.collection\n\n # Calling license_pool again finds the same LicensePool\n # as before.\n pool2 = provider.license_pool(identifier)\n assert pool == pool2\n\n # It's possible for a CollectionCoverageProvider to create a\n # LicensePool for a different DataSource than the one\n # associated with the Collection. Only the metadata wrangler\n # needs to do this -- it's so a CoverageProvider for a\n # third-party DataSource can create an 'Internal Processing'\n # LicensePool when some other part of the metadata wrangler\n # failed to do this earlier.\n\n # If a working pool already exists, it's returned and no new\n # pool is created.\n same_pool = provider.license_pool(\n identifier, DataSource.INTERNAL_PROCESSING\n )\n assert same_pool == pool2\n assert provider.data_source == same_pool.data_source\n\n # A new pool is only created if no working pool can be found.\n identifier2 = self._identifier()\n new_pool = provider.license_pool(\n identifier2, DataSource.INTERNAL_PROCESSING\n )\n assert new_pool.data_source.name == DataSource.INTERNAL_PROCESSING\n assert new_pool.identifier == identifier2\n assert new_pool.collection == provider.collection", "def __init__(self, num_gpus):\n\n super(Critic, self).__init__()\n n_in = IMG_CHANNELS\n n_out = 1\n\n feature_map = IMG_SIZE\n kernel_size = 4\n stride = 2\n padding = 1\n bias = False\n\n self.num_gpus = num_gpus\n\n self.network = nn.Sequential(\n # nodes = IMG_CHANNELS * IMG_SIZE * IMG_SIZE\n nn.Conv2d(n_in, feature_map, kernel_size, stride, padding, bias=bias),\n nn.LeakyReLU(0.2, inplace=True),\n\n # nodes = feature_map * 2\n nn.Conv2d(feature_map, feature_map * 2, kernel_size, stride, padding, bias=bias),\n nn.BatchNorm2d(feature_map * 2),\n nn.LeakyReLU(0.2, inplace=True),\n\n # nodes = feature_map * 4\n nn.Conv2d(feature_map * 2, feature_map * 4, kernel_size, stride, padding, bias=bias),\n nn.BatchNorm2d(feature_map * 4),\n nn.LeakyReLU(0.2, inplace=True),\n\n # nodes = feature_map * 8\n nn.Conv2d(feature_map * 4, feature_map * 8, kernel_size, stride, padding, bias=bias),\n nn.BatchNorm2d(feature_map * 8),\n nn.LeakyReLU(0.2, inplace=True),\n\n # nodes = feature_map * 8\n nn.Conv2d(feature_map * 8, n_out, kernel_size, 1, 0, bias=bias),\n # scratched sigmoid activation function\n )", "def test_weighted_strategy_pool(self):\n 
environment.set_value('STRATEGY_SELECTION_METHOD', 'multi_armed_bandit')\n strategy_pool = strategy_selection.generate_weighted_strategy_pool(\n strategy_list=strategy.AFL_STRATEGY_LIST,\n use_generator=True,\n engine_name='afl')\n self.assertFalse(\n strategy_pool.do_strategy(strategy.CORPUS_MUTATION_RADAMSA_STRATEGY))\n self.assertTrue(strategy_pool.do_strategy(strategy.CORPUS_SUBSET_STRATEGY))", "def _minimal_device_test(device: torch.device) -> bool:\n try:\n with torch.no_grad():\n model = torch.nn.Conv2d(1, 1, 1).to(device)\n x = torch.zeros(1, 1, 1, 1).to(device)\n y = model(x)\n del model, x, y\n except Exception as e:\n return False\n\n return True", "def __init__(self):\n super(Encoder, self).__init__()\n\n self.lconv1 = nn.Sequential(\n nn.Conv1d(4, 64, kernel_size=9, padding=4),\n nn.BatchNorm1d(64),\n nn.Conv1d(64, 64, kernel_size=9, padding=4),\n nn.BatchNorm1d(64),\n )\n\n self.conv1 = nn.Sequential(\n nn.Conv1d(64, 64, kernel_size=9, padding=4),\n nn.BatchNorm1d(64),\n nn.ReLU(inplace=True),\n nn.Conv1d(64, 64, kernel_size=9, padding=4),\n nn.BatchNorm1d(64),\n nn.ReLU(inplace=True),\n )\n\n self.lconv2 = nn.Sequential(\n nn.MaxPool1d(kernel_size=4, stride=4),\n nn.Conv1d(64, 96, kernel_size=9, padding=4),\n nn.BatchNorm1d(96),\n nn.Conv1d(96, 96, kernel_size=9, padding=4),\n nn.BatchNorm1d(96),\n )\n\n self.conv2 = nn.Sequential(\n nn.Conv1d(96, 96, kernel_size=9, padding=4),\n nn.BatchNorm1d(96),\n nn.ReLU(inplace=True),\n nn.Conv1d(96, 96, kernel_size=9, padding=4),\n nn.BatchNorm1d(96),\n nn.ReLU(inplace=True),\n )\n\n self.lconv3 = nn.Sequential(\n nn.MaxPool1d(kernel_size=4, stride=4),\n nn.Conv1d(96, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n )\n\n self.conv3 = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n )\n\n self.lconv4 = nn.Sequential(\n nn.MaxPool1d(kernel_size=5, stride=5),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n )\n\n self.conv4 = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n )\n\n self.lconv5 = nn.Sequential(\n nn.MaxPool1d(kernel_size=5, stride=5),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n )\n\n self.conv5 = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n )\n\n self.lconv6 = nn.Sequential(\n nn.MaxPool1d(kernel_size=5, stride=5),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n )\n\n self.conv6 = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n )\n\n self.lconv7 = nn.Sequential(\n nn.MaxPool1d(kernel_size=2, stride=2),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, 
padding=4),\n nn.BatchNorm1d(128),\n )\n\n self.conv7 = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n )", "def test_sequential_network_config_for_symmetric(mnist_model_quantization):\n data = torch.randn(1, 1, 28, 28)\n prepared_model, quantizer = quantize_model(mnist_model_quantization, data)\n\n # verify module fusion\n assert isinstance(prepared_model.conv1, _qat.ConvBnAct2d)\n assert isinstance(prepared_model.conv2, _qat.ConvAct2d)\n assert isinstance(prepared_model.dense1, _qat.LinearAct)\n assert isinstance(prepared_model.dense2, _qat.LinearAct)\n\n # verify activation quantizers\n # after input\n assert prepared_model.activation_post_process_0.qscheme == torch.per_tensor_symmetric\n # after conv1\n assert prepared_model.activation_post_process_1.qscheme == torch.per_tensor_affine\n # after pool, this is shared with output of conv1\n assert id(prepared_model.activation_post_process_1) == id(prepared_model.activation_post_process_2)\n # after conv2\n assert prepared_model.activation_post_process_3.qscheme == torch.per_tensor_affine\n # after pool and flatten, shared with output of conv2\n assert id(prepared_model.activation_post_process_3) == id(prepared_model.activation_post_process_4)\n assert id(prepared_model.activation_post_process_3) == id(prepared_model.activation_post_process_5)\n # after linear1\n assert prepared_model.activation_post_process_6.qscheme == torch.per_tensor_affine\n # after dropout\n # we remove activation post process after dropout layer\n assert not hasattr(prepared_model, \"activation_post_process_7\")\n # after linear2, logsoftmax\n assert prepared_model.activation_post_process_8.qscheme == torch.per_tensor_symmetric\n\n # convert model and test fusion\n converted_model = quantizer.finalize(inplace=False)\n\n # assert converted module fusion\n assert isinstance(converted_model.conv1, _quantized.QuantizedConvAct2d)\n assert isinstance(converted_model.conv2, _quantized.QuantizedConvAct2d)\n assert isinstance(converted_model.dense1, _quantized.QuantizedLinearAct)\n assert isinstance(converted_model.dense2, _quantized.QuantizedLinearAct)", "def __init__(self, n_input_channels=3, n_conv_output_channels=16, k=3, s=1, pad=1, p = 0.5):\n super(ModelCNN, self).__init__()\n # 1. Convolutional layers\n # Single image is in shape: 3x96x96 (CxHxW, H==W), RGB images\n self.conv1 = nn.Conv2d(in_channels = n_input_channels, out_channels = n_conv_output_channels, kernel_size = k, stride = s, padding = pad)\n self.bn1 = nn.BatchNorm2d(n_conv_output_channels)\n self.conv2 = nn.Conv2d(in_channels = n_conv_output_channels, out_channels = 2*n_conv_output_channels, kernel_size = k, stride = s, padding = pad)\n self.bn2 = nn.BatchNorm2d(2*n_conv_output_channels)\n self.conv3 = nn.Conv2d(in_channels = 2*n_conv_output_channels, out_channels = 4*n_conv_output_channels, kernel_size = k, stride = s, padding = pad)\n self.bn3 = nn.BatchNorm2d(4*n_conv_output_channels)\n self.conv4 = nn.Conv2d(in_channels = 4*n_conv_output_channels, out_channels = 8*n_conv_output_channels, kernel_size = k, stride = s, padding = pad)\n self.bn4 = nn.BatchNorm2d(8*n_conv_output_channels)\n self.pool = nn.MaxPool2d(kernel_size = k - 1, stride = 2*s, padding = pad - pad)\n \n self.dropout = nn.Dropout(p = p)\n \n # 2. 
FC layers to final output\n self.fc1 = nn.Linear(in_features = 288*n_conv_output_channels, out_features = 32*n_conv_output_channels)\n self.fc_bn1 = nn.BatchNorm1d(32*n_conv_output_channels)\n self.fc2 = nn.Linear(in_features = 32*n_conv_output_channels, out_features = 16*n_conv_output_channels)\n self.fc_bn2 = nn.BatchNorm1d(16*n_conv_output_channels)\n self.fc3 = nn.Linear(in_features = 16*n_conv_output_channels, out_features = 8*n_conv_output_channels)\n self.fc_bn3 = nn.BatchNorm1d(8*n_conv_output_channels)\n self.fc4 = nn.Linear(in_features = 8*n_conv_output_channels, out_features = 1)" ]
[ "0.6518167", "0.6481036", "0.64059615", "0.6386542", "0.6330426", "0.6294219", "0.62897485", "0.6237556", "0.61123407", "0.603048", "0.6028792", "0.5981502", "0.58038086", "0.57659817", "0.57648206", "0.5753006", "0.573803", "0.57336324", "0.57228494", "0.57161427", "0.5715199", "0.56864136", "0.56796277", "0.5662202", "0.5659104", "0.565004", "0.5647536", "0.5641005", "0.56172484", "0.5614838", "0.56003815", "0.5595076", "0.5587884", "0.55578774", "0.5556559", "0.5553171", "0.55480236", "0.5533569", "0.55258995", "0.5515524", "0.5509433", "0.55070066", "0.5505726", "0.55032426", "0.54994637", "0.54903615", "0.5484284", "0.5482348", "0.54793227", "0.5478267", "0.5473", "0.5470108", "0.5465916", "0.5456924", "0.545373", "0.54503316", "0.5448852", "0.54432243", "0.5440244", "0.54380566", "0.54368514", "0.5433879", "0.54235196", "0.54206467", "0.5419413", "0.54182804", "0.54157066", "0.54144824", "0.54124814", "0.53942037", "0.5372658", "0.53683716", "0.53651375", "0.5364342", "0.5356771", "0.53553754", "0.53546405", "0.53476834", "0.53475904", "0.5344413", "0.5343407", "0.53424084", "0.53422767", "0.53313124", "0.53303087", "0.5325644", "0.5322439", "0.53204393", "0.531875", "0.5317766", "0.53119093", "0.5303017", "0.530246", "0.52952635", "0.5294796", "0.5293005", "0.528811", "0.5285851", "0.52841693", "0.52783704" ]
0.5887395
12
returns number of output channels
def get_n_channels(self): return self.n_out_channels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def n_channels(self):\n return len(self.channels)", "def num_channels_per_output(cls) -> list[tuple[int, ...]]:\n return [\n (16, 24, 40, 112, 320),\n (16, 24, 40, 112, 320),\n (16, 24, 48, 120, 352),\n (24, 32, 48, 136, 384),\n (24, 32, 56, 160, 448),\n (24, 40, 64, 176, 512),\n (32, 40, 72, 200, 576),\n (32, 48, 80, 224, 640),\n (32, 56, 88, 248, 704),\n (72, 104, 176, 480, 1376),\n ]", "def get_num_channels():\r\n check_mixer()\r\n return sdl.Mix_GroupCount(-1)", "def channels(self) -> int:\n return len(self._channel_arrays)", "def num_of_channels(self) -> int:\n return len(self.non_zero_channels())", "def num_channels(self):\n return 3", "def n_channels(self):\n return self._n_channels", "def getNchan(self):\n return self.shape(squeeze=False)[2]", "def nchans(self):\n return self.bw / self.bw_chan", "def get_num_channels(x):\n return x.get_shape().as_list()[-1]", "def channel_size(self):\n if self.channels is None:\n return 0\n return self.channels.size", "def get_num_outputs(self):\n return len(self.outputs)", "def get_num_channels(self):\r\n check_mixer()\r\n return sdl.Mix_GroupCount(self._chunk_tag)", "def get_num_channels(self):\n return _uhd_swig.rx_streamer_get_num_channels(self)", "def get_number_of_output_ports(self):\n return 1", "def out_channels(self):\r\n return [self._width] * (self._depth + 1)", "def n_outputs(self):\n return len(self.output_names())", "def n_outputs(self):\n return self.__n_outputs", "def out_channel(self) -> int:\n return self._get_divisible_channel(self.args[0] * self.width_multiply)", "def num_frames(self):\n return self._first_rgb.shape[1]", "def num_channels(self):\n with audioread.audio_open(self.path) as f:\n return f.channels", "def n_outputs(self):\n return len(self._output_labels)", "def n_outputs(self):\n return len(self._output_labels)", "def get_num_channels(self):\n return _uhd_swig.tx_streamer_get_num_channels(self)", "def num_channels(input_tensor):\n return input_tensor.get_shape().as_list()[-1]", "def get_num_of_output_tensors(self):\n return self._engine.get_num_of_output_tensors()", "def channel_count(self):\n index = self._ordered_input_names.index('channel_count')\n return self._inputs[index]", "def _get_cls_out_channels(self):\n # Class numbers (k) + objectness (1)\n return self.num_classes", "def GetNumberOfComponents():\n return _itkRGBAPixelPython.itkRGBAPixelUC_GetNumberOfComponents()", "def outdim(self):\n return len(self.getSensors())", "def _calc_out_channels(self, in_channels):\n out_channels = min(in_channels * 2, self.max_channels)\n return out_channels", "def nOutputs(self):\n\n\t\treturn self._nOutputs", "def get_number_of_channels(tgt_l, model_graph):\n return int(model_graph.get_tensor_by_name(tgt_l + ':0').get_shape()[-1])", "def channels(self) -> int:\n return self._channels", "def GetNumberOfComponents():\n return _itkRGBAPixelPython.itkRGBAPixelUS_GetNumberOfComponents()", "def get_num_frames(self):\n return self._frames.shape[0]", "def get_sound_output_devs_count(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetSoundOutputDevsCount', self.handle)", "def __len__(self) -> int:\n\n return len(self._space.CHANNELS) + 1", "def itkRGBAPixelUC_GetNumberOfComponents():\n return _itkRGBAPixelPython.itkRGBAPixelUC_GetNumberOfComponents()", "def get_num_measured_outputs(self):\n i = 0\n for o in self.outputs:\n if o.is_measured_output():\n i += 1\n return i", "def GetNumberOfComponents():\n return _itkRGBAPixelPython.itkRGBAPixelF_GetNumberOfComponents()", "def device_count() -> int:\n return 
flow._oneflow_internal.CudaGetDeviceCount()", "def outputs_count(self):\n return len(self._output_nodes_map.keys())", "def output_channels(self, input_channels):\n return input_channels", "def _n_features_out(self):\n return self.components_.shape[0]", "def output_channels(self, input_channels):\n pass", "def __len__(self):\n num_x, num_y = self.conv_dims()\n return num_x * num_y", "def numberOfCamera():\n return numCams", "def get_dimension_number(self) -> int:\n return np.squeeze(self._channel_arrays[0]).ndim", "def get_output_dim(self) -> int:\n raise NotImplementedError", "def output_size(self) -> int:\n return self.output_dim", "def num_feature_outputs(self):\n pass", "def itkRGBAPixelUS_GetNumberOfComponents():\n return _itkRGBAPixelPython.itkRGBAPixelUS_GetNumberOfComponents()", "def get_output_dim(self) -> int:\n raise NotImplementedError", "def num_layers(self): # -> int:\n ...", "def num_channels(chip):\n return int(utils.readstr_all(os.path.join(_CHIP_PATH(chip), \"npwm\")))", "def itkRGBAPixelF_GetNumberOfComponents():\n return _itkRGBAPixelPython.itkRGBAPixelF_GetNumberOfComponents()", "def num_devices(self):\n # put every device into bypass mode (IR = all 1's)\n tdi = bits.bits()\n tdi.ones(_flush_size)\n self.driver.scan_ir(tdi)\n # now each DR is a single bit\n # the DR chain length is the number of devices\n return self.dr_length()", "def num_output_group(self):\n if self.handle is None:\n raise AttributeError('Model not loaded yet')\n out = ctypes.c_size_t()\n _check_call(_LIB.TreeliteQueryNumOutputGroups(self.handle, ctypes.byref(out)))\n return out.value", "def output_dim(self):\n return self._output_dim", "def num_outputs(cls) -> list[int]:\n return [5] * 10", "def _get_reg_out_channels(self):\n # Bbox classification and regression\n # (center residual (3), size regression (3)\n # torch.cos(yaw) (1), torch.sin(yaw) (1)\n return self.bbox_coder.code_size", "def get_size(channels):\n\n if channels not in (1, 2):\n raise ValueError('Wrong channels value. Must be equal to 1 or 2')\n\n return _get_size(channels)", "def get_count():\n _check_init()\n return _pypm.CountDevices()", "def get_control_count(cmd):\n return len(cmd.control_qubits)", "def manage_channels(_) -> int:\n return 1 << 4", "def manage_channels(_) -> int:\n return 1 << 4", "def NumBits(self):\n num_bits = 8*len(self.output)\n if self.out_boff % 8:\n num_bits -= 8\n num_bits += self.out_boff\n if num_bits < 0:\n print \"What the...\"\n return num_bits", "def get_img_channels(self, pipeline_cfg: RVPipelineConfig) -> int:\n all_scenes = pipeline_cfg.dataset.all_scenes\n if len(all_scenes) == 0:\n return 3\n for scene_cfg in all_scenes:\n if scene_cfg.raster_source.channel_order is not None:\n return len(scene_cfg.raster_source.channel_order)\n log.info(\n 'Could not determine number of image channels from '\n 'DataConfig.img_channels or RasterSourceConfig.channel_order. '\n 'Building first scene to figure it out. This might take some '\n 'time. 
To avoid this, specify one of the above.')\n with get_tmp_dir() as tmp_dir:\n scene = all_scenes[0].build(\n pipeline_cfg.dataset.class_config,\n tmp_dir,\n use_transformers=True)\n img_channels = scene.raster_source.num_channels\n return img_channels", "def getNumOutputs(self):\n return _libsbml.Transition_getNumOutputs(self)", "def size_out(self):\n if isinstance(self.ensemble.neuron_type, Direct):\n # This will prevent users from connecting/probing Direct neurons\n # (since there aren't actually any neurons being simulated).\n return 0\n return self.ensemble.n_neurons", "def output_size(self) -> int:\n return self.win_length", "def number_fdma_channels (b_hz, g_hz, u_hz):\n available_bandwidth = b_hz - g_hz #Take off one guard band since we need N+1 guard bands\n sub_channel_req = g_hz + u_hz\n num_users = math.floor(available_bandwidth / sub_channel_req)\n return num_users", "def sample_count(self):\n assert len(self.decay_x) == len(self.decay_y)\n return len(self.decay_x)", "def num_feature_outputs(self):\n return 1", "def num_wires(self):", "def __len__(self):\n\n try:\n return len(self.counts)\n except SpectrumError:\n return len(self.cps)", "def getNrSamples(self): \r\n return self.numSamples", "def countChannels(channels):\n if (channels == ''):\n return 0\n tokens = channels.split(',')\n nspw = len(tokens)\n count = {}\n for i in range(nspw):\n string = tokens[i].split(':')\n if (len(string) == 2):\n spw,string = string\n else:\n string = string[0]\n spw = 0\n ranges = string.split(';')\n for r in ranges:\n c0 = int(r.split('~')[0])\n c1 = int(r.split('~')[1])\n if (c0 > c1):\n casalogPost(\"Invalid channel range: c0 > c1 (%d > %d)\" % (c0,c1))\n return\n channels = [1+int(r.split('~')[1])-int(r.split('~')[0]) for r in ranges]\n count[spw] = np.sum(channels)\n if (nspw == 1):\n count = count[spw]\n return(count)", "def comchans(self, nick):\n comchannels = 0\n for chan in self.chandb:\n if nick in chan:\n comchannels += 1\n return comchannels", "def num_devices(self):\n\t\t\treturn cuda.Device.count()", "def outputsizes(self):\n result = [] \n for q in self.outqueues:\n result.append(q.qsize())\n return result", "def get_number_of_input_connections(self):\n return 1", "def getOutputLength(self):\n return len(self.Y[0])", "def __len__(self):\n return int(np.ceil(self.total_frame_count / self.batch_size))", "def get_output_tensor_size(self, index):\n return self._engine.get_output_tensor_size(index)", "def countGPUs(self):\n return libnao_gpu.CountDevices()", "def num_pipes(self):\n return len(self._link_reg.pipe_names)", "def n_components(self):\n return self._components.shape[0]", "def _get_number_of_gpu_devices_connected(self):\n gpu_devices = self._get_gpu_pci_devices()\n gpu_devices_count = len(gpu_devices)\n return {'pci_gpu_devices': gpu_devices_count}", "def max_noutput_items(self) -> \"int\":\n return _beamforming_swig.randomsampler_sptr_max_noutput_items(self)", "def size(self):\n return self.num_inputs, self.num_outputs", "def num_layers(self):\n return self._num_layers", "def get_num_of_images(self):", "def get_out_dim(self) -> int:\n return self.out_dim", "def n_spectra(self):\n return np.product(self.image_shape)", "def output_dim(self) -> int:\n return 2 * self._hidden_dim", "def times(self) -> int:\n return self._channel_arrays[0].shape[self.time_pos]", "def get_length(self):\r\n check_mixer()\r\n frequency, format, channels = (ffi.new('int*'), ffi.new('uint16_t*'),\r\n ffi.new('int*'))\r\n sdl.Mix_QuerySpec(frequency, format, channels)\r\n if format == sdl.AUDIO_S8 
or format == sdl.AUDIO_U8:\r\n mixerbytes = 1.0\r\n else:\r\n mixerbytes = 2.0\r\n numsamples = self.chunk.alen / mixerbytes / channels[0]\r\n return numsamples / frequency[0]", "def _number_of_samples(self):\n return len(self._raw_data.samples)" ]
[ "0.7895569", "0.78375614", "0.7822447", "0.77712107", "0.763508", "0.7623057", "0.76186246", "0.75209033", "0.7185467", "0.717589", "0.71026057", "0.70969474", "0.7064426", "0.7045207", "0.6935932", "0.6927934", "0.69142646", "0.68657845", "0.6865311", "0.6837313", "0.68344104", "0.68180615", "0.68180615", "0.67348015", "0.67129594", "0.6697453", "0.669631", "0.66316694", "0.6584298", "0.6572451", "0.656691", "0.6535079", "0.6528081", "0.65194184", "0.6473102", "0.6470141", "0.6451097", "0.6439375", "0.6418913", "0.6416347", "0.64159495", "0.6406746", "0.6400653", "0.63962936", "0.6383499", "0.63727564", "0.6362386", "0.63583755", "0.6347892", "0.63472974", "0.633909", "0.6313229", "0.6273918", "0.6251309", "0.6245644", "0.6206478", "0.62034655", "0.6194772", "0.6184257", "0.6177779", "0.6176059", "0.6160119", "0.6151533", "0.61434734", "0.613751", "0.6137257", "0.6137257", "0.6111357", "0.60910016", "0.60704184", "0.6058816", "0.6046864", "0.6046727", "0.6043651", "0.60315555", "0.6012549", "0.6005122", "0.5999347", "0.5985333", "0.5983717", "0.5979791", "0.5970604", "0.59702194", "0.59679276", "0.5963096", "0.5938573", "0.592959", "0.5914273", "0.5913915", "0.58997405", "0.5899218", "0.58941483", "0.58937496", "0.5871926", "0.5870722", "0.58552396", "0.58440083", "0.58419746", "0.583055", "0.58259803" ]
0.86962193
0
Adds operations that perform JPEG decoding and resizing to the graph.
def _image_preprocess_fn(image_buffer, input_height, input_width, input_mean,
                         input_std, return_full_size_image=False):
    # image_buffer: 1-D string Tensor representing the raw JPEG image buffer.

    # Extract image shape from raw JPEG image buffer.
    image_shape = tf.image.extract_jpeg_shape(image_buffer)

    # Decode and crop image.
    offset_x = 0
    offset_y = image_shape[0] // 3  # We want to crop off the top third of the image
    crop_width = image_shape[1]
    crop_height = 2 * image_shape[0] // 3
    crop_window = tf.stack([offset_y, offset_x, crop_height, crop_width])
    cropped_image = tf.image.decode_and_crop_jpeg(image_buffer, crop_window, channels=3)

    # Resize image.
    # decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)
    decoded_image_4d = tf.expand_dims(cropped_image, 0)
    resize_shape = tf.stack([input_height, input_width])
    resize_shape_as_int = tf.cast(resize_shape, dtype=tf.int32)
    resized_image = tf.image.resize_bilinear(decoded_image_4d, resize_shape_as_int)

    # Normalize image.
    offset_image = tf.subtract(resized_image, input_mean)
    mul_image = tf.multiply(offset_image, 1.0 / input_std)

    if return_full_size_image:
        return tf.squeeze(mul_image, axis=0), cropped_image
    return tf.squeeze(mul_image, axis=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process(self, image):", "def process_image(self):\n pass", "def adjust(self, image):\n ...", "def augment(self, image):\n pass", "def add_jpeg_decoding(input_width, input_height, input_depth, input_mean,\n input_std):\n\n jpeg_data = tf.placeholder(tf.string, name='DecodeJPGInput')\n decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)\n decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)\n decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)\n resize_shape = tf.stack([input_height, input_width])\n resize_shape_as_int = tf.cast(resize_shape, dtype=tf.int32)\n resized_image = tf.image.resize_bilinear(decoded_image_4d,\n resize_shape_as_int)\n offset_image = tf.subtract(resized_image, input_mean)\n mul_image = tf.multiply(offset_image, 1.0 / input_std)\n\n return jpeg_data, mul_image, decoded_image", "def add_jpeg_decoding(input_width, input_height, input_depth, input_mean,\n input_std):\n jpeg_data = tf.placeholder(tf.string, name='DecodeJPGInput')\n decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)\n decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)\n decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)\n resize_shape = tf.stack([input_height, input_width])\n resize_shape_as_int = tf.cast(resize_shape, dtype=tf.int32)\n resized_image = tf.image.resize_bilinear(decoded_image_4d,\n resize_shape_as_int)\n offset_image = tf.subtract(resized_image, input_mean)\n mul_image = tf.multiply(offset_image, 1.0 / input_std)\n return jpeg_data, mul_image", "def add_jpeg_decoding(input_width, input_height, input_depth, input_mean,\n input_std):\n jpeg_data = tf.placeholder(tf.string, name='DecodeJPGInput')\n decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)\n decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)\n decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)\n resize_shape = tf.stack([input_height, input_width])\n resize_shape_as_int = tf.cast(resize_shape, dtype=tf.int32)\n resized_image = tf.image.resize_bilinear(decoded_image_4d,\n resize_shape_as_int)\n offset_image = tf.subtract(resized_image, input_mean)\n mul_image = tf.multiply(offset_image, 1.0 / input_std)\n return jpeg_data, mul_image", "def process(image):\n pass", "def encode_decode(self, img, img_metas):\n pass", "def encode(image):\n from encoder import launch\n launch(image)", "def jpeg_decode(hic: hic.HicImage) -> model.CompressedImage:\n utils.debug_msg(\"JPEG decode\")\n assert hic.hic_type == model.Compression.JPEG\n payloads = hic.payloads\n utils.debug_msg(\"Decoding Huffman trees\")\n dc_huffs = {\n \"lum\": huffman_decode(payloads[0]),\n \"cr\": huffman_decode(payloads[1]),\n \"cb\": huffman_decode(payloads[2])\n }\n ac_value_huffs = {\n \"lum\": huffman_decode(payloads[3]),\n \"cr\": huffman_decode(payloads[4]),\n \"cb\": huffman_decode(payloads[5])\n }\n ac_length_huffs = {\n \"lum\": huffman_decode(payloads[6]),\n \"cr\": huffman_decode(payloads[7]),\n \"cb\": huffman_decode(payloads[8])\n }\n\n utils.debug_msg(\"Decode DC differences\")\n dc_comps = {\n \"lum\": huffman_data_decode(payloads[9], dc_huffs[\"lum\"]),\n \"cr\": huffman_data_decode(payloads[10], dc_huffs[\"cr\"]),\n \"cb\": huffman_data_decode(payloads[11], dc_huffs[\"cb\"]),\n }\n\n utils.debug_msg(\"Decode RLE values\")\n ac_values = {\n \"lum\": huffman_data_decode(payloads[12], ac_value_huffs[\"lum\"]),\n \"cr\": huffman_data_decode(payloads[13], ac_value_huffs[\"cr\"]),\n \"cb\": huffman_data_decode(payloads[14], 
ac_value_huffs[\"cb\"]),\n }\n utils.debug_msg(\"Decode RLE lengths\")\n ac_lengths = {\n \"lum\": huffman_data_decode(payloads[15], ac_length_huffs[\"lum\"]),\n \"cr\": huffman_data_decode(payloads[16], ac_length_huffs[\"cr\"]),\n \"cb\": huffman_data_decode(payloads[17], ac_length_huffs[\"cb\"]),\n }\n shapes = {\n \"lum\": payloads[18].numbers,\n \"cr\": payloads[19].numbers,\n \"cb\": payloads[19].numbers\n }\n utils.debug_msg(\"Unloaded all of the data\")\n # ====\n\n sub_length = utils.size(settings.JPEG_BLOCK_SHAPE()) - 1\n utils.debug_msg(\"Calculating AC RLEs\")\n ac_rle = utils.dict_map(ac_values,\n lambda k, v: [RunLength(t[1], t[0]) for t in list(zip(ac_lengths[k], v))])\n\n def ac_mat_fun(k, v):\n utils.debug_msg(\"Determining deficient AC matricies for: \" + k)\n ac_length = utils.size(shapes[k]) - len(dc_comps[k])\n out = decode_run_length(v, ac_length)\n if k == \"lum\":\n s = [str(i) for i in out]\n print(\" \".join(s))\n return out\n\n ac_mats = utils.dict_map(ac_rle, ac_mat_fun)\n ac_mats = utils.dict_map(ac_mats, lambda _, v: utils.group_tuples(v, sub_length))\n dc_comps = utils.dict_map(dc_comps, lambda _, v: utils.invert_differences(v))\n\n def merge_comps(dc_key, dc_values):\n utils.debug_msg(\"Merging: \" + dc_key)\n tuples = ac_mats[dc_key] # there are all of the AC zigzag arrays missing their DC component\n assert len(tuples) == len(dc_values)\n zipped = zip(dc_values, tuples) # combine them to be mapped later\n lin_mats = [[t[0], *t[1]] for t in zipped] # create the linearized block\n mats = [transform.izigzag(np.array(m), settings.JPEG_BLOCK_SHAPE()) for m in lin_mats]\n return mats\n\n compressed = utils.dict_map(dc_comps, merge_comps)\n merged = utils.dict_map(compressed, lambda k, v: transform.merge_blocks(np.array(v), shapes[k]))\n return model.CompressedImage.from_dict(merged)", "def process(self,pixmap):", "def _build_final_image(self, image):\n raise NotImplementedError", "def process_image(image):\n image = resize(image)\n return image", "def concatenate_frames(I, Stokes, AOP, DOP, path_process, k, imgs_polar): #, Min, Max, im_cos, im_sin, rho, phi):\n\n \"\"\"# Fusion\n im_fusion = np.zeros((500, 500, 5), dtype=int)\n im_fusion[:, :, 0] = Stokes[0]\n im_fusion[:, :, 1] = Stokes[1]\n im_fusion[:, :, 2] = Stokes[2]\n im_fusion[:, :, 3] = AOP\n im_fusion[:, :, 4] = DOP\n if not os.path.exists(path_process + \"Fusion/\"):\n os.mkdir(path_process + \"Fusion/\")\n np.save(path_process + \"Fusion/\" + imgs_polar[k].split(\".\")[0], im_fusion.astype(np.uint8))\"\"\"\n\n \"\"\"# RetinaNet intensities\n im_I04590 = np.zeros((500, 500, 3))\n im_I04590[:, :, 0] = I[0]\n im_I04590[:, :, 1] = I[1]\n im_I04590[:, :, 2] = I[2]\n if not os.path.exists(path_process + \"I04590/\"):\n os.mkdir(path_process + \"I04590/\")\n imageio.imwrite(path_process + \"I04590/\" + imgs_polar[k].split(\".\")[0] + \".png\", im_I04590)\n\n # Min Max total intensity\n im_min_max = np.zeros((500, 500, 3))\n im_min_max[:, :, 0] = Stokes[0]\n im_min_max[:, :, 1] = Max\n im_min_max[:, :, 2] = Min\n if not os.path.exists(path_process + \"MinMax/\"):\n os.mkdir(path_process + \"MinMax/\")\n imageio.imwrite(path_process + \"MinMax/\" + imgs_polar[k].split(\".\")[0] + \".png\", im_min_max)\n\n # Cos Sin total intensity\n im_cos_sin = np.zeros((500, 500, 3))\n im_cos_sin[:, :, 0] = Stokes[0]\n im_cos_sin[:, :, 1] = im_cos\n im_cos_sin[:, :, 2] = im_sin\n if not os.path.exists(path_process + \"CosSin/\"):\n os.mkdir(path_process + \"CosSin/\")\n imageio.imwrite(path_process + \"CosSin/\" + 
imgs_polar[k].split(\".\")[0] + \".png\", im_cos_sin)\"\"\"\n\n \"\"\"# Cos Sin total intensity\n im_cos_sin = np.zeros((500, 500, 3))\n im_cos_sin[:, :, 0] = DOP\n im_cos_sin[:, :, 1] = im_cos\n im_cos_sin[:, :, 2] = im_sin\n if not os.path.exists(path_process + \"CosSin2_s/\"):\n os.mkdir(path_process + \"CosSin2_s/\")\n imageio.imwrite(path_process + \"CosSin2_s/\" + imgs_polar[k].split(\".\")[0] + \".png\", im_cos_sin)\"\"\"\n\n\n \"\"\"im_I045135 = np.zeros((500, 500, 3))\n im_I045135[:, :, 0] = I[0]\n im_I045135[:, :, 1] = I[3]\n im_I045135[:, :, 2] = I[1]\n if not os.path.exists(path_process + \"I013545/\"):\n os.mkdir(path_process + \"I013545/\")\n imageio.imwrite(path_process + \"I013545/\" + imgs_polar[k].split(\".\")[0] + \".png\", im_I045135)\n\n im_I090135 = np.zeros((500, 500, 3))\n im_I090135[:, :, 0] = I[0]\n im_I090135[:, :, 1] = I[2]\n im_I090135[:, :, 2] = I[3]\n if not os.path.exists(path_process + \"I090135/\"):\n os.mkdir(path_process + \"I090135/\")\n imageio.imwrite(path_process + \"I090135/\" + imgs_polar[k].split(\".\")[0] + \".png\", im_I090135)\n\n im_I4590135 = np.zeros((500, 500, 3))\n im_I4590135[:, :, 0] = I[1]\n im_I4590135[:, :, 1] = I[2]\n im_I4590135[:, :, 2] = I[3]\n if not os.path.exists(path_process + \"I4590135/\"):\n os.mkdir(path_process + \"I4590135/\")\n imageio.imwrite(path_process + \"I4590135/\" + imgs_polar[k].split(\".\")[0] + \".png\", im_I4590135)\n\n im_I090135 = np.zeros((500, 500, 3))\n im_I090135[:, :, 0] = I[0] - I[1]\n im_I090135[:, :, 1] = I[0]\n im_I090135[:, :, 2] = I[0] + I[1]\n if not os.path.exists(path_process + \"RetinaNet_Ieq1/\"):\n os.mkdir(path_process + \"RetinaNet_Ieq1/\")\n imageio.imwrite(path_process + \"RetinaNet_Ieq1/\" + str(k) + \".png\", im_I090135)\n\n im_I090135 = np.zeros((500, 500, 3))\n im_I090135[:, :, 0] = I[0] - I[3]\n im_I090135[:, :, 1] = I[0]\n im_I090135[:, :, 2] = I[0] + I[3]\n if not os.path.exists(path_process + \"RetinaNet_Ieq2/\"):\n os.mkdir(path_process + \"RetinaNet_Ieq2/\")\n imageio.imwrite(path_process + \"RetinaNet_Ieq2/\" + str(k) + \".png\", im_I090135)\n\n im_I090135 = np.zeros((500, 500, 3))\n im_I090135[:, :, 0] = I[1] - I[2]\n im_I090135[:, :, 1] = I[1]\n im_I090135[:, :, 2] = I[1] + I[2]\n if not os.path.exists(path_process + \"RetinaNet_Ieq3/\"):\n os.mkdir(path_process + \"RetinaNet_Ieq3/\")\n imageio.imwrite(path_process + \"RetinaNet_Ieq3/\" + str(k) + \".png\", im_I090135)\n\n im_I090135 = np.zeros((500, 500, 3))\n im_I090135[:, :, 0] = I[0]/I[1]\n im_I090135[:, :, 1] = I[0]/I[2]\n im_I090135[:, :, 2] = I[0]/I[3]\n if not os.path.exists(path_process + \"RetinaNet_Ieq4/\"):\n os.mkdir(path_process + \"RetinaNet_Ieq4/\")\n imageio.imwrite(path_process + \"RetinaNet_Ieq4/\" + str(k) + \".png\", im_I090135)\n\n im_I4590135 = np.zeros((500, 500, 3))\n im_I4590135[:, :, 0] = I[0]\n im_I4590135[:, :, 1] = I[0]/I[1]\n im_I4590135[:, :, 2] = I[0]/I[2]\n if not os.path.exists(path_process + \"RetinaNet_eq5/\"):\n os.mkdir(path_process + \"RetinaNet_eq5/\")\n imageio.imwrite(path_process + \"RetinaNet_eq5/\" + str(k) + \".png\", im_I4590135)\n\n im_I4590135 = np.zeros((500, 500, 3))\n im_I4590135[:, :, 0] = I[0]\n im_I4590135[:, :, 1] = I[0] / I[2]\n im_I4590135[:, :, 2] = I[0] / I[3]\n if not os.path.exists(path_process + \"RetinaNet_eq6/\"):\n os.mkdir(path_process + \"RetinaNet_eq6/\")\n imageio.imwrite(path_process + \"RetinaNet_eq6/\" + str(k) + \".png\", im_I4590135)\n\n im_I4590135 = np.zeros((500, 500, 3))\n im_I4590135[:, :, 0] = I[1] / I[0]\n im_I4590135[:, :, 1] = I[1] / 
I[2]\n im_I4590135[:, :, 2] = I[1] / I[3]\n if not os.path.exists(path_process + \"RetinaNet_eq7/\"):\n os.mkdir(path_process + \"RetinaNet_eq7/\")\n imageio.imwrite(path_process + \"RetinaNet_eq7/\" + str(k) + \".png\", im_I4590135)\n\n im_I4590135 = np.zeros((500, 500, 3))\n im_I4590135[:, :, 0] = I[2] / I[0]\n im_I4590135[:, :, 1] = I[2] / I[1]\n im_I4590135[:, :, 2] = I[2] / I[3]\n if not os.path.exists(path_process + \"RetinaNet_eq8/\"):\n os.mkdir(path_process + \"RetinaNet_eq8/\")\n imageio.imwrite(path_process + \"RetinaNet_eq8/\" + str(k) + \".png\", im_I4590135)\n\n im_I4590135 = np.zeros((500, 500, 3))\n im_I4590135[:, :, 0] = I[3] / I[0]\n im_I4590135[:, :, 1] = I[3] / I[1]\n im_I4590135[:, :, 2] = I[3] / I[2]\n if not os.path.exists(path_process + \"RetinaNet_eq9/\"):\n os.mkdir(path_process + \"RetinaNet_eq9/\")\n imageio.imwrite(path_process + \"RetinaNet_eq9/\" + str(k) + \".png\", im_I4590135)\n\n im_I4590135 = np.zeros((500, 500, 3))\n im_I4590135[:, :, 0] = I[0]/I[1]\n im_I4590135[:, :, 1] = I[0] / I[2]\n im_I4590135[:, :, 2] = DOP/255\n if not os.path.exists(path_process + \"RetinaNet_eq10/\"):\n os.mkdir(path_process + \"RetinaNet_eq10/\")\n imageio.imwrite(path_process + \"RetinaNet_eq10/\" + str(k) + \".png\", im_I4590135)\"\"\"\n\n # retinaNet Stokes\n im_Stokes = np.zeros((Stokes.shape[1], Stokes.shape[2], 3))\n im_Stokes[:, :, 0] = Stokes[0]\n im_Stokes[:, :, 1] = Stokes[1]\n im_Stokes[:, :, 2] = Stokes[2]\n if not os.path.exists(path_process + \"Stokes/\"):\n os.mkdir(path_process + \"Stokes/\")\n imageio.imwrite(path_process + \"Stokes/\" + imgs_polar[k].split(\".\")[0] + \".png\", im_Stokes)\n \"\"\"\n\n # RetinaNet Params\n im_Params = np.zeros((500, 500, 3))\n im_Params[:, :, 0] = Stokes[0]\n im_Params[:, :, 1] = AOP\n im_Params[:, :, 2] = DOP\n if not os.path.exists(path_process + \"Params/\"):\n os.mkdir(path_process + \"Params/\")\n imageio.imwrite(path_process + \"Params/\" + imgs_polar[k].split(\".\")[0] + \".png\", im_Params)\"\"\"\n\n \"\"\"# HSV image\n HSV = np.zeros((500, 500, 3))\n HSV[:, :, 0] = AOP / 255 * 179\n HSV[:, :, 1] = DOP\n HSV[:, :, 2] = Stokes[0]\n if not os.path.exists(path_process + \"HSV/\"):\n os.mkdir(path_process + \"HSV/\")\n imageio.imwrite(path_process + \"HSV/\" + imgs_polar[k].split(\".\")[0] + \".png\", HSV)\"\"\"\n\n \"\"\"inten = (I[0] + I[1] + I[2] + I[3]) / 2\n\n hsv = np.uint8(cv2.merge(((phi + np.pi/2)/np.pi*180,rho/np.max(rho)*255, inten/inten.max()*255)))\n if not os.path.exists(path_process + \"HSV_2/\"):\n os.mkdir(path_process + \"HSV_2/\")\n imageio.imwrite(path_process + \"HSV_2/\" + imgs_polar[k].split(\".\")[0] + \".png\", hsv)\"\"\"\n\n \"\"\"# TSV image\n TSV = np.zeros((500, 500, 3))\n TSV[:, :, 0] = AOP\n TSV[:, :, 1] = DOP\n TSV[:, :, 2] = inten / inten.max() * 255\n if not os.path.exists(path_process + \"RetinaNet_TSV/\"):\n os.mkdir(path_process + \"RetinaNet_TSV/\")\n imageio.imwrite(path_process + \"RetinaNet_TSV/\" + str(k) + \".png\", TSV)\n\n # Pauli image\n Pauli = np.zeros((500, 500, 3))\n Pauli[:, :, 0] = I[2]\n Pauli[:, :, 1] = I[1]\n Pauli[:, :, 2] = I[0]\n if not os.path.exists(path_process + \"RetinaNet_Pauli/\"):\n os.mkdir(path_process + \"RetinaNet_Pauli/\")\n imageio.imwrite(path_process + \"RetinaNet_Pauli/\" + str(k) + \".png\", Pauli)\"\"\"\n\n \"\"\"Pauli = np.zeros((500, 500, 3))\n Pauli[:, :, 0] = I[0] + I[2]\n Pauli[:, :, 1] = I[1]\n Pauli[:, :, 2] = I[0] - I[2]\n if not os.path.exists(path_process + \"Pauli2/\"):\n os.mkdir(path_process + \"Pauli2/\")\n 
imageio.imwrite(path_process + \"Pauli2/\" + imgs_polar[k].split(\".\")[0] + \".png\", Pauli)\"\"\"\n\n \"\"\"Pauli = np.zeros((500, 500, 3))\n Pauli[:, :, 0] = I[0] + I[2]\n Pauli[:, :, 1] = I[1]\n Pauli[:, :, 2] = I[0] - I[2]\n if not os.path.exists(path_process + \"Pauli2_inv/\"):\n os.mkdir(path_process + \"Pauli2_inv/\")\n imageio.imwrite(path_process + \"Pauli2_inv/\" + imgs_polar[k].split(\".\")[0] + \".png\", Pauli)\"\"\"\n\n \"\"\"Pauli = np.zeros((500, 500, 3))\n Pauli[:, :, 0] = Stokes[0]\n Pauli[:, :, 1] = I[1]\n Pauli[:, :, 2] = Stokes[1]\n if not os.path.exists(path_process + \"Pauli2/\"):\n os.mkdir(path_process + \"Pauli2/\")\n imageio.imwrite(path_process + \"Pauli2/\" + imgs_polar[k].split(\".\")[0] + \".png\", Pauli)\n\n Pauli = np.zeros((500, 500, 3))\n Pauli[:, :, 0] = I[0]\n Pauli[:, :, 1] = (I[1]+I[3])/2\n Pauli[:, :, 2] = I[2]\n if not os.path.exists(path_process + \"Sinclair/\"):\n os.mkdir(path_process + \"Sinclair/\")\n imageio.imwrite(path_process + \"Sinclair/\" + imgs_polar[k].split(\".\")[0] + \".png\", Pauli)\n\n Pauli = np.zeros((500, 500, 3))\n Pauli[:, :, 0] = Stokes[0]\n Pauli[:, :, 1] = I[1] + I[3]\n Pauli[:, :, 2] = Stokes[1]\n if not os.path.exists(path_process + \"Pauli/\"):\n os.mkdir(path_process + \"Pauli/\")\n imageio.imwrite(path_process + \"Pauli/\" + imgs_polar[k].split(\".\")[0] + \".png\", Pauli)\n\n Pauli = np.zeros((500, 500, 3))\n Pauli[:, :, 0] = I[0]\n Pauli[:, :, 1] = I[2]\n Pauli[:, :, 2] = DOP\n if not os.path.exists(path_process + \"Test/\"):\n os.mkdir(path_process + \"Test/\")\n imageio.imwrite(path_process + \"Test/\" + imgs_polar[k].split(\".\")[0] + \".png\", Pauli)\n\n Pauli = np.zeros((500, 500, 3))\n Pauli[:, :, 0] = I[1]\n Pauli[:, :, 1] = I[3]\n Pauli[:, :, 2] = DOP\n if not os.path.exists(path_process + \"Test1/\"):\n os.mkdir(path_process + \"Test1/\")\n imageio.imwrite(path_process + \"Test1/\" + imgs_polar[k].split(\".\")[0] + \".png\", Pauli)\n\n Pauli = np.zeros((500, 500, 3))\n Pauli[:, :, 0] = I[0]\n Pauli[:, :, 1] = I[3]\n Pauli[:, :, 2] = DOP\n if not os.path.exists(path_process + \"Test2/\"):\n os.mkdir(path_process + \"Test2/\")\n imageio.imwrite(path_process + \"Test2/\" + imgs_polar[k].split(\".\")[0] + \".png\", Pauli)\n\n Pauli = np.zeros((500, 500, 3))\n Pauli[:, :, 0] = I[0]\n Pauli[:, :, 1] = I[1] + I[2] - I[3]\n Pauli[:, :, 2] = DOP\n if not os.path.exists(path_process + \"Test3/\"):\n os.mkdir(path_process + \"Test3/\")\n imageio.imwrite(path_process + \"Test3/\" + imgs_polar[k].split(\".\")[0] + \".png\", Pauli)\"\"\"\n\n \"\"\"Pauli = np.zeros((500, 500, 3))\n Pauli[:, :, 0] = I[0]\n Pauli[:, :, 1] = I[1]\n Pauli[:, :, 2] = (I[0]/I[1]) #/ np.amax(I[0] / I[1]) * 255\n if not os.path.exists(path_process + \"Pauli3/\"):\n os.mkdir(path_process + \"Pauli3/\")\n imageio.imwrite(path_process + \"Pauli3/\" + imgs_polar[k].split(\".\")[0] + \".png\", Pauli)\n\n Rachel = np.zeros((500, 500, 3))\n Rachel[:, :, 0] = Stokes[0]\n Rachel[:, :, 1] = Stokes[1]\n Rachel[:, :, 2] = DOP\n if not os.path.exists(path_process + \"RetinaNet_Rachel/\"):\n os.mkdir(path_process + \"RetinaNet_Rachel/\")\n imageio.imwrite(path_process + \"RetinaNet_Rachel/\" + str(k) + \".png\", Rachel)\n\n Rachel = np.zeros((500, 500, 3))\n Rachel[:, :, 0] = I[1]\n Rachel[:, :, 1] = I[0]\n Rachel[:, :, 2] = DOP\n if not os.path.exists(path_process + \"RetinaNet_Rachel2/\"):\n os.mkdir(path_process + \"RetinaNet_Rachel2/\")\n imageio.imwrite(path_process + \"RetinaNet_Rachel2/\" + str(k) + \".png\", Rachel)\"\"\"", "def 
add_processed_image(image_proc_type, name, b64_string, export_file_type):\n\n if image_proc_type == \"contrast stretching\":\n info = process_contrast_stretch(name, b64_string, export_file_type)\n metrics_list = list(info[4])\n num_pixels = metrics_list[0]\n x_coord = metrics_list[1]\n y_coord = metrics_list[2]\n avg_value = metrics_list[3]\n metrics_output = [num_pixels, x_coord, y_coord, avg_value]\n info[6] = info[6].decode(\"utf-8\")\n add_file(info[0], info[1], info[2], info[3], metrics_output, info[6])\n logging.info('Image processed with contrast stretching')\n\n if image_proc_type == \"adaptive equalization\":\n info = process_adapt_equalization(name, b64_string, export_file_type)\n metrics_list = list(info[4])\n num_pixels = metrics_list[0]\n x_coord = metrics_list[1]\n y_coord = metrics_list[2]\n avg_value = metrics_list[3]\n metrics_output = [num_pixels, x_coord, y_coord, avg_value]\n info[6] = info[6].decode(\"utf-8\")\n add_file(info[0], info[1], info[2], info[3], metrics_output, info[6])\n logging.info('Image processed with adaptive equalization')\n\n if image_proc_type == \"histogram equalization\":\n info = process_histogram_equalization(name, b64_string, export_file_type)\n metrics_list = list(info[4])\n num_pixels = metrics_list[0]\n x_coord = metrics_list[1]\n y_coord = metrics_list[2]\n avg_value = metrics_list[3]\n metrics_output = [num_pixels, x_coord, y_coord, avg_value]\n info[6] = info[6].decode(\"utf-8\")\n add_file(info[0], info[1], info[2], info[3], metrics_output, info[6])\n logging.info('Image processed with histogram equalization')\n\n if image_proc_type == \"reverse video\":\n info = process_reverse_image(name, b64_string, export_file_type)\n metrics_list = list(info[4])\n num_pixels = metrics_list[0]\n x_coord = metrics_list[1]\n y_coord = metrics_list[2]\n avg_value = metrics_list[3]\n metrics_output = [num_pixels, x_coord, y_coord, avg_value]\n info[6] = info[6].decode(\"utf-8\")\n add_file(info[0], info[1], info[2], info[3], metrics_output, info[6])\n logging.info('Image processed with reverse image')\n\n if image_proc_type == \"log compression\":\n info = process_log_compression(name, b64_string, export_file_type)\n metrics_list = list(info[4])\n num_pixels = metrics_list[0]\n x_coord = metrics_list[1]\n y_coord = metrics_list[2]\n avg_value = metrics_list[3]\n metrics_output = [num_pixels, x_coord, y_coord, avg_value]\n info[6] = info[6].decode(\"utf-8\")\n add_file(info[0], info[1], info[2], info[3], metrics_output, info[6])\n logging.info('Image processed with log compression')\n\n return jsonify(\"it worked\")", "def transform(self, previousimage):", "def clumpy_graph(self,img):\n \n id = self._getGraphId()\n root = 'S_%s' % (id,)\n pngname = root + '.png' ; epsname = root + '.eps'\n jpgname = root + '.jpg'\n doStamp(img.copy(),pngname,format='PNG')\n Convert(pngname,jpgname)\n \n Painted = Paint(jpgname)\n try : Painted.load()\n except IOError : stop()\n text = 'S=%5.2f' % (self['M_S'])\n # Painted.Graffiti(text,commtextpos)\n Painted.save(jpgname) \n Painted.release()\n \n Convert(jpgname,epsname)\n os.system('rm %s %s' % (pngname,jpgname))\n self['figures']['S'] = epsname\n self['figcomms']['S'] = text", "def graphCut(img, center, radius, temp, edge, count, editPoints, padList, theta_width, phi_width):\r\n\r\n\r\n \"\"\"Important note. The labeled image is referred to as temp, or self.temp in the interface.\r\n This stands for template. 
The previously labled image is fed back into the graphcut\"\"\"\r\n \r\n \"\"\"create polar images and cost arrays\"\"\"\r\n \r\n print \"RUNNING GRAPHCUT!\"\r\n img= padImage(img, padList)\r\n temp= padImage(temp, padList)\r\n edge= padImage(edge, padList)\r\n center= padCenter(center, padList)\r\n \r\n polar_img= img2polar(img, center, radius, theta_width=theta_width, phi_width=phi_width)\r\n\r\n \r\n \r\n polar_grad, y, x = np.gradient(np.array(polar_img, dtype='float'))\r\n \"\"\"Lockett 100416 replacement line below to not use gradient when the image has a surface label\"\"\"\r\n \"\"\"polar_grad = -1 * np.array(polar_img, dtype='float')\"\"\"\r\n \r\n \r\n polar_cost = -1 * np.ones(polar_img.shape)\r\n for r in range(1,radius):\r\n polar_cost[r]= polar_grad[r]-polar_grad[r-1]\r\n\r\n \r\n \r\n \"\"\"\r\n flip the cost image upside down. This is so that the base set is at the bottom of the array\r\n since the graphcut cuts from top to bottom, this inversion is necessary.\r\n \"\"\"\r\n polar_cost_inv=polar_cost[::-1,:,:]\r\n\r\n print \"CONSTRUCTING GRAPH EDGES... \"\r\n \r\n \"\"\"construct the graph using PyMaxFlow\"\"\"\r\n g=maxflow.GraphFloat()\r\n nodeids=g.add_grid_nodes(polar_img.shape)\r\n structure=np.zeros((3,3,3))\r\n structure[2]= np.array([[0,10000,0],[10000, 10000, 10000],[0, 10000, 0]])\r\n g.add_grid_edges(nodeids, structure=structure, symmetric=False)\r\n\r\n \r\n \"\"\"convert the previously labeled image (temp) into a polar transform image. Take the labels and\r\n give them high cost edge weights so the segmentation avoids previously labeled objects\"\"\"\r\n polar_lbl_img= img2polar(temp, center, radius, theta_width=theta_width, phi_width=phi_width)\r\n polar_lbl_img_inv= polar_lbl_img[::-1,:]\r\n \r\n lbl_caps= polar_lbl_img_inv>0\r\n self_caps= (polar_lbl_img_inv==count)\r\n lbl_caps-=self_caps\r\n lbl_source_caps= np.zeros(lbl_caps.shape)\r\n lbl_sink_caps= lbl_caps*10000\r\n g.add_grid_tedges(nodeids, lbl_source_caps, lbl_sink_caps)\r\n \r\n structure2= 10000*np.array([[0,0,0],[0,0,1],[0,1,0]])\r\n g.add_grid_edges(nodeids[radius-1], structure=structure2, symmetric=True)\r\n\r\n \"\"\"add terminal edges using two arrays whose elemnts are the costs of the edges from the source and to the\r\n sink\"\"\"\r\n print \"CONSTRUCTING GRAPH TEDGES...\"\r\n sinkcaps= polar_cost_inv * (polar_cost_inv>=0)\r\n sourcecaps = -1 * polar_cost_inv * (polar_cost_inv<0)\r\n g.add_grid_tedges(nodeids, sourcecaps, sinkcaps)\r\n\r\n \r\n\r\n \r\n \"\"\"accounts for edit points. 
Takes every point in the edit point list, converts it to its spherical coordinate, and adds high cost\r\n edges in the column of that edit point inverts the x and y coordinates of the center\"\"\"\r\n center= np.array((center[0], center[2], center[1]))\r\n if len(editPoints)!=0:\r\n for coords in editPoints:\r\n\r\n \r\n rad= math.sqrt((center[0]-coords[0])**2+ (center[1]-coords[2])**2 + (center[2]-coords[1])**2) \r\n theta= math.atan2(center[2]-coords[1], coords[2]-center[1])\r\n print str((coords[0]-center[0])/(rad+1))\r\n phi=math.acos(float(coords[0]-center[0])/(rad+1))\r\n if theta<0:\r\n theta=2*math.pi+ theta\r\n theta= theta_width- theta_width*theta/(2*math.pi)-1\r\n phi= phi_width*phi/(math.pi)-1\r\n rad= radius- rad\r\n print \"POLAR COORDS: \" + str((rad, theta, phi))\r\n\r\n for r in range(0, radius):\r\n if r<=rad:\r\n g.add_tedge(nodeids[r, theta, phi], 0, 10000)\r\n \r\n else:\r\n g.add_tedge(nodeids[r, theta, phi], 10000, 0) \r\n\r\n\r\n\r\n\r\n print \"CUTTING GRAPH...\"\r\n g.maxflow()\r\n\r\n \"\"\"s-t mincut of graph. This is converted to cartesian coordinates with the function img2cart. The\r\n images are also closed to eliminate spotty areas\"\"\"\r\n \r\n print \"STARTING CARTESIAN TRANSFORM...\"\r\n polar_img_seg= np.invert(g.get_grid_segments(nodeids)[::-1,:,:])\r\n\r\n \r\n edge_img= np.zeros(img.shape)\r\n seg_img= ndimage.binary_closing(img2cart(img, polar_img_seg, center, radius, theta_width, phi_width))\r\n \r\n \r\n \"\"\"create an edge image of the segmented object\"\"\"\r\n strel=np.ones((3,3,3))\r\n erode_img=ndimage.binary_erosion(seg_img, strel)\r\n edge_img=np.logical_xor(seg_img, erode_img)\r\n \r\n\r\n \"\"\"shears the segmentation image and edge if padding was applied\"\"\"\r\n \r\n\r\n \"\"\"add the object back on to the template image (and the edge image back on the template edge)\r\n If there was an editpoint involved, remove the previous segmentation of that object and add back\r\n on the edited object\"\"\"\r\n if len(editPoints)!=0:\r\n del_img= (temp==count)*count\r\n temp-=del_img\r\n\r\n del_edge_img= (edge==count)*count\r\n edge-= del_edge_img\r\n\r\n\r\n temp+=seg_img*count\r\n edge+=edge_img*count\r\n\r\n temp= shearImage(temp, padList)\r\n edge= shearImage(edge, padList)\r\n \r\n \r\n\r\n print \"FINISHED!\"\r\n \r\n return temp, edge", "def _process_image(filename, label):\n # Read the image file.\n height = 224\n width = 224\n img_raw = tf.io.read_file(filename)\n jpeg_img = tf.image.decode_jpeg(img_raw, channels=3)\n jpeg_img_resized = tf.image.resize(jpeg_img, (height, width))\n\n return jpeg_img_resized, label", "def add_graph(self, model, image_size):\n dummy_input = torch.rand(2, 1, image_size, image_size)\n self.writer.add_graph(model, dummy_input, True)", "def jpeg_encode(compressed: model.CompressedImage) -> hic.HicImage:\n utils.debug_msg(\"Starting JPEG encoding\")\n dc_comps = utils.dict_map(compressed.as_dict,\n lambda _, v: differential_coding(transform.split_matrix(v, settings.JPEG_BLOCK_SIZE)))\n\n utils.debug_msg(\"Determined differences DC components\")\n\n def ac_comp_fun(k, v):\n utils.debug_msg(\"Determining AC components for: \" + k)\n splits = transform.split_matrix(v, settings.JPEG_BLOCK_SIZE)\n acs = transform.ac_components(splits)\n utils.debug_msg(\"Calculating RLE for: \" + k)\n out = run_length_coding(acs)\n return out\n\n # on each transformed channel, run RLE on the AC components of each block\n ac_comps = utils.dict_map(compressed.as_dict, ac_comp_fun)\n\n utils.debug_msg(\"Determined RLEs for AC 
components\")\n dc_huffs = utils.dict_map(dc_comps, lambda _, v: huffman.HuffmanTree.construct_from_data(v))\n ac_value_huffs = utils.dict_map(ac_comps,\n lambda _, v: huffman.HuffmanTree.construct_from_data(v, key_func=lambda s: s.value))\n ac_length_huffs = utils.dict_map(ac_comps,\n lambda _, v: huffman.HuffmanTree.construct_from_data(v,\n key_func=lambda s: s.length))\n\n def encode_huff(d):\n huffs = [t[1] for t in d.items()]\n return [huffman_encode(h) for h in huffs]\n\n def encode_data(d):\n huffs = [t[1] for t in d.items()]\n return [huffman_data_encode(h) for h in huffs]\n\n payloads = utils.flatten([\n encode_huff(dc_huffs),\n encode_huff(ac_value_huffs),\n encode_huff(ac_length_huffs),\n\n encode_data(dc_huffs),\n encode_data(ac_value_huffs),\n encode_data(ac_length_huffs),\n\n [\n hic.TupP(compressed.shape[0][0], compressed.shape[0][1]),\n hic.TupP(compressed.shape[1][0], compressed.shape[1][1])\n ]\n ])\n\n return hic.HicImage.jpeg_image(payloads)", "def process(self, image, annotation_meta=None):\n # image dasta stored inside DataRepresentation in data field\n data = image.data\n # internally we work with numpy arrays, so we need to convert it to pillow image object for making resize\n resized_data = Image.fromarray(data).resize((self.size, self.size), Image.ANTIALIAS)\n # return back data to numpy array\n data = np.array(resized_data)\n # expand dims for gray scale image\n if len(data.shape) == 2:\n data = np.expand_dims(data, axis=-1)\n image.data = data\n # return updated DataRepresentation\n return image", "def process_image(\n df: pd.DataFrame,\n current_id: int,\n encoder: LabelBinarizer,\n current_category_name: str,\n images_in_category: List,\n output_image_folder_path: str,\n resized_image_shape:Tuple,\n transformations:List[TransformationsEnum],\n zero_fill_id=16,\n):\n\n #todo save original picture name with folder it is in, so we can upload only new images and skip the ones that already are on the bucket\n for image in images_in_category:\n\n orignal_name = os.path.join(current_category_name,os.path.split(image)[1])\n image_path = str(image)\n\n str_id = str(current_id).zfill(zero_fill_id)\n current_id += 1\n\n image_new_name = f\"img_{current_category_name}_{str_id}.png\"\n binarized_label = encoder.transform([current_category_name])\n\n #save image file name, its category in 1hot encoding and its category name\n df.loc[len(df)] = [image_new_name, binarized_label.flatten().tolist(), current_category_name, orignal_name]\n new_image_path = os.path.join(output_image_folder_path, image_new_name)\n img = Image.open(image_path).convert('RGB').resize(resized_image_shape)\n\n #apply transformations\n if TransformationsEnum('hog') in transformations:\n img = to_hog(img)\n # because something fucky happens when you try apply HOG and rescale its intensity and then try to save it using pillow :(\n plt.imsave(new_image_path, img)\n continue\n\n if TransformationsEnum('grayscale') in transformations:\n img = to_gray_scale(img)\n\n if TransformationsEnum('edge_enh') in transformations:\n img = edge_enhacement(img)\n\n if TransformationsEnum('tv_den') in transformations:\n img = denoise_tv(img)\n\n img.save(new_image_path)\n\n\n #reutrning current_id instead\n print(f\"Processed category {current_category_name}, {len(df)} in total\")\n return df, current_id", "def decode(self, image):\r\n raise NotImplementedError(\"Not Implemented\")", "def encode(self, encode_data, image):\r\n raise NotImplementedError(\"Not Implemented\")", "def encode_png(track_metadata):\n\tprint(\"---- 
Encoding\", track_metadata.file_name, \"to PNG...\")\n\n\t# First step: OptiPNG.\n\tnew_file_name = track_metadata.file_name + \".png\"\n\toptipng_command = [\"optipng\", \"-o7\", \"-strip\", \"all\", \"-snip\", \"-out\", new_file_name, track_metadata.file_name]\n\tprint(optipng_command)\n\tprocess = subprocess.Popen(optipng_command, stdout=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\texit_code = process.wait()\n\tif exit_code != 0: #0 is success.\n\t\traise Exception(\"OptiPNG failed with exit code {exit_code}. CERR: {cerr}\".format(exit_code=exit_code, cerr=cerr))\n\n\tect_command = [\"/home/ruben/encoding/Efficient-Compression-Tool/build/ect\", \"-9\", \"-strip\", \"--allfilters-b\", \"--mt-deflate\", new_file_name]\n\tprint(ect_command)\n\tprocess = subprocess.Popen(ect_command, stdout=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\texit_code = process.wait()\n\tif exit_code != 0: #0 is success.\n\t\traise Exception(\"ECT failed with exit code {exit_code}. CERR: {cerr}\".format(exit_code=exit_code, cerr=cerr))\n\n\t#Delete old file.\n\tif os.path.exists(track_metadata.file_name):\n\t\tos.remove(track_metadata.file_name)\n\n\ttrack_metadata.file_name = new_file_name\n\ttrack_metadata.codec = \"png\"", "def on_image(self, image):", "def main():\n\n #Parse input arguments\n from argparse import ArgumentParser\n\n parser = ArgumentParser()\n\n parser.add_argument(\"-i\", \"--image\", dest=\"image\",\n help=\"specify the name of the image\", metavar=\"IMAGE\")\n\n args = parser.parse_args()\n\n #Load image\n if args.image is None:\n print(\"Please specify the name of image\")\n print(\"use the -h option to see usage information\")\n sys.exit(2)\n else:\n image_name = args.image.split(\".\")[0]\n input_image = cv2.imread(args.image, 0)\n\n\n bin_img = bi.binary_image()\n hist = bin_img.compute_histogram(input_image)\n\n outputDir = 'output/cellct/'\n outputDir_compress = 'output/Compression/'\n\n #Saving histogram to output directory \n hist_fig = plt.plot(hist)\n plt.savefig(outputDir+\"hist.png\")\n\n threshold = bin_img.find_optimal_threshold(hist)\n print(\"Optimal threshold: \", threshold)\n\n binary_img = bin_img.binarize(input_image)\n output_image_name = outputDir + \"binary_image_\" + datetime.now().strftime(\"%m%d-%H%M%S\") + \".jpg\"\n cv2.imwrite(output_image_name, binary_img)\n\n #blobcoloring\n cell_count_obj = cc.cell_counting()\n\n regions = cell_count_obj.blob_coloring(binary_img)\n stats = cell_count_obj.compute_statistics(regions)\n\n cell_stats_img = cell_count_obj.mark_regions_image(binary_img, stats)\n output_image_name = outputDir + \"cell_stats_\" + datetime.now().strftime(\"%m%d-%H%M%S\") + \".jpg\"\n cv2.imwrite(output_image_name, cell_stats_img)\n\t\n #Compression\n rle_obj = rle.rle()\n rle_code = rle_obj.encode_image(binary_img)\n print(\"-------------- Runlength Code -------------------\")\n print(rle_code)\n\n [height, width] = binary_img.shape\n\n decoded_image = rle_obj.decode_image(rle_code, height, width)\n\n output_image_name = outputDir_compress + \"decoded_image_\" + datetime.now().strftime(\"%m%d-%H%M%S\") + \".jpg\"\n cv2.imwrite(output_image_name, decoded_image)", "def image(self):\n # TODO: make sure this method works for png, gif, tiff\n if self.has_metadata:\n self.extract_metadata()\n tempdir_path = self.make_tempdir()\n tempfile_path = os.path.join(tempdir_path, self.filename)\n warnings.simplefilter('error', Image.DecompressionBombWarning)\n try: # Do image conversions\n img_in = Image.open(self.src_path)\n img_out = 
Image.frombytes(img_in.mode, img_in.size, img_in.tobytes())\n img_out.save(tempfile_path)\n self.src_path = tempfile_path\n except Exception as e: # Catch decompression bombs\n # TODO: change this from all Exceptions to specific DecompressionBombWarning\n self.add_error(e, \"Caught exception (possible decompression bomb?) while translating file {}.\".format(self.src_path))\n self.make_dangerous()\n self.add_file_string('Image file')\n self.set_property('processing_type', 'image')", "def autoencode(self, imgs):\n x = tf.layers.conv2d(imgs, filters=32, kernel_size=3, strides=1, padding='same', activation=tf.nn.relu)\n shape2 = x.shape\n x = tf.layers.max_pooling2d(x, pool_size=2, strides=2)\n x = tf.layers.conv2d(x, filters=64, kernel_size=3, strides=1, padding='same', activation=tf.nn.relu)\n shape1 = x.shape\n x = tf.layers.max_pooling2d(x, pool_size=2, strides=2)\n # shape0 = x.shape\n # x = tf.layers.conv2d(x, filters=128, kernel_size=3, strides=1, padding='same', activation=tf.nn.relu)\n # x = tf.layers.max_pooling2d(x, pool_size=2, strides=2)\n# shape_orig = x.shape\n# x = tf.layers.flatten(x)\n# shape_dense = x.shape\n# x = tf.layers.dense(x, units=512, activation=tf.nn.relu)\n# x = tf.layers.dense(x, units=shape_dense[-1], activation=tf.nn.relu)\n# x = tf.reshape(x, [-1, shape_orig[1], shape_orig[2], shape_orig[3]])\n # x = tf.layers.conv2d(x, filters=128, kernel_size=3, strides=1, padding='same', activation=tf.nn.relu)\n # x = tf.image.resize(x, size=(shape0[1], shape0[2]), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n x = tf.layers.conv2d(x, filters=64, kernel_size=3, strides=1, padding='same', activation=tf.nn.relu)\n x = tf.image.resize(x, size=(shape1[1], shape1[2]), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n x = tf.layers.conv2d(x, filters=32, kernel_size=3, strides=1, padding='same', activation=tf.nn.relu)\n x = tf.image.resize(x, size=(shape2[1], shape2[2]), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n x = tf.layers.conv2d(x, filters=3, kernel_size=3, strides=1, padding='same', activation=tf.nn.relu)\n \n return x", "def process(\n self,\n image: np.array\n ) -> np.array:\n pass", "def compress_image(filename, s):\r\n image = imread(filename) / 255\r\n size = image.shape\r\n orig_entries = image.size\r\n #colored\r\n if len(size) == 3:\r\n #plot original\r\n orig = plt.subplot(121)\r\n orig.imshow(image)\r\n orig.axis(\"off\")\r\n #red in image\r\n R = image[:,:,0]\r\n #green in image\r\n G = image[:,:,1]\r\n #blue in image\r\n B = image[:,:,2]\r\n #approximate red, green and blue in range\r\n new_R, entries_R = svd_approx(R,s)\r\n new_R = np.clip(new_R,0,1)\r\n new_G, entries_G = svd_approx(G,s)\r\n new_G = np.clip(new_G,0,1)\r\n new_B, entries_B = svd_approx(B,s)\r\n new_B = np.clip(new_B,0,1)\r\n #stack all in one array\r\n new_image = np.dstack((new_R,new_G,new_B))\r\n #plot image\r\n new = plt.subplot(122)\r\n new.imshow(new_image)\r\n new.axis(\"off\")\r\n #title image with saved number of entries\r\n plt.suptitle(str(orig_entries - (entries_R+entries_G+entries_B)) + \" Entries\")\r\n\r\n\r\n #grayscale\r\n else:\r\n #plot original\r\n orig = plt.subplot(121)\r\n orig.imshow(image, cmap=\"gray\")\r\n orig.axis(\"off\")\r\n #approximate the image\r\n new_A, entries = svd_approx(image,s)\r\n #plot it\r\n new = plt.subplot(122)\r\n new.imshow(new_A, cmap=\"gray\")\r\n new.axis(\"off\")\r\n #title image with saved number of entries\r\n plt.suptitle(str(orig_entries - entries) + \" Entries\")\r\n\r\n plt.show()", "def process_images(self):\n 
self.processed_content_image = tf.keras.applications.vgg19.preprocess_input(\n self.content_image)\n self.processed_style_image = tf.keras.applications.vgg19.preprocess_input(\n self.style_image)", "def gen():\n global dataFrame\n while True:\n frame = vs.read()\n # frame = imutils.resize(frame, width=400)\n \n (flag, encodedImage) = cv2.imencode(\".jpg\", frame.copy())\n if not flag: continue\n # print (encodedImage)\n dataFrame = yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + bytearray(encodedImage) + b'\\r\\n')", "def add_art(self,path,size=\"500\"):\n error=False\n if path:\n print(\"processing %s to %s\" % (path,self.uid))\n f=open(path,'rb') \n filedata=f.read()\n extension=(imghdr.what('',filedata) or path.rsplit(\".\")[-1].lower()).replace('jpeg','jpg')\n if not filedata:\n error= \"NO IMAGE FOUND AT '%s'\" % path\n print(error)\n elif extension in ('bmp','png'):\n filedata=self.Image.convert(filedata)\n extension='jpg' \n elif extension not in ('gif','png','jpg','jpeg'):\n error=\"only JPEG, GIF, PNG, and BMP are supported\"\n print(error)\n if not error:\n # create a new image page\n image=self.Image.new()\n image.parent=self.uid\n image.kind='image'\n image.seq=0xFFFFFF#place at end of siblings\n # set default size \n image.stage='right full %sx%s' % (size,size) #rest of stage data will be added on the fly later by get_stage_data() \n image.set_lineage()\n image.code=\"%s.%s\" % (image.uid,extension)\n image.when=DATE()\n image.flush() #store the image page\n image.renumber_siblings_by_kind()#keep them in order\n # save the image file\n image.save_file(filedata)\n # return\n print('image \"%s\" added' % image.code)\n return image\n return None", "def compress_image(filename,k):", "def PrePush(self, image):\n pass", "def large_image(self):\n pass", "def main():\r\n original = SimpleImage(\"images/poppy.png\")\r\n original.show()\r\n # shrink function\r\n after_shrink = shrink('images/poppy.png')\r\n after_shrink.show()", "def processImage(fpaths_src, label_map, fnames_src, img_idx):\n global counter\n \n n_imgs = len(fpaths_src)\n print(\"Processing %s -- %s/%s (%s%%)\"%(fnames_src[img_idx],counter,n_imgs,round(100.*counter/n_imgs)))\n \n path = fpaths_src[img_idx]\n src_image_raw = Image.open(path, 'r')\n \n # size normalization of the image\n if not (args.resize == None):\n src_image_raw = src_image_raw.resize(size=(int(args.resize), int(args.resize)), resample=Image.BILINEAR)\n \n # convert to writable numpy array\n src_image = np.asarray(src_image_raw, dtype=np.uint8)\n src_image.setflags(write=True)\n \n # some dummy label\n label = -99.99\n # the labels\n if not (label_map == {}):\n # let the label start at 1, instead of 0\n label = int(label_map[fnames_src[img_idx]])+1\n else:\n # add a dummy label (between 0 and 1)\n label = np.random.rand()\n \n image_features = []\n \n # add the original\n image_features.append(generateFeatures(src_image,label,args.knn))\n \n if args.augment == 1:\n print \"Augmenting dataset...\"\n # data augmentation techniques\n rotation_angles = [i for i in xrange(36,360,36)] # samples are transformed by these rotation angles\n \n flip_x = True # data augmentation by flipping around x axis\n flip_y = True # data augmentation by flipping around y axis\n flip_xy= True # data augmentation by flipping around x AND y axis\n \n for angle in rotation_angles:\n rot_matrix = cv2.getRotationMatrix2D(\n (src_image.shape[1]/2.,src_image.shape[0]/2.),\n angle,\n 1.0)\n rot_sample_crop = np.array([])\n rot_sample_crop = 
cv2.warpAffine(src_image,\n rot_matrix,\n (src_image.shape[1],src_image.shape[0]),\n rot_sample_crop,\n cv2.INTER_LINEAR,\n cv2.BORDER_REFLECT_101)\n \n # add the sample to the dataset\n image_features.append(generateFeatures(rot_sample_crop,label,args.knn))\n \n # add 3 flipped copies\n if flip_x:\n rot_sample_crop_x = cv2.flip(rot_sample_crop,0)\n image_features.append(generateFeatures(rot_sample_crop_x,label,args.knn))\n if flip_y:\n rot_sample_crop_y = cv2.flip(rot_sample_crop,1)\n image_features.append(generateFeatures(rot_sample_crop_y,label,args.knn))\n if flip_xy:\n rot_sample_crop_xy = cv2.flip(rot_sample_crop,-1)\n image_features.append(generateFeatures(rot_sample_crop_xy,label,args.knn))\n \n counter+=1\n\n # return a nx128 or nxk matrix for the features of all modifications of this image\n feat_matrix = np.asarray(image_features)\n return feat_matrix", "def whole_inference(self, img, img_meta, rescale):\n if not isinstance(img_meta, list):\n img_meta = img_meta.data\n seg_logit, gen_out = self.encode_decode(img, img_meta)\n if rescale:\n seg_logit = resize(\n seg_logit,\n size=img_meta[0]['ori_shape'][:2],\n mode='bilinear',\n align_corners=self.align_corners,\n warning=False)\n\n gen_out = resize(\n gen_out,\n size=img_meta[0]['ori_shape'][:2],\n mode='bilinear',\n align_corners=self.align_corners,\n warning=False)\n\n return seg_logit, gen_out", "def encode_decode(self, img, img_metas):\n x, y = self.extract_feat(img)\n out = self._decode_head_forward_test(x, img_metas)\n out_gen = self.G_head(y, img)\n # out_gen = resize(\n # input=out_gen,\n # size=img.shape[2:],\n # mode='bilinear',\n # align_corners=self.align_corners)\n\n out = resize(\n input=out,\n size=img.shape[2:],\n mode='bilinear',\n align_corners=self.align_corners)\n return out, out_gen", "def __init__(self, image_dir, instances_json, stuff_json=None,\n stuff_only=True, image_size=(64, 64), mask_size=32, normalize_images=True, max_samples=None,\n include_relationships=True, min_object_size=0.02, min_objects=3, max_objects=8,\n include_other=False, instance_whitelist=None, stuff_whitelist=None, learned_transitivity=False,\n include_dummies=True, use_transitivity=False, use_converse=False, learned_symmetry=False,\n learned_converse=False):\n super(CocoSceneGraphDataset, self).__init__()\n self.use_converse = use_converse\n self.learned_transitivity = learned_transitivity\n self.learned_symmetry = learned_symmetry\n self.learned_converse = learned_converse\n self.include_dummies = include_dummies\n self.image_dir = image_dir\n # self.mask_size = image_size[0]\n self.mask_size = mask_size\n self.masks = True\n if self.mask_size == 0:\n self.masks = False\n self.mask_size = 32\n\n self.max_samples = max_samples\n self.normalize_images = normalize_images\n self.include_relationships = include_relationships\n self.set_image_size(image_size)\n self.use_transitivity = use_transitivity\n\n with open(instances_json, 'r') as f:\n instances_data = json.load(f)\n\n with open(stuff_json, 'r') as f:\n stuff_data = json.load(f)\n\n self.image_ids = []\n self.image_id_to_filename = {}\n self.image_id_to_size = {}\n for image_data in instances_data['images']:\n image_id = image_data['id']\n filename = image_data['file_name']\n width = image_data['width']\n height = image_data['height']\n self.image_ids.append(image_id)\n self.image_id_to_filename[image_id] = filename\n self.image_id_to_size[image_id] = (width, height)\n\n self.vocab = {\n 'object_name_to_idx': {},\n 'pred_name_to_idx': {},\n }\n object_idx_to_name = {}\n 
all_instance_categories = []\n for category_data in instances_data['categories']:\n category_id = category_data['id']\n category_name = category_data['name']\n all_instance_categories.append(category_name)\n object_idx_to_name[category_id] = category_name\n self.vocab['object_name_to_idx'][category_name] = category_id\n all_stuff_categories = []\n\n for category_data in stuff_data['categories']:\n category_name = category_data['name']\n category_id = category_data['id']\n all_stuff_categories.append(category_name)\n object_idx_to_name[category_id] = category_name\n self.vocab['object_name_to_idx'][category_name] = category_id\n\n if instance_whitelist is None:\n instance_whitelist = all_instance_categories\n if stuff_whitelist is None:\n stuff_whitelist = all_stuff_categories\n category_whitelist = set(instance_whitelist) | set(stuff_whitelist)\n\n # Add object data from instances\n self.image_id_to_objects = defaultdict(list)\n for object_data in instances_data['annotations']:\n image_id = object_data['image_id']\n _, _, w, h = object_data['bbox']\n W, H = self.image_id_to_size[image_id]\n box_area = (w * h) / (W * H)\n box_ok = box_area > min_object_size\n object_name = object_idx_to_name[object_data['category_id']]\n category_ok = object_name in category_whitelist\n other_ok = object_name != 'other' or include_other\n if box_ok and category_ok and other_ok:\n self.image_id_to_objects[image_id].append(object_data)\n\n # Add object data from stuff\n image_ids_with_stuff = set()\n for object_data in stuff_data['annotations']:\n image_id = object_data['image_id']\n image_ids_with_stuff.add(image_id)\n _, _, w, h = object_data['bbox']\n W, H = self.image_id_to_size[image_id]\n box_area = (w * h) / (W * H)\n box_ok = box_area > min_object_size\n object_name = object_idx_to_name[object_data['category_id']]\n category_ok = object_name in category_whitelist\n other_ok = object_name != 'other' or include_other\n if box_ok and category_ok and other_ok:\n self.image_id_to_objects[image_id].append(object_data)\n\n new_image_ids = []\n for image_id in self.image_ids:\n if image_id in image_ids_with_stuff:\n new_image_ids.append(image_id)\n self.image_ids = new_image_ids\n\n all_image_ids = set(self.image_id_to_filename.keys())\n image_ids_to_remove = all_image_ids - image_ids_with_stuff\n for image_id in image_ids_to_remove:\n self.image_id_to_filename.pop(image_id, None)\n self.image_id_to_size.pop(image_id, None)\n self.image_id_to_objects.pop(image_id, None)\n\n # COCO category labels start at 1, so use 0 for __image__\n self.vocab['object_name_to_idx']['__image__'] = 0\n\n # Build object_idx_to_name\n name_to_idx = self.vocab['object_name_to_idx']\n assert len(name_to_idx) == len(set(name_to_idx.values()))\n max_object_idx = max(name_to_idx.values())\n idx_to_name = ['NONE'] * (1 + max_object_idx)\n for name, idx in self.vocab['object_name_to_idx'].items():\n idx_to_name[idx] = name\n self.vocab['object_idx_to_name'] = idx_to_name\n\n # Prune images that have too few or too many objects\n new_image_ids = []\n total_objs = 0\n for image_id in self.image_ids:\n num_objs = len(self.image_id_to_objects[image_id])\n total_objs += num_objs\n if min_objects <= num_objs <= max_objects:\n new_image_ids.append(image_id)\n self.image_ids = new_image_ids\n\n self.register_augmented_relations()\n\n self.vocab[\"attributes\"] = {}\n self.vocab[\"attributes\"]['objects'] = self.vocab['object_name_to_idx']\n self.vocab[\"reverse_attributes\"] = {}\n for attr in self.vocab[\"attributes\"].keys():\n 
self.vocab[\"reverse_attributes\"][attr] = {v: k for k, v in self.vocab[\"attributes\"][attr].items()}", "def __call__(self, img, *args, **kwargs):\n raise NotImplementedError", "def __call__(self, img, *args, **kwargs):\n raise NotImplementedError", "def op(self, img):\n raise NotImplementedError(\"'op' is an abstract method.\")", "def parser_image_data(jpeg_file_path):\n image = tf.io.read_file(jpeg_file_path)\n image = tf.image.decode_jpeg(image)\n image = tf.image.resize(image, [image_height, image_width])\n image = tf.cast(image, dtype=tf.float32)\n image = (image / 127.5) - 1.0\n return image", "def imgProcessing(self):\n if (self.image_width > 320):\n self.cv_image = imutils.resize(self.cv_image, width = 320)\n else:\n pass\n\n \"\"\" optional -- image-mirrored \"\"\"\n # self.cv_image = cv2.flip(self.cv_image, 1)", "def _process_image(filename, coder):\n # Read the image file.\n with tf.gfile.FastGFile(filename, 'rb') as f:\n image_data = f.read()\n \n # Convert any PNG to JPEG's for consistency.\n if _is_png(filename):\n print('Converting PNG to JPEG for %s' % filename)\n image_data = coder.png_to_jpeg(image_data)\n # Decode the RGB JPEG.\n image = coder.decode_jpeg(image_data)\n\n # Check that image converted to RGB\n assert len(image.shape) == 3\n height = image.shape[0]\n width = image.shape[1]\n assert image.shape[2] == 3\n\n return image_data, height, width", "def fileResizeObscure(new_filepath):\n # Resize\n img1 = Image.open(new_filepath)\n img2=image_reduce(img1)\n *** Stopped working here\n newpath=\"toupload\\\\%s\" % new_filepath\n # Block ID\n width=img2.size[0]\n height=img2.size[1]\n # Obscuring params were decided by trial and error using fraction of width and height\n x1=int(0.16*width)\n x2=int(0.28*width)\n y1=int(0.94*height)\n y2=int(0.98*height) \n # Faster but easier to snoop? 
should not be since it changes the pixels\n draw = ImageDraw.Draw(img2)\n draw.rectangle([(x1,y1),(x2,y2)],fill=\"white\")\n del draw\n \n img2.save(newpath,optimize=True,quality=95)", "def process(self, _edObject=None):\n\n EDPluginExec.process(self)\n EDVerbose.DEBUG(\"EDPluginExecThumbnailv10.process\")\n\n# try:\n# except Exception:\n# edfImage = EDF(self.inputFilename)\n# self.npaImage = edfImage.GetData(0)\n\n# Read the image using FABIO\n isRGB = False\n pilOutputImage = None\n if self.inputFilename is not None:\n try:\n fabioImage = openimage(self.inputFilename)\n self.npaImage = fabioImage.data\n except Exception:\n pilInputImage = Image.open(self.inputFilename)\n x, y = pilInputImage.size\n ImageFile.MAXBLOCK = x * y\n if pilInputImage.mode == \"1\":\n self.npaImage = numpy.asarray(pilInputImage).astype(\"uint8\")\n isRGB = False\n elif pilInputImage.mode == \"F\":\n self.npaImage = numpy.asarray(pilInputImage)\n isRGB = False\n elif pilInputImage.mode == \"L\":\n self.npaImage = numpy.asarray(pilInputImage)\n isRGB = False\n elif pilInputImage.mode == \"P\":\n self.npaImage = numpy.asarray(pilInputImage.convert(\"RGB\"))\n isRGB = True\n elif pilInputImage.mode == \"RGB\":\n self.npaImage = numpy.asarray(pilInputImage)\n isRGB = True\n elif pilInputImage.mode == \"CMJK\":\n self.npaImage = numpy.asarray(pilInputImage.convert(\"RGB\"))\n isRGB = True\n\n dtype = self.npaImage.dtype\n NPAImageFloat = None\n\n# crop border\n if len(self.cropBorders) > 0:\n\n if len(self.cropBorders) == 1:\n crop0 = self.cropBorders[0]\n crop1 = self.cropBorders[0]\n else:\n crop0 = self.cropBorders[0]\n crop1 = self.cropBorders[1]\n if isRGB:\n self.npaImage = self.npaImage[crop0:-crop0, crop1:crop1, :]\n else:\n self.npaImage = self.npaImage[crop0:-crop0, crop1:crop1]\n\n\n# Set maxima and minima\n if (self.minLevelUnit is not None) or (self.maxLevelUnit is not None):\n sortedArray = self.npaImage.flatten()\n sortedArray.sort()\n\n if self.minLevel is not None:\n self.normalize = True\n if isRGB:\n EDVerbose.warning(\"It is not allowed to set Min with RGB data\")\n else:\n if self.minLevelUnit in [\"%\", \"percent\"]:\n self.minLevel = sortedArray[int(round(float(self.minLevel) * sortedArray.size / 100.0))]\n if isinstance(self.npaImage[0, 0], int):\n self.npaImage = numpy.maximum(self.npaImage, int(self.minLevel) * numpy.ones_like(self.npaImage))\n else:\n self.npaImage = numpy.maximum(self.npaImage, self.minLevel * numpy.ones_like(self.npaImage))\n\n if self.maxLevel is not None:\n self.normalize = True\n if isRGB:\n EDVerbose.warning(\"It is not allowed to set Max with RGB data\")\n else:\n if self.maxLevelUnit in [\"%\", \"percent\"]:\n self.maxLevel = sortedArray[int(round(float(self.maxLevel) * sortedArray.size / 100.0))]\n if isinstance(self.npaImage[0, 0], int):\n self.npaImage = numpy.minimum(self.npaImage, int(self.maxLevel) * numpy.ones_like(self.npaImage))\n else:\n self.npaImage = numpy.minimum(self.npaImage, self.maxLevel * numpy.ones_like(self.npaImage))\n\n# Scipy filters come here:\n if len(self.gaussianBlur) > 0:\n if len(self.gaussianBlur) == 1 :\n kernel = (self.gaussianBlur[0], self.gaussianBlur[0])\n else:\n kernel = (self.gaussianBlur[0], self.gaussianBlur[1])\n if isRGB:\n kernel = (kernel[0], kernel[1], 0)\n self.npaImage = scipy.ndimage.gaussian_filter(self.npaImage, kernel)\n\n if len(self.dilatation) > 0:\n if len(self.dilatation) == 1:\n kernel = (self.dilatation[0], self.dilatation[0])\n else:\n kernel = (self.dilatation[0], self.dilatation[1])\n if isRGB:\n kernel = 
(kernel[0], kernel[1], 0)\n self.npaImage = scipy.ndimage.morphology.grey_dilation(self.npaImage, kernel)\n\n\n#Normalization ; equalization\n if (self.normalize is True) or (self.equalize is True):\n if isRGB is True:\n self.npaImage = numpy.asarray(ImageOps.equalize(Image.fromarray(self.npaImage)))\n else:\n EDVerbose.DEBUG(\"EDPluginExecThumbnailv10: Normalization\")\n vmin = self.npaImage.min()\n vmax = self.npaImage.max()\n NPAImageFloat = (self.npaImage.astype(numpy.float32) - float(vmin)) / (float(vmax) - float(vmin))\n if (self.equalize == True):\n nbr_bins = 64\n NPAImageFloatFlat = NPAImageFloat.flatten()\n imhist, bins = numpy.histogram(NPAImageFloatFlat, nbr_bins, normed=True) #get image histogram\n cdf = imhist.cumsum() #cumulative distribution function\n ncdf = cdf / cdf[-1] #normalized cumulative distribution function\n# print ncdf\n NPAImageFloat2Flat = numpy.interp(NPAImageFloatFlat, bins, [0] + ncdf.tolist())\n NPAImageFloat = NPAImageFloat2Flat.reshape(NPAImageFloat.shape)\n EDVerbose.DEBUG(\"Equalize: min= %f, max= %f\" % (NPAImageFloat.min(), NPAImageFloat.max()))\n\n#Gamma and logarithm scale\n if ((self.log is True) or (self.gamma != 1)) and (NPAImageFloat is None): # then we need the array in float \n if dtype == numpy.uint8:\n NPAImageFloat = self.npaImage.astype(numpy.float32) / 255.0\n elif dtype == numpy.uint16:\n NPAImageFloat = self.npaImage.astype(numpy.float32) / 65535.0\n else:\n NPAImageFloat = self.npaImage.astype(numpy.float32)\n\n if self.log is True:\n NPAImageFloat = numpy.log(1 - NPAImageFloat.min() + NPAImageFloat)\n vmin = NPAImageFloat.min()\n vmax = NPAImageFloat.max()\n NPAImageFloat = (NPAImageFloat - vmin) / (vmax - vmin)\n\n if self.gamma != 1:\n if dtype not in [numpy.uint8, numpy.uint16]:\n vmin = NPAImageFloat.min()\n vmax = NPAImageFloat.max()\n NPAImageFloat = (NPAImageFloat - vmin) / (vmax - vmin)\n NPAImageInt = (255.0 * (NPAImageFloat ** self.gamma)).astype(\"uint8\")\n\n else: #if (self.gamma == 1):\n if NPAImageFloat is None:\n if dtype == numpy.uint8:\n NPAImageInt = self.npaImage\n elif dtype == numpy.uint16:\n NPAImageInt = (self.npaImage / 256).astype(numpy.uint8)\n else: #for float or a signed integer\n vmin = self.npaImage.min()\n vmax = self.npaImage.max()\n NPAImageInt = ((self.npaImage.astype(numpy.float32) - vmin) / (vmax - vmin) * 255.0).astype(numpy.uint8)\n else:\n vmin = NPAImageFloat.min()\n vmax = NPAImageFloat.max()\n EDVerbose.DEBUG(\"EDPluginExecThumbnailv10: NPAImageFloat => NPAImageInt min=%s max =%s\" % (vmin, vmax))\n NPAImageInt = ((NPAImageFloat - vmin) * 255.0 / (vmax - vmin)).astype(numpy.uint8)\n#COnversion back to PIL mode\n if isRGB is True:\n pilOutputImage = Image.fromarray(NPAImageInt, 'RGB')\n else:\n pilOutputImage = Image.fromarray(NPAImageInt, 'L')\n\n if (self.autocontrast is not None):\n pilOutputImage = ImageOps.autocontrast(pilOutputImage, self.autocontrast)\n\n if (self.width is not None) or (self.height is not None):\n if (self.width > 0) and (self.height > 0):\n if self.keepRatio is True:\n# PIL takes care of the ratio\n pilOutputImage.thumbnail((self.width, self.height), Image.ANTIALIAS)\n else:\n pilOutputImage = pilOutputImage.resize((self.width, self.height), Image.ANTIALIAS)\n else:\n if self.width is None:\n pilOutputImage.thumbnail((self.height, self.height), Image.ANTIALIAS)\n elif self.height is None:\n pilOutputImage.thumbnail((self.width, self.width), Image.ANTIALIAS)\n\n if self.invert == True:\n pilOutputImage = ImageOps.invert(pilOutputImage)\n if self.colorize == True:\n 
pilOutputImage.putpalette(EDPluginExecThumbnailv10.getPalette())\n pilOutputImage = pilOutputImage.convert(\"RGB\")\n\n self.synchronizeOn()\n if self.format == \"jpg\":\n self.width, self.height = pilOutputImage.size\n if self.width * self.height > ImageFile.MAXBLOCK:\n ImageFile.MAXBLOCK = self.width * self.height\n try:\n pilOutputImage.save(self.output, \"JPEG\", quality=85, optimize=True)\n except TypeError:\n pilOutputImage.save(self.output)\n else:\n pilOutputImage.save(self.output)\n self.synchronizeOff()", "def __init__(self, embed_size):\n super(ImgEncoder, self).__init__()\n model = models.vgg19(pretrained=True)\n in_features = model.classifier[-1].in_features # input size of feature vector\n model.classifier = nn.Sequential(\n *list(model.classifier.children())[:-1]) # remove last fc layer\n\n self.model = model # loaded model without last fc layer\n self.fc = nn.Linear(in_features, embed_size) # feature vector of image", "def getimage(self):", "def transform_image(im, ops):\n\n # Optimize the list of operations\n #\n # IMPORTANT! The optimized operations method doesn't work correctly in\n # a number of cases and therefore has been removed for the moment until\n # those issues can be resolved (hint I think the stack of operations\n # needs to be optimized in reverse).\n #\n # ~ Anthony Blackshaw <[email protected]>, 31 August 2017\n #\n # ops = Variation.optimize_ops(ops)\n\n # Perform the operations\n fmt = {'format': 'jpeg', 'ext': 'jpg'}\n for op in ops:\n\n # Crop\n if op[0] == 'crop':\n im = im.crop([\n int(op[1][3] * im.size[0]), # Left\n int(op[1][0] * im.size[1]), # Top\n int(op[1][1] * im.size[0]), # Right\n int(op[1][2] * im.size[1]) # Bottom\n ])\n\n # Face\n elif op[0] == 'face':\n # If face detection isn't supported ignore the operation\n if not current_app.config['SUPPORT_FACE_DETECTION']:\n continue\n\n # Ensure the image we use to find a face with is RGB format\n face_im = im.convert('RGB')\n\n # Due to performance constraints we don't attempt face\n # recognition on images over 2000x2000 pixels, instead we scale\n # the images within these bounds ahead of the action.\n ratio = 1.0\n if im.size[0] > 2000 or im.size[1] > 2000:\n face_im.thumbnail((2000, 2000), Image.ANTIALIAS)\n ratio = float(im.size[0]) / float(face_im.size[0])\n\n # Attempt to find the face\n face_rect = Variation.find_face(face_im, **op[1])\n\n # If no face is detected there's nothing more to do\n if face_rect is None:\n continue\n\n # Scale the rectangle by the reduced ratio\n if ratio:\n face_rect = [int(d * ratio) for d in face_rect]\n\n # If a face was found crop it from the image\n im = im.crop(face_rect)\n\n # Fit\n elif op[0] == 'fit':\n im.thumbnail(op[1], Image.ANTIALIAS)\n\n # Rotate\n elif op[0] == 'rotate':\n if op[1] == 90:\n im = im.transpose(Image.ROTATE_270)\n\n elif op[1] == 180:\n im = im.transpose(Image.ROTATE_180)\n\n elif op[1] == 270:\n im = im.transpose(Image.ROTATE_90)\n\n # Output\n elif op[0] == 'output':\n fmt = op[1]\n\n # Set the extension for the output and the format required by\n # Pillow.\n fmt['ext'] = fmt['format']\n if fmt['format'] == 'jpg':\n fmt['format'] = 'jpeg'\n\n # Add the optimize flag for JPEGs and PNGs\n if fmt['format'] in ['jpeg', 'png']:\n fmt['optimize'] = True\n\n # Allow gifs to store multiple frames\n if fmt['format'] in ['gif', 'webp']:\n fmt['save_all'] = True\n fmt['optimize'] = True\n\n # Variations are output in web safe colour modes, if the\n # original image isn't using a web safe colour mode supported by\n # the output format it 
will be converted to one.\n if fmt['format'] == 'gif' and im.mode != 'P':\n im = im.convert('P')\n\n elif fmt['format'] == 'jpeg' and im.mode != 'RGB':\n im = im.convert('RGB')\n\n elif fmt['format'] == 'png' \\\n and im.mode not in ['P', 'RGB', 'RGBA']:\n im = im.convert('RGB')\n\n elif fmt['format'] == 'webp' and im.mode != 'RGBA':\n im = im.convert('RGBA')\n\n return im, fmt", "def process_image(encoded_image, config, thread_id=0):\r\n return image_processing.process_image(encoded_image,\r\n is_training=False,\r\n height=config.image_height,\r\n width=config.image_width,\r\n thread_id=thread_id,\r\n image_format=config.image_format)", "def _image(self):\n print(\"imaging\")\n self.images.append(self.device_control.image())\n yield", "def test_im_file_resize(self):\n self._test_img_resize(IMBackend())", "def encode(self, img):\n with tf.variable_scope('encoder'):\n #conv1 = self.conv_layer(\n # img, [5, 5], [3, 32], stride=2, initializer_type=1, name='conv1')\n #conv2 = self.conv_layer(\n # conv1, [5, 5], [32, 32], stride=2, initializer_type=1, name='conv2')\n conv3 = self.conv_layer(\n img, [5, 5], [self.in_channels, 64], stride=2, initializer_type=1, name='conv3')\n #conv4 = self.conv_bn_layer(\n conv4 = self.conv_layer(\n conv3, [5, 5], [64, 128], stride=2, #is_training=self.is_training,\n initializer_type=1, name='conv4')\n #conv5 = self.conv_bn_layer(\n conv5 = self.conv_layer(\n conv4, [5, 5], [128, 256], stride=2, #is_training=self.is_training,\n initializer_type=1, name='conv5')\n shape = conv5.get_shape().as_list()\n feature_map_size = shape[1]*shape[2]*shape[3]\n conv5_flat = tf.reshape(\n conv5, [-1, feature_map_size], 'conv5_flat')\n #fc6 = self.fc_bn_layer(conv5_flat, 1024, is_training=self.is_training,\n fc6 = self.fc_layer(conv5_flat, 1024,\n initializer_type=1, name='fc6')\n #fc7 = self.fc_layer(fc6, 1024, initializer_type=1, name='fc7')\n return fc6, shape", "def build_filler_images(self):", "def small_image(self):\n pass", "def preprocess_graph(self):\n image = tf.placeholder(\n tf.float32,\n shape=[self.img_h, self.img_w, self.col_channels])\n patches = self.create_patches(image)\n return {'image': image,\n 'patches': patches}", "def test_pil_file_resize(self):\n self._test_img_resize(PILBackend())", "def combine_graphs_vertically(cls, graphs, newgraph):\n\n # Calculate width and height for the new graph\n imgs = [Image.open(f) for f in graphs]\n width, height = (0, 0)\n for i in imgs:\n w, h = getattr(i, \"size\")\n if w > width:\n width = w\n height = height + h\n\n newimg = Image.new(\"RGB\", (width, height))\n y = 0\n for i in imgs:\n w, h = getattr(i, \"size\")\n newimg.paste(i, (0, y))\n y = y + h\n\n newimg.save(newgraph)", "def add_graph(self):\n \n self.cd_sampling = None\n \n if \"CD\" in self.algorithm:\n\n self.add_cd_samples()\n \n if self.num_hidden ==0:\n \n self.cd_sampling = self.get_cd_samples()\n \n if \"CSS\" in self.algorithm and self.mf_steps > 0: \n \n self.add_mf_updates()\n \n elif \"CSS\" in self.algorithm and self.gibbs_steps > 0:\n \n self.add_cd_samples()\n \n if self.num_hidden ==0:\n \n self.cd_sampling = self.get_cd_samples() \n \n self.add_objective()\n\n self.add_grad_updates() \n \n if self.report_p_tilda:\n \n self.add_p_tilda()\n \n self.add_pseudo_cost_measure()\n\n self.optimize = self.optimization_step()", "def image_preprocessor(self, channels, height, width, resize=None, image_augment_fun=None):\n\n def __preprocessor__(image):\n # scale image to [0, 1] or [-1,1]\n image = tf.divide(image, 255.0, name='scale_range')\n # image = 
tf.subtract(tf.divide(image, 127.5), 1, name='scale_range')\n\n # reshape - note this is determined by how the data is stored in tfrecords, modify with caution\n if self.decode_jpeg:\n # if decode_jpeg is true, the image is already in [height, width, channels] format,\n # thus we only need to consider IMAGE_FORMAT\n if FLAGS.IMAGE_FORMAT == 'channels_first':\n image = tf.transpose(image, perm=(2, 0, 1))\n pass\n else:\n # if decode_jpeg is false, the image is probably in vector format\n # thus we reshape the image according to the stored format provided by IMAGE_FORMAT\n image = tf.reshape(image, (channels, height, width)) \\\n if FLAGS.IMAGE_FORMAT == 'channels_first' else tf.reshape(image, (height, width, channels))\n # resize\n if isinstance(resize, (list, tuple)):\n if FLAGS.IMAGE_FORMAT == 'channels_first':\n image = tf.transpose(\n tf.image.resize_images( # resize only support HWC\n tf.transpose(image, perm=(1, 2, 0)), resize, align_corners=True), perm=(2, 0, 1))\n else:\n image = tf.image.resize_images(image, resize, align_corners=True)\n\n # apply augmentation method if provided\n if image_augment_fun is not None:\n print('Images will be augmented')\n image = image_augment_fun(image)\n\n return image\n\n # do image pre-processing\n if self.num_labels == 0:\n self.dataset = self.dataset.map(\n lambda image_data: __preprocessor__(image_data),\n num_parallel_calls=self.num_threads)\n else:\n self.dataset = self.dataset.map(\n lambda image_data, label: (__preprocessor__(image_data), label),\n num_parallel_calls=self.num_threads)\n\n # write batch shape\n if isinstance(resize, (list, tuple)):\n height, width = resize\n self.batch_shape = [self.batch_size, height, width, channels] \\\n if FLAGS.IMAGE_FORMAT == 'channels_last' else [self.batch_size, channels, height, width]", "def main():\n labels, data = load_image_data()\n print(labels.shape, data.shape)", "def main():\n nb_processed = 0\n for dataset_name in DATASETS:\n print(\"-----------------\")\n print(\"Dataset: '%s'\" % (dataset_name,))\n print(\"-----------------\")\n\n dataset_dir = os.path.join(WRITE_MAIN_DIR, dataset_name)\n if not os.path.isdir(dataset_dir):\n os.makedirs(dataset_dir)\n\n dataset = Dataset([os.path.join(READ_MAIN_DIR, dataset_name)])\n print(\"Found %d images total.\" % (len(dataset.fps),))\n\n errors = []\n\n scale_height, scale_width = SCALES[dataset_name]\n target_aspect_ratio = scale_width / scale_height\n\n # iterate over every image in the current dataset,\n # augment that image N times, add cols/rows until target aspect ratio\n # is reached, resize it (e.g. 
64x64), save it\n for img_idx, (image_filepath, image) in enumerate(zip(dataset.fps, dataset.get_images())):\n print(\"[%s] Image %d of %d (%.2f%%)...\" \\\n % (dataset_name, img_idx+1, len(dataset.fps),\n 100*(img_idx+1)/len(dataset.fps)))\n\n # IOErrors during loading of images result here in a None value\n if image is None:\n print(\"Error / None\")\n errors.append((\n image_filepath,\n \"Failed to load image '%s' (idx %d for dataset %s)\" \\\n % (image_filepath, img_idx, dataset_name)\n ))\n else:\n # resize too big images to smaller ones before any augmentation\n # (for performance reasons)\n height = image.shape[0]\n width = image.shape[1]\n aspect_ratio = width / height\n if width > 1000 or height > 1000:\n image = misc.imresize(image, (1000, int(1000 * aspect_ratio)))\n\n # augment image\n # converts augmented versions automatically to float32, 0-1\n augmentations = augment(image, **AUGMENTATIONS[dataset_name])\n\n # create list of original image + augmented versions\n images_aug = [image / 255.0]\n images_aug.extend(augmentations)\n\n # for each augmented version of the images:\n # resize it to target aspect ratio (e.g. same width and height),\n # save it\n for aug_idx, image_aug in enumerate(images_aug):\n image_aug = to_aspect_ratio_add(image_aug, target_aspect_ratio)\n filename = \"{:0>6}_{:0>3}.jpg\".format(img_idx, aug_idx)\n img_scaled = misc.imresize(image_aug, (scale_height, scale_width))\n misc.imsave(os.path.join(dataset_dir, filename), img_scaled)\n\n nb_processed += 1\n\n print(\"Processed %d images with %d errors.\" % (nb_processed, len(errors)))\n for (fp, err) in errors:\n print(\"File %s error:\" % (fp,))\n print(err)\n print(\"Finished.\")", "def __call__(self, results):\n # Image is bgr\n img = results['img'][..., ::-1]\n img = Image.fromarray(img)\n img = self.transform(img)\n img = np.asarray(img)\n img = img[..., ::-1]\n results['img'] = img\n return results", "def decode_plot(model,datapath,img_size=100,vae=False,compare=False,adjust=False,discrepancy=0.1):\n target = set_creation(datapath,img_size=img_size,nrand=5,adjust=adjust)\n model.eval()\n out, _ = model(target.float())\n plt.style.use('seaborn')\n plt.figure(figsize=(14,6*(1+int(compare))),dpi=80)\n for i in range(5):\n # Load and plot original image\n ax1 = plt.subplot(2*(1+int(compare)),5,i+1)\n ax1.imshow(target[i][0],cmap='viridis')\n ax1.set_title('Original image')\n normout = out[i][0]-out[i][0].min()\n normout = normout/normout.max()\n # Plot decoded image\n ax2 = plt.subplot(2*(1+int(compare)),5,6+i)\n ax2.imshow(normout.data,cmap='viridis')\n ax2.set_title('Decoded image'+(' (reference)' if i==0 and compare else ''))\n if compare:\n diff = normout-target[i][0]\n accuracy = len(diff[numpy.array(abs(diff)<discrepancy)])/args.img_size**2*100\n # Plot difference between decoded and original image\n ax3 = plt.subplot(4,5,11+i)\n ax3.imshow((normout-target[i][0]).data,cmap='seismic',vmin=-1,vmax=1)\n ax3.set_title('%.2f%% accuracy'%accuracy)\n # Plot difference between decoded and reference images\n if i==0:\n refout = normout\n ax4 = plt.subplot(4,5,16+i)\n ax4.imshow(abs(normout-refout).data,cmap='OrRd')\n ax4.set_title('Decoded - Reference')\n plt.tight_layout()\n plt.show()", "def process_image(self):\n if not os.path.isfile(self.output_file) or self.gallery.generator.settings[\"GALLERY_REGENERATE_EXISTING\"]:\n \n # Actions should be processed in order of appearance in actions array\n for i in range(len(self.preset[\"actions\"])):\n a = self.preset[\"actions\"][i]\n\n if a[\"type\"] == 
\"fit\":\n if not \"from\" in a:\n a[\"from\"] = (0.5, 0.5) # crop from middle by default\n\n self.image = ImageOps.fit(self.image, (a[\"width\"], a[\"height\"],), method=Image.ANTIALIAS, centering=a[\"from\"])\n \n if a[\"type\"] == \"greyscale\":\n self.image = ImageOps.grayscale(self.image)\n\n if a[\"type\"] == \"resize\":\n self.image.thumbnail((a[\"width\"], a[\"height\"]), Image.NEAREST)\n \n # TODO: Write other useful transforms here!\n \n\n self.image.save(self.output_file, \"JPEG\")", "def compute_img(self):\r\n self.load_img()\r\n self.check_shape()\r\n self.convert_img()\r\n self.img_computed = True", "def encode_jpg(track_metadata):\n\tprint(\"---- Encoding\", track_metadata.file_name, \"to JPG...\")\n\tnew_file_name = track_metadata.file_name + \".jpg\"\n\tshutil.copy(track_metadata.file_name, new_file_name) #Work only on a copy.\n\tect_command = [\"/home/ruben/encoding/Efficient-Compression-Tool/build/ect\", \"-9\", \"-strip\", \"--mt-deflate\", new_file_name]\n\tprint(ect_command)\n\tprocess = subprocess.Popen(ect_command, stdout=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\texit_code = process.wait()\n\tif(exit_code != 0): #0 is success.\n\t\traise Exception(\"ECT failed with exit code {exit_code}. CERR: {cerr}\".format(exit_code=exit_code, cerr=cerr))\n\n\t#Delete old file.\n\tif os.path.exists(track_metadata.file_name):\n\t\tos.remove(track_metadata.file_name)\n\n\ttrack_metadata.file_name = new_file_name\n\ttrack_metadata.codec = \"jpg\"", "def __init__(self, **kwargs):\n super(ImageExporter, self).__init__(**kwargs)\n # COMPRESS=PACKBITS\n # PIL: TIFF uncompressed, or Packbits, LZW, or JPEG compressed images. In the current version, PIL always writes uncompressed TIFF files\n # http://linfiniti.com/2011/05/gdal-efficiency-of-various-compression-algorithms/\n # predictor for 'DEFLATE' or 'LZW' : 1 or 2\n i_tiff_compression_predictor=2\n # zlevel for 'DEFLATE' : 1 to 9\n i_tiff_compression_zlevel=8\n self.jpg_quality=75\n self.tiff_compression=[]\n self._metadata = []\n if self.reader.metadata_input:\n self.metadata_input=self.reader.metadata_input\n self.tiff_compress = kwargs.get('tiff_compression', \"LZW\")\n self.tiff_compress =self.tiff_compress.upper()\n self.jpg_quality = kwargs.get('jpg_quality', self.jpg_quality)\n if self.jpg_quality < 1 or self.jpg_quality > 95:\n self.jpg_quality=75\n i_tiff_compression_predictor = kwargs.get('tiff_predictor', i_tiff_compression_predictor)\n if i_tiff_compression_predictor < 1 or i_tiff_compression_predictor > 2:\n i_tiff_compression_predictor=2\n i_tiff_compression_zlevel = kwargs.get('tiff_zlevel', i_tiff_compression_zlevel)\n if i_tiff_compression_zlevel < 1 or i_tiff_compression_zlevel > 9:\n i_tiff_compression_predictor=8\n if self.tiff_compress == \"PACKBITS\" :\n self.tiff_compression.append('COMPRESS=PACKBITS')\n elif self.tiff_compress == \"DEFLATE\":\n self.tiff_compression.append('COMPRESS=%s' % 'DEFLATE')\n self.tiff_compression.append('PREDICTOR=%d' % i_tiff_compression_predictor)\n self.tiff_compression.append('ZLEVEL=%d' % i_tiff_compression_zlevel)\n elif self.tiff_compress == \"LZW\":\n self.tiff_compression.append('COMPRESS=%s' % 'LZW')\n self.tiff_compression.append('PREDICTOR=%d' % i_tiff_compression_predictor)\n elif self.tiff_compress == \"NONE\":\n self.tiff_compression.append('COMPRESS=NONE')", "def writeEdge(self, stack_type):\r\n\r\n \"\"\"Set the red channel of self.dispedge equal to 255, and set the blue and green channels equal to\r\n zero where there is an edge pixel\"\"\"\r\n if 
stack_type==\"xy\":\r\n \r\n self.dispedge[:,:,0]= np.maximum(self.img[self.z_stack], self.shrink[self.z_stack]*255)\r\n self.dispedge[:,:,1]=np.minimum(self.img[self.z_stack], 255-self.shrink[self.z_stack]*255)\r\n self.dispedge[:,:,2]=self.dispedge[:,:,1]\r\n \r\n \r\n \r\n \"\"\"displays the red boundary on the pixmap by using the PIL library\"\"\"\r\n im=Image.fromarray(self.dispedge)\r\n image= ImageQt.ImageQt(im)\r\n image2= QtGui.QImage(image)\r\n pixmap=QtGui.QPixmap.fromImage(image2).scaled(250,250)\r\n #self.lbl2.setPixmap(self.pixmap2)\r\n return pixmap\r\n \r\n elif stack_type==\"xz\":\r\n self.y_dispedge[:,:,0]= np.maximum(self.img[:,self.y_stack,:], self.shrink[:,self.y_stack,:]*255)\r\n self.y_dispedge[:,:,1]=np.minimum(self.img[:,self.y_stack,:], 255-self.shrink[:,self.y_stack,:]*255)\r\n self.y_dispedge[:,:,2]=self.y_dispedge[:,:,1]\r\n \r\n \"\"\"displays the red boundary on the pixmap by using the PIL library\"\"\"\r\n im=Image.fromarray(self.y_dispedge)\r\n image= ImageQt.ImageQt(im)\r\n image2= QtGui.QImage(image)\r\n pixmap=QtGui.QPixmap.fromImage(image2).scaled(250,250)\r\n #self.lbl2.setPixmap(self.pixmap2)\r\n return pixmap\r\n \r\n else:\r\n self.x_dispedge[:,:,0]= np.maximum(self.img[:,:, self.x_stack], self.shrink[:,:,self.x_stack]*255)\r\n self.x_dispedge[:,:,1]=np.minimum(self.img[:,:,self.x_stack], 255-self.shrink[:,:,self.x_stack]*255)\r\n self.x_dispedge[:,:,2]=self.x_dispedge[:,:,1]\r\n \r\n \"\"\"displays the red boundary on the pixmap by using the PIL library\"\"\"\r\n im=Image.fromarray(self.x_dispedge)\r\n image= ImageQt.ImageQt(im)\r\n image2= QtGui.QImage(image)\r\n pixmap=QtGui.QPixmap.fromImage(image2).scaled(250,250)\r\n #self.lbl2.setPixmap(self.pixmap2)\r\n return pixmap", "def compress_image(filename = \"hubble.jpg\", s = 2):\n \n image = imread(filename)/255\n \n toShow = plt.subplot(121)\n \n #Color\n if len(np.shape(image)) == 3:\n #Set up original RGB values\n R = np.array(image[:,:,0])\n G = np.array(image[:,:,1])\n B = np.array(image[:,:,2])\n \n R, errR = svd_approx(R,s)\n G, errG = svd_approx(G,s)\n B, errB = svd_approx(B,s)\n imageF = np.dstack((R,G,B))\n err = errR + errG + errB\n toShow.imshow(imageF)\n toShow.set_title(\"New \" + str(err) + \", so \" + str((image.size-err)) + \" \\\"saved\\\"\")\n toShow.axis(\"off\")\n \n toShow = plt.subplot(122)\n toShow.set_title(\"Original \" + str(image.size))\n toShow = plt.imshow(image)\n #Gray\n else:\n imageF, err = svd_approx(image, s)\n# print(np.shape(imageF))\n toShow.imshow(imageF, cmap = \"gray\")\n toShow.set_title(\"New \" + str(err) + \", so \" + str((image.size-err)) + \" \\\"saved\\\"\")\n toShow.axis(\"off\")\n toShow = plt.subplot(122)\n toShow.set_title(\"Original \" + str(image.size))\n toShow = plt.imshow(image, cmap = \"gray\")\n \n plt.suptitle(\"MY PLOTS: \" + str((image.size-err)) + \" \\\"saved\\\" :p\")\n \n return\n raise NotImplementedError(\"Problem 5 Incomplete\")", "def update_image(self, image):\n raise NotImplementedError()", "def update_image(self):\n self.image = Image.fromarray(self.img)", "def augment_graph(self):\n tensors, _ = self.select_tensors_to_calibrate(self.model)\n reshape_shape_name = str(uuid.uuid4())\n reshape_shape = numpy_helper.from_array(np.array([1], dtype=np.int64), reshape_shape_name)\n self.model.graph.initializer.append(reshape_shape)\n\n def add_reduce_min_max(tensor_name, reduce_op_name):\n # When doing ReduceMax/ReduceMin, ORT can't reduce on dim with value of 0 if 'keepdims' is false.\n # To make the code simple, we always let 
keepdims to be 1.\n keepdims = 1\n\n # Adding ReduceMin/ReduceMax nodes: ReduceMin/ReduceMax -> Reshape-> (output)\n reduce_output = tensor_name + \"_\" + reduce_op_name\n intermediate_output = reduce_output + \"_Reshape\"\n reduce_node = onnx.helper.make_node(\n reduce_op_name, [tensor_name], [intermediate_output], keepdims=keepdims, name=reduce_output\n )\n\n reshape_node = onnx.helper.make_node(\n \"Reshape\",\n inputs=[intermediate_output, reshape_shape_name],\n outputs=[reduce_output],\n name=intermediate_output,\n )\n\n self.model.graph.node.extend([reduce_node, reshape_node])\n self.model.graph.output.append(helper.make_tensor_value_info(reduce_output, TensorProto.FLOAT, [1]))\n\n for tensor in tensors:\n add_reduce_min_max(tensor, \"ReduceMin\")\n add_reduce_min_max(tensor, \"ReduceMax\")\n\n onnx.save(\n self.model,\n self.augmented_model_path,\n save_as_external_data=self.use_external_data_format,\n )", "def __enhance_image(self, img):\n\n blue = self.g.clahe.apply(img[:,:,0])\n green = self.g.clahe.apply(img[:,:,1])\n red = self.g.clahe.apply(img[:,:,2])\n img[:,:,0] = blue\n img[:,:,1] = green\n img[:,:,2] = red\n return img", "def _apply_encoder(self, frame, prop, encoder, encoder_type=\"category\"):\n pass", "def process(self):\n self.output_image = cv.resize(\n self.input_image,\n (self.WIDTH, self.HEIHGT),\n )\n return self.output_image", "def update_image(self):\n if self.filenames:\n pos = self.slider.value()\n proj, flat, dark, theta = dx.read_aps_32id(self.filenames, proj=(pos, pos+1))\n if self.ffc_correction:\n image = proj[0,:,:].astype(np.float)/flat[0,:,:].astype(np.float)\n else:\n image = proj[0,:,:].astype(np.float)\n self.image_item.setImage(image)", "def _build_graph(self):\n self._setup_placeholders()\n self._embed()\n self.p_emb = tf.concat([self.p_emb, tf.expand_dims(self.em, -1)], -1)\n self._encode()\n self._match()\n self._fuse()\n\n with tf.variable_scope('boundary'):\n self._decode()\n with tf.variable_scope('content'):\n self._content()\n with tf.variable_scope('verif'):\n self._verify()\n\n self._compute_loss()", "def main():\n\n parser = argparse.ArgumentParser(description='codec_compare')\n parser.add_argument('path', metavar='DIR',\n help='path to images folder')\n args = parser.parse_args()\n classpath = args.path\n classname = classpath.split('/')[1]\n\n images = set(listdir_full_path(classpath))\n if len(images) <= 0:\n print \"\\033[91m[ERROR]\\033[0m\" + \" no source files in ./images.\"\n sys.exit(1)\n\n codeclist_full = set(['aom', 'deepcoder', 'deepcoder-lite', 'fuif', 'fvdo', 'hevc', 'kakadu', 'jpeg',\n 'pik', 'tat', 'xavs', 'xavs-fast', 'xavs-median', 'webp'])\n\n bpp_targets = set([0.06, 0.12, 0.25, 0.50, 0.75, 1.00, 1.50, 2.00])\n for image in images:\n width, height, depth = get_dimensions(image, classname)\n name, imgfmt = os.path.splitext(image)\n imgfmt = os.path.basename(image).split(\".\")[-1]\n derivative_images = []\n if classname[:6] == 'classB':\n derivative_images = create_derivatives(image, classname)\n else:\n derivative_images.append((image, imgfmt))\n\n for derivative_image, pix_fmt in derivative_images:\n json_dir = 'metrics'\n mkdir_p(json_dir)\n json_file = os.path.join(json_dir,\n os.path.splitext(os.path.basename(derivative_image))[0] + \".\" + pix_fmt + \".json\")\n # if os.path.isfile(json_file):\n # print \"\\033[92m[JSON OK]\\033[0m \" + json_file\n # continue\n main_dict = dict()\n derivative_image_metrics = dict()\n for codecname in codeclist_full:\n convertflag = 1\n caseflag = pix_fmt\n if (codecname == 
'webp' or codecname == 'tat' or 'deepcoder' in codecname) and depth != '8':\n continue\n if 'xavs' in codecname and depth != '8' and depth != '10':\n continue\n if 'classE' in classname and ('tat' in codecname or 'xavs' in codecname or 'deepcoder' in codecname):\n continue\n if codecname == 'kakadu' and classname[:6] == 'classB':\n convertflag = 0\n caseflag = imgfmt\n bpp_target_metrics = dict()\n for bpp_target in bpp_targets:\n print(codecname)\n if codecname == 'aom' and classname[:6] == 'classB':\n # ('AERIAL2' in image or 'CATS' in image or 'XRAY' in image or 'GOLD' in image or 'TEXTURE1' in image):\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '_' + str(bpp_target) + '_' + imgfmt + '.' + 'av1'\n encoded_image = os.path.join('outputs', codecname, encoded_image_name)\n decoded_image = os.path.join('outputs', codecname, 'decoded', encoded_image_name + '.' + imgfmt)\n original_image = image\n elif codecname == 'kakadu' and classname[:6] == 'classB':\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '_' + str(bpp_target) + '_' + imgfmt + '.' + codecname\n encoded_image = os.path.join('outputs', codecname, encoded_image_name)\n decoded_image = os.path.join('outputs', codecname, 'decoded', encoded_image_name + '.' + imgfmt)\n original_image = image\n elif 'xavs' in codecname and classname[:6] == 'classB':\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '_' + str(bpp_target) + '_' + imgfmt + '.' + codecname\n encoded_image = os.path.join('outputs', codecname, encoded_image_name)\n decoded_image = os.path.join('outputs', codecname, 'decoded', encoded_image_name + '.' + imgfmt)\n original_image = image\n elif codecname == 'fvdo' and classname[:6] == 'classB':\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '_' + str(bpp_target) + '_pgm' + '.' + codecname\n encoded_image = os.path.join('outputs', codecname, encoded_image_name)\n decoded_image = os.path.join('outputs', codecname, 'decoded', encoded_image_name + '.pgm')\n original_image = image\n else:\n if codecname == 'fuif' and 'tif' in imgfmt:\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '.tif_' + str(bpp_target) + '_' + pix_fmt + '.' + codecname\n elif codecname == 'webp' or codecname == 'tat':\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '_' + str(bpp_target) + '_yuv420p.' + codecname\n else:\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '_' + str(bpp_target) + '_' + pix_fmt + '.' 
+ codecname\n encoded_image = os.path.join('outputs', codecname, encoded_image_name)\n decoded_image_path = os.path.join('outputs', codecname, 'decoded')\n decoded_image = ''\n for decodedfile in os.listdir(decoded_image_path):\n encoderoot = '_'.join(os.path.splitext(os.path.basename(encoded_image_name))[0].split('_')[:-1])\n if encoderoot in decodedfile:\n if ('tat' in codecname or 'webp' in codecname) and os.path.splitext(os.path.basename(decodedfile))[1] == '.yuv':\n decoded_image = os.path.join('outputs', codecname, 'decoded', decodedfile)\n print(decoded_image)\n if ('tat' not in codecname or 'webp' not in codecname) and os.path.splitext(os.path.basename(decodedfile))[1] != '.yuv':\n decoded_image = os.path.join('outputs', codecname, 'decoded', decodedfile)\n if 'classE' not in classname and 'classB' not in classname and os.path.isfile(decoded_image):\n decoded_image = convert_decoded(decoded_image, width, height, depth, codecname)\n original_image = convert_decoded(derivative_image, width, height, depth, 'reference')\n else:\n original_image = derivative_image\n\n print('Reference:' + original_image)\n print('Encoded:' + encoded_image)\n print('Decoded:' + decoded_image)\n if (os.path.isfile(original_image) and os.path.isfile(decoded_image) and os.path.isfile(encoded_image)):\n if 'classE' in classname:\n metrics = compute_metrics_HDR(original_image, decoded_image, encoded_image, bpp_target,\n codecname, width, height, pix_fmt, depth)\n\n elif 'classB' in classname:\n metrics = compute_metrics(original_image, decoded_image, encoded_image, bpp_target, codecname,\n width, height, pix_fmt)\n else:\n metrics = compute_metrics_SDR(original_image, decoded_image, encoded_image, bpp_target,\n codecname, width,\n height, imgfmt, depth)\n measured_bpp = (os.path.getsize(encoded_image) * 1.024 * 8) / (float((int(width) * int(height))))\n bpp_target_metrics[measured_bpp] = metrics\n else:\n continue\n \n derivative_image_metrics[codecname] = bpp_target_metrics\n main_dict[derivative_image] = derivative_image_metrics\n\n mkdir_p(json_dir)\n with open(json_file, 'w') as f:\n f.write(json.dumps(main_dict, indent=2))", "def run(self):\n self.run_tasks()\n self.images = np.array(self.images)\n self.shapes.extend(self.images.shape[-2:])\n\n self.images = np.reshape(self.images, self.shapes)", "def preprocess(img):\n dim=(227,227)\n resize_width = 224\n resize_height = 224\n\n img=cv2.resize(img,dim)\n #img=cv2.normalize(img,None,alpha=0,beta=1,norm_type=cv2.NORM_MINMAX,dtype=cv2.CV_32F)\n img = img.astype(numpy.float32)\n\n #Preprocess image changing the RGB pixel values to\t the values the network needs\n # to do this we subtract the mean and multiply the std for each channel (R, G and B)\n # these mean and std values come from the stat.txt file that must accompany the\n # graph file for the network.\n \n img[:,:,0] = (img[:,:,0] - gNetworkMean[0])\n img[:,:,1] = (img[:,:,1] - gNetworkMean[1])\n img[:,:,2] = (img[:,:,2] - gNetworkMean[2])\n \n\n # Finally we return the values as Float16 rather than Float32 as that is what the network expects.\n cv2.imshow(\"Frame\", img)\n return img.astype('float16') #age_float_array.astype(numpy.float16)", "def main():\n me = SimpleImage(\"images/me.JPG\")\n dinosaur = SimpleImage(\"images/dinosaur.jpg\")\n\n dinosaur.make_as_big_as(me)\n combine = magic(me, dinosaur)\n combine.show()", "def patch_resize(c, graph, node_select):\r\n return c", "def __init__(self, embed_size):\n super(ImgAttentionEncoder, self).__init__()\n vggnet_feat = 
models.vgg19(pretrained=True).features\n modules = list(vggnet_feat.children())[:-2]\n self.cnn = nn.Sequential(*modules)\n self.fc = nn.Sequential(nn.Linear(self.cnn[-3].out_channels, embed_size),\n nn.Tanh()) # feature vector of image", "def handle_image(name):\n from_path = args.from_dir + name\n to_path = args.to_dir + name\n\n if width != args.width:\n subprocess.call('jpegtran -rotate 90 -grayscale ' + from_path + ' > ' \\\n + to_path, shell=True)\n else:\n subprocess.call('jpegtran -grayscale ' + from_path + ' > ' + to_path,\\\n shell=True)", "def paint(self, event):\r\n width, height = self.imageView.Size\r\n dimension = min(width, height)\r\n\r\n if dimension < self.image.dimension:\r\n resizeQuality = wx.IMAGE_QUALITY_BICUBIC\r\n elif dimension < self.image.dimension * 2:\r\n resizeQuality = wx.IMAGE_QUALITY_BILINEAR\r\n else:\r\n resizeQuality = wx.IMAGE_QUALITY_NORMAL\r\n\r\n image = self.image.image().Scale(dimension, dimension, resizeQuality)\r\n\r\n self.imageView.Refresh()\r\n\r\n dc = wx.AutoBufferedPaintDC(self.imageView)\r\n dc.Clear()\r\n dc.DrawBitmap(wx.Bitmap(image),\r\n (width - dimension) // 2,\r\n (height - dimension) // 2)", "def _handle_image_descriptors(self):\n while self.file_content[self.data_idx] == 0x2c:\n img_left = self.file_content[self.data_idx + 1] + \\\n (self.file_content[self.data_idx + 2] << 8)\n img_top = self.file_content[self.data_idx + 3] + \\\n (self.file_content[self.data_idx + 4] << 8)\n img_width = self.file_content[self.data_idx+5] + \\\n (self.file_content[self.data_idx + 6] << 8)\n #img_height = self.file_content[self.data_idx+7] + \\\n # (self.file_content[self.data_idx + 8] << 8)\n flags = self.file_content[self.data_idx + 9]\n local_col_table_flag = (flags & 0b10000000) != 0\n #interlace_flag = (flags & 0b01000000) != 0\n self.data_idx = self.data_idx + 10\n if local_col_table_flag:\n # read local color table\n print('read local color table. 
Not implemented yet')\n\n self.lzw_min_code_sz = self.file_content[self.data_idx]\n self.data_idx = self.data_idx + 1\n\n pix_xix = img_left\n pix_yix = img_top\n subblock_data = []\n while self.file_content[self.data_idx] != 0:\n subblock_sz = self.file_content[self.data_idx]\n self.data_idx = self.data_idx + 1\n subblock_data += self.file_content[self.data_idx:self.data_idx + subblock_sz]\n self.data_idx = self.data_idx + subblock_sz\n self.data_idx = self.data_idx + 1\n dec_data = self.decode_subblock(subblock_data)\n for dat in dec_data:\n self.output_image[pix_xix][pix_yix][0] = self.color_table[dat][0]\n self.output_image[pix_xix][pix_yix][1] = self.color_table[dat][1]\n self.output_image[pix_xix][pix_yix][2] = self.color_table[dat][2]\n pix_xix = pix_xix + 1\n if pix_xix == img_left + img_width:\n pix_xix = img_left\n pix_yix = pix_yix + 1", "def process_image(overviews, db_graph, input_filename, color, out_raster_srs):\n if verbose > 0:\n print(\"~~~process_image\")\n input_image = gdal.Open(input_filename)\n stem = Path(input_filename).stem\n if not(\"dataSet\" in overviews):\n overviews['dataSet'] = {}\n overviews['dataSet']['boundingBox'] = {}\n overviews['dataSet']['limits'] = {}\n\n tile_limits = get_tile_limits(input_filename)\n\n if not(\"LowerCorner\" in overviews['dataSet']['boundingBox']):\n overviews['dataSet']['boundingBox'] = tile_limits\n else:\n if tile_limits['LowerCorner'][0] < overviews['dataSet']['boundingBox']['LowerCorner'][0]:\n overviews['dataSet']['boundingBox']['LowerCorner'][0] = tile_limits['LowerCorner'][0]\n if tile_limits['LowerCorner'][1] < overviews['dataSet']['boundingBox']['LowerCorner'][1]:\n overviews['dataSet']['boundingBox']['LowerCorner'][1] = tile_limits['LowerCorner'][1]\n if tile_limits['UpperCorner'][0] > overviews['dataSet']['boundingBox']['UpperCorner'][0]:\n overviews['dataSet']['boundingBox']['UpperCorner'][0] = tile_limits['UpperCorner'][0]\n if tile_limits['UpperCorner'][1] > overviews['dataSet']['boundingBox']['UpperCorner'][1]:\n overviews['dataSet']['boundingBox']['UpperCorner'][1] = tile_limits['UpperCorner'][1]\n\n # for z in tiles:\n for tile_z in range(overviews['level']['min'], overviews['level']['max'] + 1):\n print('Niveau de zoom : ', tile_z)\n\n resolution = overviews['resolution'] * 2 ** (overviews['level']['max'] - tile_z)\n\n MinTileCol = \\\n math.floor(round((tile_limits['LowerCorner'][0] - overviews['crs']['boundingBox']['xmin'])/(resolution*overviews['tileSize']['width']),8))\n MinTileRow = \\\n math.floor(round((overviews['crs']['boundingBox']['ymax']-tile_limits['UpperCorner'][1])/(resolution*overviews['tileSize']['height']),8))\n MaxTileCol = \\\n math.ceil(round((tile_limits['UpperCorner'][0] - overviews['crs']['boundingBox']['xmin'])/(resolution*overviews['tileSize']['width']),8)) - 1\n MaxTileRow = \\\n math.ceil(round((overviews['crs']['boundingBox']['ymax']-tile_limits['LowerCorner'][1])/(resolution*overviews['tileSize']['height']),8)) - 1\n\n if not( str(tile_z) in overviews['dataSet']['limits'] ):\n overviews['dataSet']['limits'][str(tile_z)] = {\n 'MinTileCol': MinTileCol,\n 'MinTileRow': MinTileRow,\n 'MaxTileCol': MaxTileCol,\n 'MaxTileRow': MaxTileRow,\n }\n\n else:\n if MinTileCol < overviews['dataSet']['limits'][str(tile_z)]['MinTileCol']:\n overviews['dataSet']['limits'][str(tile_z)]['MinTileCol'] = MinTileCol\n if MinTileRow < overviews['dataSet']['limits'][str(tile_z)]['MinTileRow']:\n overviews['dataSet']['limits'][str(tile_z)]['MinTileRow'] = MinTileRow\n if MaxTileCol > 
overviews['dataSet']['limits'][str(tile_z)]['MaxTileCol']:\n overviews['dataSet']['limits'][str(tile_z)]['MaxTileCol'] = MaxTileCol\n if MaxTileRow > overviews['dataSet']['limits'][str(tile_z)]['MaxTileRow']:\n overviews['dataSet']['limits'][str(tile_z)]['MaxTileRow'] = MaxTileRow\n\n for tile_x in range(MinTileCol, MaxTileCol + 1): \n for tile_y in range(MinTileRow, MaxTileRow + 1):\n # on cree une image 3 canaux pour la tuile\n opi = create_blank_tile(overviews, {'x': tile_x, 'y': tile_y, 'resolution': resolution}, 3, out_raster_srs)\n # on reech l'OPI dans cette image\n gdal.Warp(opi, input_image)\n # si necessaire on cree le dossier de la tuile\n tile_dir = args.cache+'/'+str(tile_z)+'/'+str(tile_y)+'/'+str(tile_x)\n Path(tile_dir).mkdir(parents=True, exist_ok=True)\n # on export en jpeg (todo: gerer le niveau de Q)\n PNG_DRIVER.CreateCopy(tile_dir+\"/\"+stem+\".png\", opi)\n # on cree une image mono canal pour la tuile\n mask = create_blank_tile(overviews, {'x': tile_x, 'y': tile_y, 'resolution': resolution}, 3, out_raster_srs)\n # on rasterise la partie du graphe qui concerne ce cliche\n gdal.Rasterize(mask, db_graph,\n SQLStatement='select geom from ' + args.table + ' where cliche = \\''+stem+'\\' ')\n img_mask = mask.GetRasterBand(1).ReadAsArray()\n # si le mask est vide, on a termine\n val_max = np.amax(img_mask)\n if val_max > 0:\n # on cree le graphe et l'ortho\n ortho = create_blank_tile(overviews, {'x': tile_x, 'y': tile_y, 'resolution': resolution}, 3, out_raster_srs)\n graph = create_blank_tile(overviews, {'x': tile_x, 'y': tile_y, 'resolution': resolution}, 3, out_raster_srs)\n if Path(tile_dir+\"/ortho.png\").is_file():\n existing_ortho = gdal.Open(tile_dir+\"/ortho.png\")\n existing_graph = gdal.Open(tile_dir+\"/graph.png\")\n else:\n existing_ortho = False\n existing_graph = False\n for i in range(3):\n opi_i = opi.GetRasterBand(i+1).ReadAsArray()\n if existing_ortho:\n ortho_i = existing_ortho.GetRasterBand(i+1).ReadAsArray()\n else:\n ortho_i = ortho.GetRasterBand(i+1).ReadAsArray()\n opi_i[(img_mask == 0)] = 0\n ortho_i[(img_mask != 0)] = 0\n ortho.GetRasterBand(i+1).WriteArray(np.add(opi_i, ortho_i))\n if existing_graph:\n graph_i = existing_graph.GetRasterBand(i+1).ReadAsArray()\n else:\n graph_i = graph.GetRasterBand(i+1).ReadAsArray()\n graph_i[(img_mask != 0)] = color[i]\n graph.GetRasterBand(i+1).WriteArray(graph_i)\n existing_ortho = None\n existing_graph = None\n PNG_DRIVER.CreateCopy(tile_dir+\"/ortho.png\", ortho)\n PNG_DRIVER.CreateCopy(tile_dir+\"/graph.png\", graph)", "def modify_image(self, example, target_label):\n raise NotImplementedError()", "def compress(self, tensor):", "def img_recolor(self, args, input_image_path):\n \n ec = encoder.Encoder(output_path=args.intermediate_representation, method=args.method,\n size=args.size, p=args.p, grid_size=args.grid_size, plot=args.plot, quantize=args.quantize)\n dc = decoder.Decoder(output_path=args.output_path, method=args.method, size=args.size, p=args.p, gpu_id=args.gpu_id, plot=args.plot)\n\n ec.encode(input_image_path)\n img_gray_name = ar_utils.gen_new_gray_filename(input_image_path)\n img_gray_path = os.path.join(args.intermediate_representation, img_gray_name)\n dc.decode(img_gray_path)\n\n if args.delete_gray and os.path.exists(img_gray_path):\n os.remove(img_gray_path)", "def run_visualization(image):\n # for image in images:\n try:\n with tf.gfile.FastGFile(image, 'rb') as f:\n jpeg_str = f.read()\n original_im = Image.open(BytesIO(jpeg_str))\n except IOError:\n print('Cannot retrieve image')\n 
return\n\n # print('running deeplab on image {0}'.format(image))\n resized_im, seg_map = MODEL.run(original_im)\n seg_map = seg_map.astype(np.uint8) * 255\n resized_im = np.array(resized_im, dtype=np.uint8)\n resized_im = cv2.cvtColor(resized_im, cv2.COLOR_BGR2RGB)\n # vis_segmentation(resized_im, seg_map,FULL_COLOR_MAP ,LABEL_NAMES)\n overlay_image = cv2.addWeighted(resized_im, 0.8, cv2.merge((seg_map * 0, seg_map, seg_map * 0)), 0.2, 0)\n # time.sleep(params.SEC_BETWEEN_PREDICTION)\n\n return resized_im, seg_map, overlay_image.astype(np.uint8)", "def augmentation_nine(filename, aug_type, max_H, max_W, folder=CONFIG.data_folder):\r\n\r\n # image = rgb2grey(mpimg.imread(os.path.join(folder, filename)))\r\n\r\n # rotating a 214 pixel image by 2 deg yield 8 more pixels\r\n image_augmented = np.ones(shape=(max_H, max_W))\r\n image = Image.open(os.path.join(folder, filename))\r\n image = image.convert('RGB')\r\n # note that Image read rgb imgs as 0-255\r\n #################################\r\n # aug_type = 8\r\n\r\n w_ori, h_ori = image.size\r\n\r\n rotate_ind = aug_type % 3\r\n scale_ind = aug_type // 3\r\n\r\n image = PIL.ImageOps.invert(image)\r\n if rotate_ind == 1:\r\n image = image.rotate(2, expand=True)\r\n elif rotate_ind == 2:\r\n image = image.rotate(-2, expand=True)\r\n image = PIL.ImageOps.invert(image)\r\n\r\n h, w = image.size\r\n\r\n if scale_ind == 1:\r\n h, w = np.int(np.floor(h * 0.98)), np.int(np.floor(w * 0.98))\r\n image = image.resize((h, w))\r\n elif scale_ind == 2:\r\n h, w = np.int(np.floor(h * 0.96)), np.int(np.floor(w * 0.96))\r\n image = image.resize((h, w))\r\n\r\n # put image there. 9 images in total. this enhalts shifting.\r\n # scale to (0-1)\r\n image = rgb2grey(np.array(image) / 255)\r\n\r\n h, w = np.shape(image)\r\n\r\n stride_0, stride_1 = (max_H - 10 - h_ori) // 2, (max_W - 10 - w_ori) // 2\r\n offset = ((aug_type % 3) * stride_0, (aug_type % 3) * stride_1)\r\n try:\r\n image_augmented[offset[0]: h + offset[0], offset[1]: w + offset[1]] = image\r\n except ValueError:\r\n print(filename)\r\n\r\n return image_augmented", "def add_image(self, f_name,file,new_id):\r\n folder=tempfile.mktemp()\r\n os.mkdir(folder)\r\n datei=open(folder+'/'+f_name,'w+')\r\n datei.write(file.read())\r\n datei.close()\r\n val='' \r\n liste_ext=liste_val\r\n if(self.toolbox.hasProperty('eigene_formate')):\r\n self_val=self.toolbox.getProperty('eigene_formate').split(',')\r\n liste_ext=[]\r\n for x in self_val:\r\n liste_ext.append('_'+x+'.jpeg')\r\n for extension in liste_ext:\r\n #cmd='/usr/bin/convert '+folder+'/'+f_name+' -resize '+extension[1:-4]+'x'+extension[1:-4]+' '+folder+'/'+new_id+extension\r\n cmd='/usr/bin/convert '+folder+'/'+f_name+' -resize '+extension[1:-4]+' '+folder+'/'+new_id+extension\r\n order=os.popen(cmd).read()\r\n kurz_name='_'+str(f_name.split('.')[0])\r\n kurz_name=kurz_name.replace(' ','_')\r\n val=val+self.manage_addImage(id=new_id+kurz_name+extension,file=open(folder+'/'+new_id+extension),title=f_name, precondition='', content_type='',REQUEST=None)+' ' \r\n os.remove(folder+'/'+new_id+extension)\r\n os.remove(folder+'/'+f_name)\r\n os.rmdir(folder)\r\n txt=\"Datei Hochgeladen!<br>\"\r\n #my_root=self.toolbox\r\n #txt+=my_root.id+\"<br>\"\r\n #if(my_root.hasProperty('eigene_formate')):\r\n # txt+=my_root.getProperty('eigene_formate')+\"<br>\"\r\n return txt", "def add_png_decoding(input_width, input_height, input_depth):\n base64_str = tf.placeholder(tf.string, name='input_string')\n input_str = tf.decode_base64(base64_str)\n decoded_image = 
tf.image.decode_png(input_str, channels=input_depth)\n # Convert from full range of uint8 to range [0,1] of float32.\n decoded_image_as_float = tf.image.convert_image_dtype(decoded_image,\n tf.float32)\n decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)\n resize_shape = tf.stack([input_height, input_width])\n resize_shape_as_int = tf.cast(resize_shape, dtype=tf.int32)\n resized_image = tf.image.resize_bilinear(decoded_image_4d,\n resize_shape_as_int)\n tf.identity(resized_image, name=\"DecodePNGOutput\")\n return input_str, resized_image" ]
[ "0.6741079", "0.6175979", "0.6148263", "0.59459597", "0.59365785", "0.58881974", "0.58312756", "0.58165944", "0.56556207", "0.56259316", "0.5575787", "0.55362195", "0.55134434", "0.54918855", "0.5491029", "0.54815876", "0.5451435", "0.5449056", "0.5448799", "0.5446489", "0.54234874", "0.54153687", "0.54134536", "0.5407508", "0.53719753", "0.53681713", "0.5352389", "0.5350282", "0.53464985", "0.53119856", "0.5307585", "0.5280521", "0.5275544", "0.5274483", "0.52708346", "0.52677035", "0.5260692", "0.52599895", "0.5251053", "0.5237333", "0.5235547", "0.52348596", "0.5231995", "0.5214596", "0.5209954", "0.5209954", "0.51914203", "0.51863956", "0.5173829", "0.5159295", "0.51585084", "0.5149895", "0.5136398", "0.51307166", "0.5130353", "0.51173145", "0.51079565", "0.50982916", "0.5097301", "0.5096404", "0.50913626", "0.5073744", "0.5072237", "0.50665134", "0.50656974", "0.50458544", "0.50439274", "0.5043671", "0.503699", "0.5027496", "0.5027272", "0.50213337", "0.50179565", "0.5017864", "0.500142", "0.5000287", "0.49986458", "0.49962923", "0.49951136", "0.49931628", "0.49888653", "0.4988313", "0.49842137", "0.49768075", "0.49763465", "0.497048", "0.4954053", "0.49501994", "0.49488634", "0.49425828", "0.49373317", "0.49340475", "0.49330142", "0.49216527", "0.49188945", "0.49116147", "0.49104118", "0.4906942", "0.49062958", "0.4905687", "0.49035758" ]
0.0
-1
Creates input_fn according to parameters
def get_input_fn(input_file_names, batch_size=1, num_epochs=None, shuffle=False, shard_size=3000, return_full_size_image=False): def parse_fn(example): """Parse TFExample records and perform simple data augmentation.""" example_fmt = { "image": tf.FixedLenFeature((), tf.string), "target": tf.FixedLenFeature((), tf.float32, -1) } parsed = tf.parse_single_example(example, example_fmt) if return_full_size_image: preprocessed_image, full_size_image = _image_preprocess_fn( image_buffer=parsed["image"], input_height=299, input_width=299, input_mean=128, input_std=128, return_full_size_image=True) return preprocessed_image, parsed["target"], full_size_image preprocessed_image = _image_preprocess_fn(image_buffer=parsed["image"], input_height=299, input_width=299, input_mean=128, input_std=128) return preprocessed_image, parsed["target"] def input_fn(): file_names = tf.constant(input_file_names, dtype=tf.string, name='input_file_names') if shuffle: num_shards = len(input_file_names) files = tf.data.Dataset.from_tensor_slices(file_names).shuffle(num_shards) dataset = files.interleave(tf.data.TFRecordDataset, cycle_length=3) dataset = dataset.shuffle(buffer_size=shard_size*2) else: dataset = tf.data.TFRecordDataset(file_names) dataset = dataset.map(map_func=parse_fn, num_parallel_calls=FLAGS.num_parallel_img_parsers) dataset = dataset.batch(batch_size=batch_size) dataset = dataset.repeat(num_epochs) # the input is repeated indefinitely if num_epochs is None dataset = dataset.prefetch(buffer_size=64) # print("Dataset ouput types: {}".format(dataset.output_types)) # print("Dataset ouput shapes: {}".format(dataset.output_shapes)) iterator = dataset.make_one_shot_iterator() return iterator.get_next() return input_fn
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def input_fn(sources, train, params):\n \n raise NotImplementedError", "def _input_fn(input_pipeline_context=None):\n return _create_dataset(options, is_training, input_pipeline_context)", "def generate_input_fn(mode='TRAIN'):\n mode = mode.upper()\n if mode == 'TRAIN' or mode == 'EVAL':\n return input_fn\n elif mode == 'PREDICT' or mode == 'NOISE':\n return noise_input_fn\n else:\n raise ValueError('Incorrect mode provided')", "def get_input_fn(is_train):\n d = DataInfo(ddir,evalddir)\n hparams = d.generate()\n params = utils.Params(**hparams)\n\n if is_train:\n input_fn = data.get_input_fn(dataset_fn=data.get_train_dataset, mode=TRAIN, params=params, shuffle_queue=10000, repeat=False)\n \n else:\n input_fn = data.get_input_fn(dataset_fn=data.get_eval_dataset, mode=EVAL, params=params, shuffle_queue=10000, repeat=False)\n \n return input_fn, params", "def input_fn(self, params, mode=tf.estimator.ModeKeys.TRAIN):\n raise NotImplementedError()", "def input_fn(params=None):\n del params\n if is_tpu:\n features = get_input_fn_dataset(pattern, flags, batch_size)()[0]\n else:\n features = get_input_fn_queue(pattern, flags, batch_size)()[0]\n\n if flags.color_data_augment:\n\n def augment_img(image):\n image = tf.image.random_hue(image, .5)\n return image\n\n features[IMAGE_FEATURE_NAME] = tf.map_fn(\n augment_img, features[IMAGE_FEATURE_NAME], parallel_iterations=32)\n\n return features, None", "def build_training_input_fn():\n transformed_metadata = metadata_io.read_metadata(\n os.path.join(\n MODEL_DIR, transform_fn_io.TRANSFORMED_METADATA_DIR))\n transformed_feature_spec = transformed_metadata.schema.as_feature_spec()\n\n def input_fn():\n \"\"\"Input function for training and eval.\"\"\"\n dataset = tf.contrib.data.make_batched_features_dataset(\n file_pattern=os.path.join(TFRECORD_DIR, '*'),\n batch_size=BATCH_SIZE,\n features=transformed_feature_spec,\n reader=tf.data.TFRecordDataset,\n shuffle=True)\n transformed_features = dataset.make_one_shot_iterator().get_next()\n # Extract features and labels from the transformed tensors.\n label_cols = set(['TotalVolume', 'Density', 'Temperature', 'Humidity', 'Energy', 'Problems'])\n transformed_labels = {key: value for (key, value) in transformed_features.items() if key in label_cols}\n transformed_features = {key: value for (key, value) in transformed_features.items() if key not in label_cols}\n return transformed_features, transformed_labels\n\n return input_fn", "def _input_fn(params):\n batch_size = params['batch_size']\n inputs, labels, lengths = sequence_example_lib.get_fake_data_batch(\n batch_size, input_size, padding_length)\n\n features = {\n 'inputs': inputs,\n 'lengths': lengths\n }\n return features, labels", "def from_input_fn(return_values):\n if isinstance(return_values, dataset_ops.Dataset):\n dataset = return_values\n return _Inputs(dataset=dataset)\n\n features, labels = _Inputs._parse_inputs(return_values)\n return _Inputs(features, labels)", "def _make_train_input_fn(is_classification):\n\n def _input_fn():\n features = dict(FEATURES_DICT)\n if is_classification:\n labels = CLASSIFICATION_LABELS\n else:\n labels = REGRESSION_LABELS\n return features, labels\n\n return _input_fn", "def get_input_fn(pattern, flags, batch_size, is_tpu):\n\n def input_fn(params=None):\n \"\"\"Calls the appropriate input_fn and augments the data.\"\"\"\n del params\n if is_tpu:\n features = get_input_fn_dataset(pattern, flags, batch_size)()[0]\n else:\n features = get_input_fn_queue(pattern, flags, batch_size)()[0]\n\n if 
flags.color_data_augment:\n\n def augment_img(image):\n image = tf.image.random_hue(image, .5)\n return image\n\n features[IMAGE_FEATURE_NAME] = tf.map_fn(\n augment_img, features[IMAGE_FEATURE_NAME], parallel_iterations=32)\n\n return features, None\n\n return input_fn", "def input_fn_builder(features, seq_length):\n\n all_unique_ids = []\n all_input_ids = []\n all_input_mask = []\n all_input_type_ids = []\n\n for feature in features:\n all_unique_ids.append(feature.unique_id)\n all_input_ids.append(feature.input_ids)\n all_input_mask.append(feature.input_mask)\n all_input_type_ids.append(feature.input_type_ids)\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"unique_ids\":\n tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32),\n \"input_ids\":\n tf.constant(all_input_ids, \n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_type_ids\":\n tf.constant(all_input_type_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n })\n\n d = d.batch(batch_size=batch_size, drop_remainder=False)\n return d\n\n return input_fn", "def _input_fn(params):\n # Retrieves the batch size for the current shard. The # of shards is\n # computed according to the input pipeline deployment. See\n # `tf.contrib.tpu.RunConfig` for details.\n batch_size = params['batch_size']\n inputs, labels, lengths = sequence_example_lib.get_padded_batch(\n file_paths, batch_size, input_size, padding_length)\n features = {\n 'inputs': inputs,\n 'lengths': lengths,\n }\n return features, labels", "def build_input_fn(mode,\n hparams,\n data_file_patterns=None,\n num_datashards=None,\n fixed_problem=None,\n worker_replicas=None,\n worker_id=None):\n\n def input_fn():\n \"\"\"Supplies input to our model.\n\n This function supplies input to our model, where this input is a\n function of the mode. 
For example, we supply different data if\n we're performing training versus evaluation.\n\n Returns:\n A tuple consisting of 1) a dictionary of tensors whose keys are\n the feature names, and 2) a tensor of target labels if the mode\n is not INFER (and None, otherwise).\n\n Raises:\n ValueError: if one of the parameters has an unsupported value.\n \"\"\"\n problem_count, batches = len(hparams.problems), []\n with tf.name_scope(\"input_reader\"):\n for n in xrange(problem_count):\n if fixed_problem is not None and n != fixed_problem:\n continue\n problem_instance = hparams.problem_instances[n]\n p_hparams = hparams.problems[n]\n with tf.name_scope(\"problem_%d\" % n):\n with tf.device(\"/cpu:0\"): # Input reading on CPU\n capacity = (\n p_hparams.max_expected_batch_size_per_shard * num_datashards)\n feature_map = data_reader.input_pipeline(\n problem_instance, data_file_patterns and data_file_patterns[n],\n capacity, mode, hparams,\n data_reader.hparams_to_batching_scheme(\n hparams,\n shard_multiplier=num_datashards,\n drop_long_sequences=(mode == tf.contrib.learn.ModeKeys.TRAIN\n or hparams.eval_drop_long_sequences),\n length_multiplier=(p_hparams.batch_size_multiplier)))\n\n # Reverse inputs and targets features if the problem was reversed.\n if problem_instance is not None:\n problem_instance.maybe_reverse_features(feature_map)\n problem_instance.maybe_copy_features(feature_map)\n else:\n if p_hparams.was_reversed:\n inputs = feature_map[\"inputs\"]\n targets = feature_map[\"targets\"]\n feature_map[\"inputs\"] = targets\n feature_map[\"targets\"] = inputs\n # Use the inputs as the targets if the problem is a copy problem.\n if p_hparams.was_copy:\n feature_map[\"targets\"] = feature_map[\"inputs\"]\n\n # Ensure inputs and targets are proper rank.\n while len(feature_map[\"inputs\"].get_shape()) != 4:\n feature_map[\"inputs\"] = tf.expand_dims(feature_map[\"inputs\"], axis=-1)\n while len(feature_map[\"targets\"].get_shape()) != 4:\n feature_map[\"targets\"] = tf.expand_dims(\n feature_map[\"targets\"], axis=-1)\n\n batches.append((feature_map[\"inputs\"], feature_map[\"targets\"],\n tf.constant(n), tf.constant(p_hparams.input_space_id),\n tf.constant(p_hparams.target_space_id)))\n\n # We choose which problem to process.\n loss_moving_avgs = [] # Need loss moving averages for that.\n for n in xrange(problem_count):\n with tf.variable_scope(\"losses_avg\"):\n loss_moving_avgs.append(\n tf.get_variable(\n \"problem_%d/total_loss\" % n, initializer=100.0,\n trainable=False))\n if fixed_problem is None:\n if (hparams.problem_choice == \"uniform\" or\n mode != tf.contrib.learn.ModeKeys.TRAIN):\n problem_choice = tf.random_uniform(\n [], maxval=problem_count, dtype=tf.int32)\n elif hparams.problem_choice == \"adaptive\":\n loss_moving_avgs = tf.stack(loss_moving_avgs)\n problem_choice = tf.multinomial(\n tf.reshape(loss_moving_avgs, [1, -1]), 1)\n problem_choice = tf.to_int32(tf.squeeze(problem_choice))\n elif hparams.problem_choice == \"distributed\":\n assert worker_replicas >= problem_count\n assert worker_replicas % problem_count == 0\n problem_choice = tf.to_int32(worker_id % problem_count)\n else:\n raise ValueError(\n \"Value of hparams.problem_choice is %s and must be \"\n \"one of [uniform, adaptive, distributed]\" % hparams.problem_choice)\n\n # Inputs and targets conditional on problem_choice.\n rand_inputs, rand_target, choice, inp_id, tgt_id = cond_on_index(\n lambda n: batches[n], problem_choice, 0, problem_count - 1)\n else:\n problem_choice = tf.constant(fixed_problem)\n # 
Take the only constructed batch, which is the fixed_problem.\n rand_inputs, rand_target, choice, inp_id, tgt_id = batches[0]\n\n # Set shapes so the ranks are clear.\n rand_inputs.set_shape([None, None, None, None])\n rand_target.set_shape([None, None, None, None])\n choice.set_shape([])\n inp_id.set_shape([])\n tgt_id.set_shape([])\n # Forced shape obfuscation is necessary for inference.\n if mode == tf.contrib.learn.ModeKeys.INFER:\n rand_inputs._shape = tf.TensorShape([None, None, None, None]) # pylint: disable=protected-access\n rand_target._shape = tf.TensorShape([None, None, None, None]) # pylint: disable=protected-access\n\n # Final feature map.\n rand_feature_map = {\n \"inputs\": rand_inputs,\n \"problem_choice\": choice,\n \"input_space_id\": inp_id,\n \"target_space_id\": tgt_id\n }\n if mode == tf.contrib.learn.ModeKeys.INFER:\n rand_feature_map[\"infer_targets\"] = rand_target\n rand_target = None\n # This is because of a bug in the tf.contrib.learn Estimator that\n # short-circuits prediction if it doesn't see a QueueRunner.\n # DummyQueueRunner implements the minimal expected interface but does\n # nothing.\n # TODO(rsepassi): Remove once we move to core Estimator.\n tf.add_to_collection(tf.GraphKeys.QUEUE_RUNNERS, DummyQueueRunner())\n return rand_feature_map, rand_target\n\n return input_fn", "def serving_input_fn(self):\n label_ids = tf.placeholder(tf.int32, [None], name='label_ids')\n input_ids = tf.placeholder(\n tf.int32, [None, self.params[\"MAX_SEQ_LENGTH\"]], name='input_ids')\n input_mask = tf.placeholder(\n tf.int32, [None, self.params[\"MAX_SEQ_LENGTH\"]], name='input_mask')\n segment_ids = tf.placeholder(\n tf.int32, [None, self.params[\"MAX_SEQ_LENGTH\"]], name='segment_ids')\n input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({\n 'label_ids': label_ids,\n 'input_ids': input_ids,\n 'input_mask': input_mask,\n 'segment_ids': segment_ids})()\n return input_fn", "def input_fn(params):\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. 
The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"unique_ids\":\n tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32),\n \"input_ids\":\n tf.constant(all_input_ids, \n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_type_ids\":\n tf.constant(all_input_type_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n })\n\n d = d.batch(batch_size=batch_size, drop_remainder=False)\n return d", "def input_fn_builder(input_file, seq_length, is_test, is_training, drop_remainder):\n\n name_to_features = {\n \"unique_ids\": tf.FixedLenFeature([], tf.int64),\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n }\n\n if not is_test:\n name_to_features[\"label\"] = tf.FixedLenFeature([], tf.int64)\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n batch_size = params[\"train_batch_size\"]\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n else:\n batch_size = params[\"predict_batch_size\"]\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d\n\n return input_fn", "def _call_input_fn(self, input_fn, mode):\n input_fn_args = util.fn_args(input_fn)\n config = self.config # a deep copy.\n kwargs = {}\n if 'params' in input_fn_args:\n kwargs['params'] = self.params # a deep copy.\n else:\n raise ValueError('input_fn ({}) does not include params argument, '\n 'required by TPUEstimator to pass batch size as '\n 'params[\"batch_size\"]'.format(input_fn))\n if 'config' in input_fn_args:\n kwargs['config'] = config\n\n if 'mode' in input_fn_args:\n kwargs['mode'] = mode\n\n # Records the fact input_fn has been invoked.\n self._is_input_fn_invoked = True\n\n with self._ctx.with_mode(mode) as ctx:\n # Setting the batch size in params first. This helps user to have same\n # input_fn for use_tpu=True/False.\n batch_size_for_input_fn = ctx.batch_size_for_input_fn\n if batch_size_for_input_fn is not None:\n if isinstance(kwargs['params'], hparam.HParams):\n kwargs['params'].add_hparam(_BATCH_SIZE_KEY, batch_size_for_input_fn)\n else:\n kwargs['params'][_BATCH_SIZE_KEY] = batch_size_for_input_fn\n\n # For export_savedmodel, input_fn is never passed to Estimator. So,\n # `is_export_mode` must be False.\n if ctx.is_running_on_cpu(is_export_mode=False):\n with ops.device('/device:CPU:0'):\n return input_fn(**kwargs)\n\n # For TPU computation, input_fn should be invoked in a tf.while_loop for\n # performance. While constructing the tf.while_loop, the structure of\n # inputs returned by the `input_fn` needs to be recorded. The structure\n # includes whether features or labels is dict or single Tensor, dict keys,\n # tensor shapes, and dtypes. 
The recorded structure is used to create the\n # infeed dequeue ops, which must be wrapped and passed as a Fn, called\n # inside the TPU computation, as the TPU computation is wrapped inside a\n # tf.while_loop also. So, we either pass input_fn to model_fn or pass\n # dequeue_fn to model_fn. Here, `input_fn` is passed directly as\n # `features` in `model_fn` signature.\n def _input_fn():\n return input_fn(**kwargs)\n\n return _input_fn", "def get_function_input(inputs, input_name, optional=False):\n input = inputs.get(input_name)\n\n if input is None and optional is False:\n err = \"'{0}' is a mandatory function input\".format(input_name)\n raise ValueError(err)\n else:\n return input", "def get_eval_input_fn(self, input_name):\n def eval_input_fn():\n # Get filenames\n data_dir = pathlib.Path(self._TEST_DATA_DIR)\n list_ds = tf.data.Dataset.list_files(str(data_dir / '*'))\n\n # Create data pre-processing functions\n funcs = self._get_data_preprocessing_fns()\n\n # Get labeled dataset\n ds = list_ds.map(funcs.process_path, num_parallel_calls=AUTOTUNE)\n # Format conversion\n ds = ds.map(funcs.convert_format, num_parallel_calls=AUTOTUNE)\n # Resizing\n ds = ds.map(funcs.resize, num_parallel_calls=AUTOTUNE)\n\n # Prepare for tf.estimator\n ds = ds.map(lambda img, label: ({input_name: img}, label))\n\n # Batch, prefetch\n ds = ds.batch(self._TEST_BATCH_SIZE)\n ds = ds.prefetch(buffer_size=self._PREFETCH_BUFFER_SIZE)\n\n return ds\n return eval_input_fn", "def input_fn_builder(features, seq_length, is_training, drop_remainder):\n \"\"\"This is used to make the proper format of the prediction variable\"\"\"\n\n all_input_ids = []\n all_input_mask = []\n all_segment_ids = []\n all_label_ids = []\n\n for feature in features:\n all_input_ids.append(feature.input_ids)\n all_input_mask.append(feature.input_mask)\n all_segment_ids.append(feature.segment_ids)\n all_label_ids.append(feature.label_id)\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n print(params)\n batch_size = 500\n\n num_examples = len(features)\n\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"segment_ids\":\n tf.constant(\n all_segment_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"label_ids\":\n tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),\n })\n\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)\n return d\n\n return input_fn", "def input_fn_builder(features, seq_length):\n\n all_label_ids = []\n all_input_ids = []\n all_input_mask = []\n all_segment_ids = []\n\n for feature in features:\n all_label_ids.append(feature.label_ids)\n all_input_ids.append(feature.input_ids)\n all_input_mask.append(feature.input_mask)\n all_segment_ids.append(feature.segment_ids)\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. 
The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices(\n {\n \"label_ids\": tf.constant(\n all_label_ids, shape=[num_examples], dtype=tf.int32\n ),\n \"input_ids\": tf.constant(\n all_input_ids, shape=[num_examples, seq_length], dtype=tf.int32\n ),\n \"input_mask\": tf.constant(\n all_input_mask, shape=[num_examples, seq_length], dtype=tf.int32\n ),\n \"segment_ids\": tf.constant(\n all_segment_ids, shape=[num_examples, seq_length], dtype=tf.int32\n ),\n }\n )\n\n d = d.batch(batch_size=batch_size, drop_remainder=False)\n return d\n\n return input_fn", "def input_fn_builder(features, seq_length, is_training, drop_remainder):\n\n all_input_ids = []\n all_input_mask = []\n all_segment_ids = []\n all_label_ids = []\n\n for feature in features:\n all_input_ids.append(feature.input_ids)\n all_input_mask.append(feature.input_mask)\n all_segment_ids.append(feature.segment_ids)\n all_label_ids.append(feature.label_id)\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"segment_ids\":\n tf.constant(\n all_segment_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"label_ids\":\n tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),\n })\n\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)\n return d\n\n return input_fn", "def train_input_fn():\n # Initialize `iterator` with training data.\n train_filenames = [os.path.join(FLAGS.out_dir, FLAGS.train_records)]\n return _input_fn(train_filenames, FLAGS.epochs, FLAGS.batch_size, shuffle=True)", "def input_fn(params):\n\t\tbatch_size = params[\"batch_size\"]\n\t\toutput_buffer_size = batch_size * 1000\n\t\tmax_seq_len = {0: max_q_len, 1:max_p_len}\n\n\n\t\tdef extract_fn(data_record):\n\t\t\tif is_training:\n\t\t\t\tfeatures = {\n\t\t\t\t\t\t\t\"raw_query_ids\": tf.FixedLenSequenceFeature(\n\t\t\t\t\t\t\t\t[], tf.int64, allow_missing=True),\n\t\t\t\t\t\t\t\"raw_query_mask\": tf.FixedLenSequenceFeature(\n\t\t\t\t\t\t\t\t[], tf.int64, allow_missing=True),\n\t\t\t\t\t\t\t\"raw_query_segment_ids\": tf.FixedLenSequenceFeature(\n\t\t\t\t\t\t\t\t[], tf.int64, allow_missing=True),\n\t\t\t\t\t\t\t\"rewrite_query_ids\": tf.FixedLenSequenceFeature(\n\t\t\t\t\t\t\t\t[], tf.int64, allow_missing=True),\n\t\t\t\t\t\t\t\"rewrite_query_mask\": tf.FixedLenSequenceFeature(\n\t\t\t\t\t\t\t\t[], tf.int64, allow_missing=True),\n\t\t\t\t\t\t\t\"rewrite_query_segment_ids\": tf.FixedLenSequenceFeature(\n\t\t\t\t\t\t\t\t[], tf.int64, allow_missing=True),\n\t\t\t\t\t\t\t\"doc_ids0\": tf.FixedLenSequenceFeature(\n\t\t\t\t\t\t\t\t[], tf.int64, allow_missing=True),\n\t\t\t\t\t\t\t\"doc_ids1\": tf.FixedLenSequenceFeature(\n\t\t\t\t\t\t\t\t[], tf.int64, allow_missing=True),\n\t\t\t\t\t\t\t\"label\": tf.FixedLenFeature([], tf.int64),\n\t\t\t\t\t\t\t}\n\t\t\t\tsample = tf.parse_single_example(data_record, features)\n\n\t\t\t\traw_query_ids = 
tf.cast(sample[\"raw_query_ids\"][:max_q_len], tf.int32)\n\t\t\t\traw_query_segment_ids = tf.cast(sample[\"raw_query_segment_ids\"][:max_q_len], tf.int32)\n\t\t\t\traw_query_mask = tf.ones_like(raw_query_ids)\n\t\t\t\t# effective_raw_query_mask = tf.ones_like(query_ids)\n\t\t\t\teffective_raw_query_mask = tf.cast(sample[\"raw_query_mask\"][:max_q_len], tf.int32)\n\n\t\t\t\trewrite_query_ids = tf.cast(sample[\"rewrite_query_ids\"][:max_q_len], tf.int32)\n\t\t\t\trewrite_query_segment_ids = tf.cast(sample[\"rewrite_query_segment_ids\"][:max_q_len], tf.int32)\n\t\t\t\trewrite_query_mask = tf.ones_like(rewrite_query_ids)\n\t\t\t\teffective_rewrite_query_mask = tf.cast(sample[\"rewrite_query_mask\"][:max_q_len], tf.int32)\n\n\n\t\t\t\tdoc0_ids = tf.cast(sample[\"doc_ids0\"][:max_p_len], tf.int32)\n\t\t\t\tdoc0_segment_ids = tf.zeros_like(doc0_ids)\n\t\t\t\tdoc0_mask = tf.ones_like(doc0_ids)\n\t\t\t\teffective_doc0_mask = tf.ones_like(doc0_ids)\n\n\n\t\t\t\tdoc1_ids = tf.cast(sample[\"doc_ids1\"][:max_p_len], tf.int32)\n\t\t\t\tdoc1_segment_ids = tf.zeros_like(doc1_ids)\n\t\t\t\tdoc1_mask = tf.ones_like(doc1_ids)\n\t\t\t\teffective_doc1_mask = tf.ones_like(doc1_ids)\n\n\n\t\t\t\tlabel = tf.cast(sample[\"label\"], tf.float32)\n\n\n\t\t\t\tfeatures = {\n\t\t\t\t\t\"raw_query_ids\": raw_query_ids,\n\t\t\t\t\t\"raw_query_segment_ids\": raw_query_segment_ids,\n\t\t\t\t\t\"raw_query_mask\": raw_query_mask,\n\t\t\t\t\t\"effective_raw_query_mask\": effective_raw_query_mask,\n\t\t\t\t\t\"rewrite_query_ids\": rewrite_query_ids,\n\t\t\t\t\t\"rewrite_query_segment_ids\": rewrite_query_segment_ids,\n\t\t\t\t\t\"rewrite_query_mask\": rewrite_query_mask,\n\t\t\t\t\t\"effective_rewrite_query_mask\": effective_rewrite_query_mask,\n\t\t\t\t\t\"doc0_ids\": doc0_ids,\n\t\t\t\t\t\"doc0_segment_ids\": doc0_segment_ids,\n\t\t\t\t\t\"doc0_mask\": doc0_mask,\n\t\t\t\t\t\"effective_doc0_mask\": effective_doc0_mask,\n\t\t\t\t\t\"doc1_ids\": doc1_ids,\n\t\t\t\t\t\"doc1_segment_ids\": doc1_segment_ids,\n\t\t\t\t\t\"doc1_mask\": doc1_mask,\n\t\t\t\t\t\"effective_doc1_mask\": effective_doc1_mask,\n\t\t\t\t\t\"label\": label,\n\t\t\t\t\t}\n\t\t\telse:\n\t\t\t\tif is_output:\n\t\t\t\t\tif doc_type==0:\n\t\t\t\t\t\tfeatures = {\n\t\t\t\t\t\t\t\"raw_query_ids\": tf.FixedLenSequenceFeature(\n\t\t\t\t\t\t\t\t\t[], tf.int64, allow_missing=True),\n\t\t\t\t\t\t\t\"raw_query_mask\": tf.FixedLenSequenceFeature(\n\t\t\t\t\t\t\t\t\t[], tf.int64, allow_missing=True),\n\t\t\t\t\t\t\t\"raw_query_segment_ids\": tf.FixedLenSequenceFeature(\n\t\t\t\t\t\t\t\t\t[], tf.int64, allow_missing=True),\n\t\t\t\t\t\t\t\"query_id\": tf.FixedLenFeature([], tf.int64),\n\t\t\t\t\t\t}\n\t\t\t\t\t\tsample = tf.parse_single_example(data_record, features)\n\t\t\t\t\t\tinput_ids = tf.cast(sample[\"raw_query_ids\"][:max_seq_len[doc_type]], tf.int32)\n\t\t\t\t\t\tsegment_ids = tf.cast(sample[\"raw_query_segment_ids\"][:max_seq_len[doc_type]], tf.int32)\n\t\t\t\t\t\tinput_mask = tf.ones_like(input_ids)\n\t\t\t\t\t\teffective_input_mask = tf.cast(sample[\"raw_query_mask\"][:max_seq_len[doc_type]], tf.int32)\n\t\t\t\t\t\tdocid = tf.cast(sample[\"query_id\"], tf.int32)\n\t\t\t\t\t\tlabel = tf.cast(0, tf.int32) #dummy\n\t\t\t\t\t\tfeatures = {\n\t\t\t\t\t\t\t\"input_ids\": input_ids,\n\t\t\t\t\t\t\t\"segment_ids\": segment_ids,\n\t\t\t\t\t\t\t\"input_mask\": input_mask,\n\t\t\t\t\t\t\t\"effective_input_mask\": effective_input_mask,\n\t\t\t\t\t\t\t\"docid\": docid,\n\t\t\t\t\t\t\t\"label\": label,\n\t\t\t\t\t\t}\n\t\t\t\t\telif doc_type==1:\n\t\t\t\t\t\tfeatures = 
{\n\t\t\t\t\t\t\t\"doc_ids\": tf.FixedLenSequenceFeature(\n\t\t\t\t\t\t\t\t[], tf.int64, allow_missing=True),\n\t\t\t\t\t\t\t\"docid\": tf.FixedLenFeature([], tf.int64),\n\t\t\t\t\t\t}\n\t\t\t\t\t\tsample = tf.parse_single_example(data_record, features)\n\t\t\t\t\t\tdoc_ids = sample[\"doc_ids\"][:max_seq_len[doc_type]]\n\n\t\t\t\t\t\tinput_ids = tf.cast(doc_ids, tf.int32)\n\t\t\t\t\t\tsegment_ids = tf.zeros_like(input_ids)\n\t\t\t\t\t\tinput_mask = tf.ones_like(input_ids)\n\t\t\t\t\t\tdocid = tf.cast(sample[\"docid\"], tf.int32)\n\t\t\t\t\t\tlabel = tf.cast(0, tf.int32) #dummy\n\t\t\t\t\t\tfeatures = {\n\t\t\t\t\t\t\t\"input_ids\": input_ids,\n\t\t\t\t\t\t\t\"segment_ids\": segment_ids,\n\t\t\t\t\t\t\t\"input_mask\": input_mask,\n\t\t\t\t\t\t\t\"effective_input_mask\": input_mask,\n\t\t\t\t\t\t\t\"docid\": docid,\n\t\t\t\t\t\t\t\"label\": label,\n\t\t\t\t\t\t}\n\t\t\t\telif is_eval:\n\t\t\t\t\tfeatures = {\n\t\t\t\t\t\t\"query_ids\": tf.FixedLenSequenceFeature(\n\t\t\t\t\t\t\t[], tf.int64, allow_missing=True),\n\t\t\t\t\t\t\"query_segment_ids\": tf.FixedLenSequenceFeature(\n\t\t\t\t\t\t\t[], tf.int64, allow_missing=True),\n\t\t\t\t\t\t\"query_mask\": tf.FixedLenSequenceFeature(\n\t\t\t\t\t\t\t[], tf.int64, allow_missing=True),\n\t\t\t\t\t\t\"doc_ids\": tf.FixedLenSequenceFeature(\n\t\t\t\t\t\t\t[], tf.int64, allow_missing=True),\n\t\t\t\t\t\t\"label\": tf.FixedLenFeature([], tf.int64),\n\t\t\t\t\t}\n\t\t\t\t\tsample = tf.parse_single_example(data_record, features)\n\n\t\t\t\t\tquery_ids = tf.cast(sample[\"query_ids\"][:max_q_len], tf.int32)\n\t\t\t\t\tquery_segment_ids = tf.cast(sample[\"query_segment_ids\"][:max_q_len], tf.int32)\n\t\t\t\t\t# query_segment_ids = tf.zeros_like(query_ids)\n\t\t\t\t\tquery_mask = tf.ones_like(query_ids)\n\t\t\t\t\teffective_query_mask = tf.cast(sample[\"query_mask\"][:max_q_len], tf.int32)\n\t\t\t\t\t# effective_query_mask = tf.ones_like(query_ids)\n\n\t\t\t\t\tdocx_ids = tf.cast(sample[\"doc_ids\"][:max_p_len], tf.int32)\n\t\t\t\t\tdocx_segment_ids = tf.zeros_like(docx_ids)\n\t\t\t\t\tdocx_mask = tf.ones_like(docx_ids)\n\t\t\t\t\teffective_docx_mask = tf.ones_like(docx_ids)\n\n\n\t\t\t\t\tlabel = tf.cast(sample[\"label\"], tf.int32)\n\n\t\t\t\t\tfeatures = {\n\t\t\t\t\t\t\"query_ids\": query_ids,\n\t\t\t\t\t\t\"query_segment_ids\": query_segment_ids,\n\t\t\t\t\t\t\"query_mask\": query_mask,\n\t\t\t\t\t\t\"effective_query_mask\": effective_query_mask,\n\t\t\t\t\t\t\"docx_ids\": docx_ids,\n\t\t\t\t\t\t\"docx_segment_ids\": docx_segment_ids,\n\t\t\t\t\t\t\"docx_mask\": docx_mask,\n\t\t\t\t\t\t\"effective_docx_mask\": effective_docx_mask,\n\t\t\t\t\t\t\"label\": label,\n\t\t\t\t\t}\n\n\n\t\t\treturn features\n\n\t\tdataset = tf.data.TFRecordDataset([dataset_path])\n\t\tdataset = dataset.map(\n\t\t\textract_fn, num_parallel_calls=4).prefetch(output_buffer_size)\n\n\t\tif is_training:\n\t\t\tdataset = dataset.repeat()\n\t\t\tdataset = dataset.shuffle(buffer_size=1000)\n\t\t\tdataset = dataset.padded_batch(\n\t\t\t\t\t\tbatch_size=batch_size,\n\t\t\t\t\t\tpadded_shapes={\n\t\t\t\t\t\t\t\"raw_query_ids\": [max_q_len],\n\t\t\t\t\t\t\t\"raw_query_segment_ids\": [max_q_len],\n\t\t\t\t\t\t\t\"raw_query_mask\": [max_q_len],\n\t\t\t\t\t\t\t\"effective_raw_query_mask\": [max_q_len],\n\t\t\t\t\t\t\t\"rewrite_query_ids\": [max_q_len],\n\t\t\t\t\t\t\t\"rewrite_query_segment_ids\": [max_q_len],\n\t\t\t\t\t\t\t\"rewrite_query_mask\": [max_q_len],\n\t\t\t\t\t\t\t\"effective_rewrite_query_mask\": [max_q_len],\n\t\t\t\t\t\t\t\"doc0_ids\": 
[max_p_len],\n\t\t\t\t\t\t\t\"doc0_segment_ids\": [max_p_len],\n\t\t\t\t\t\t\t\"doc0_mask\": [max_p_len],\n\t\t\t\t\t\t\t\"effective_doc0_mask\": [max_p_len],\n\t\t\t\t\t\t\t\"doc1_ids\": [max_p_len],\n\t\t\t\t\t\t\t\"doc1_segment_ids\": [max_p_len],\n\t\t\t\t\t\t\t\"doc1_mask\": [max_p_len],\n\t\t\t\t\t\t\t\"effective_doc1_mask\": [max_p_len],\n\t\t\t\t\t\t\t\"label\": []\n\t\t\t\t\t\t},\n\t\t\t\t\t\tpadding_values={\n\t\t\t\t\t\t\t\"raw_query_ids\": 0,\n\t\t\t\t\t\t\t\"raw_query_segment_ids\": 0,\n\t\t\t\t\t\t\t\"raw_query_mask\": 0,\n\t\t\t\t\t\t\t\"effective_raw_query_mask\":0,\n\t\t\t\t\t\t\t\"rewrite_query_ids\": 0,\n\t\t\t\t\t\t\t\"rewrite_query_segment_ids\": 0,\n\t\t\t\t\t\t\t\"rewrite_query_mask\": 0,\n\t\t\t\t\t\t\t\"effective_rewrite_query_mask\":0,\n\t\t\t\t\t\t\t\"doc0_ids\": 0,\n\t\t\t\t\t\t\t\"doc0_segment_ids\": 0,\n\t\t\t\t\t\t\t\"doc0_mask\": 0,\n\t\t\t\t\t\t\t\"effective_doc0_mask\": 0,\n\t\t\t\t\t\t\t\"doc1_ids\": 0,\n\t\t\t\t\t\t\t\"doc1_segment_ids\": 0,\n\t\t\t\t\t\t\t\"doc1_mask\": 0,\n\t\t\t\t\t\t\t\"effective_doc1_mask\": 0,\n\t\t\t\t\t\t\t\"label\": 0.0,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tdrop_remainder=True)\n\t\telse:\n\t\t\tif max_eval_examples:\n\t\t\t\t# Use at most this number of examples (debugging only).\n\t\t\t\tdataset = dataset.take(max_eval_examples)\n\t\t\tif is_output:\n\t\t\t\tdataset = dataset.padded_batch(\n\t\t\t\t\t\t\tbatch_size=batch_size,\n\t\t\t\t\t\t\tpadded_shapes={\n\t\t\t\t\t\t\t\t\"input_ids\": [max_seq_len[doc_type]],\n\t\t\t\t\t\t\t\t\"segment_ids\": [max_seq_len[doc_type]],\n\t\t\t\t\t\t\t\t\"input_mask\": [max_seq_len[doc_type]],\n\t\t\t\t\t\t\t\t\"effective_input_mask\": [max_seq_len[doc_type]],\n\t\t\t\t\t\t\t\t\"docid\": [],\n\t\t\t\t\t\t\t\t\"label\": [],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tpadding_values={\n\t\t\t\t\t\t\t\t\"input_ids\": 0,\n\t\t\t\t\t\t\t\t\"segment_ids\": 0,\n\t\t\t\t\t\t\t\t\"input_mask\": 0,\n\t\t\t\t\t\t\t\t\"effective_input_mask\": 0,\n\t\t\t\t\t\t\t\t\"docid\": 0,\n\t\t\t\t\t\t\t\t\"label\": 0,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tdrop_remainder=True)\n\n\t\t\telif is_eval:\n\t\t\t\tdataset = dataset.padded_batch(\n\t\t\t\t\t\t\t\tbatch_size=batch_size,\n\t\t\t\t\t\t\t\tpadded_shapes={\n\t\t\t\t\t\t\t\t\t\"query_ids\": [max_q_len],\n\t\t\t\t\t\t\t\t\t\"query_segment_ids\": [max_q_len],\n\t\t\t\t\t\t\t\t\t\"query_mask\": [max_q_len],\n\t\t\t\t\t\t\t\t\t\"effective_query_mask\": [max_q_len],\n\t\t\t\t\t\t\t\t\t\"docx_ids\": [max_p_len],\n\t\t\t\t\t\t\t\t\t\"docx_segment_ids\": [max_p_len],\n\t\t\t\t\t\t\t\t\t\"docx_mask\": [max_p_len],\n\t\t\t\t\t\t\t\t\t\"effective_docx_mask\": [max_p_len],\n\t\t\t\t\t\t\t\t\t\"label\": []\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tpadding_values={\n\t\t\t\t\t\t\t\t\t\"query_ids\": 0,\n\t\t\t\t\t\t\t\t\t\"query_segment_ids\": 0,\n\t\t\t\t\t\t\t\t\t\"query_mask\": 0,\n\t\t\t\t\t\t\t\t\t\"effective_query_mask\": 0,\n\t\t\t\t\t\t\t\t\t\"docx_ids\": 0,\n\t\t\t\t\t\t\t\t\t\"docx_segment_ids\": 0,\n\t\t\t\t\t\t\t\t\t\"docx_mask\": 0,\n\t\t\t\t\t\t\t\t\t\"effective_docx_mask\":0,\n\t\t\t\t\t\t\t\t\t\"label\": 0,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tdrop_remainder=True)\n\t\treturn dataset", "def make_input_fn(step_output):\n return tf.nn.embedding_lookup(embeddings, step_output.predictions)", "def input_fn(params):\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. 
The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"segment_ids\":\n tf.constant(\n all_segment_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"label_ids\":\n tf.constant(all_label_ids, shape=[num_examples], dtype=tf.float32),\n })\n\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=1000)\n\n d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)\n return d", "def input_fn(params):\n #batch_size = params[\"batch_size\"]\n batch_size = FLAGS.train_batch_size if is_training else FLAGS.eval_batch_size\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n if FLAGS.use_horovod:\n d = d.shard(hvd.size(), hvd.rank())\n d = d.repeat()\n d = d.shuffle(buffer_size=100, seed=FLAGS.random_seed)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n return d", "def get_train_input_fn(self, input_name):\n def train_input_fn():\n # Get filenames\n data_dir = pathlib.Path(self._TRAIN_DATA_DIR)\n list_ds = tf.data.Dataset.list_files(str(data_dir / '*'))\n\n # Create data pre-processing functions\n funcs = self._get_data_preprocessing_fns()\n\n # Get labeled dataset\n ds = list_ds.map(funcs.process_path, num_parallel_calls=AUTOTUNE)\n # Format conversion\n ds = ds.map(funcs.convert_format, num_parallel_calls=AUTOTUNE)\n # Map rotate function\n if self._RANDOM_ROTATE:\n ds = ds.map(funcs.random_rotate, num_parallel_calls=AUTOTUNE)\n # Map zoom-in function\n if self._RANDOM_ZOOM:\n ds = ds.map(funcs.random_zoom, num_parallel_calls=AUTOTUNE)\n # Resizing\n ds = ds.map(funcs.resize, num_parallel_calls=AUTOTUNE)\n\n # Prepare for tf.estimator\n ds = ds.map(lambda img, label: ({input_name: img}, label))\n\n # Shuffle, batch, repeat, prefetch\n ds = ds.shuffle(buffer_size=self._SHUFFLE_BUFFER_SIZE)\n ds = ds.batch(self._TRAIN_BATCH_SIZE)\n ds = ds.repeat()\n ds = ds.prefetch(buffer_size=self._PREFETCH_BUFFER_SIZE)\n\n return ds\n return train_input_fn", "def get_input_files(self, action):\n\n def input_function(wildcards):\n \"\"\"Helper rapper function\"\"\"\n return expand(\n self.base_path_in.format(wildcards=wildcards),\n postproc=[self._get_postproc_token()],\n ext=self.extensions,\n )\n\n assert action == \"run\", \"Unsupported action\"\n return input_function", "def input_fn(params):\n # batch_size = params[\"batch_size\"]\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=buffer_size)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d", "def input_fn(params):\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. 
The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"segment_ids\":\n tf.constant(\n all_segment_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"label_ids\":\n tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),\n })\n\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)\n return d", "def input_fn_builder(features, seq_length, is_training, drop_remainder):\n\n all_input_ids = []\n all_input_mask = []\n all_segment_ids = []\n all_label_ids = []\n\n for feature in features:\n all_input_ids.append(feature.input_ids)\n all_input_mask.append(feature.input_mask)\n all_segment_ids.append(feature.segment_ids)\n all_label_ids.append(feature.label_id)\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"segment_ids\":\n tf.constant(\n all_segment_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"label_ids\":\n tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),\n })\n\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)\n return d\n\n return input_fn", "def file_based_input_fn_builder(input_file, seq_length, is_training,drop_remainder,batch_size,sample_length):\n name_to_features = {\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"label_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"output_mask\": tf.FixedLenFeature([seq_length], tf.int64),\n }\n def input_fn():\n \"\"\"The actual input function.\"\"\"\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=sample_length)\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: tf.parse_single_example(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n return d\n return input_fn", "def get_function_input(inputs, input_name, optional=False):\n this_input = inputs.get(input_name)\n\n if this_input is None and optional is False:\n err = \"'{0}' is a mandatory function input\".format(input_name)\n raise ValueError(err)\n else:\n return this_input", "def input_processing(func, config, input_ids, **kwargs):\n signature = dict(inspect.signature(func).parameters)\n signature.pop(\"kwargs\", None)\n parameter_names = list(signature.keys())\n output = {}\n allowed_types = (tf.Tensor, bool, int, tuple, list, dict)\n\n if \"inputs\" in kwargs[\"kwargs_call\"]:\n logger.warning(\n \"The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.\",\n 
FutureWarning)\n\n output[\"input_ids\"] = kwargs[\"kwargs_call\"].pop(\"inputs\")\n\n if \"decoder_cached_states\" in kwargs[\"kwargs_call\"]:\n logger.warning(\n \"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\n FutureWarning,\n )\n output[\"past_key_values\"] = kwargs[\"kwargs_call\"].pop(\"decoder_cached_states\")\n\n if len(kwargs[\"kwargs_call\"]) > 0:\n raise ValueError(\n f\"The following keyword arguments are not supported by this model: {list(kwargs['kwargs_call'].keys())}.\"\n )\n\n for k, v in kwargs.items():\n if isinstance(v, allowed_types) or v is None:\n output[k] = v\n else:\n raise ValueError(f\"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.\")\n\n if isinstance(input_ids, (tuple, list)):\n for i, input in enumerate(input_ids):\n # EagerTensors don't allow to use the .name property so we check for a real Tensor\n if type(input) == tf.Tensor:\n # Tensor names have always the pattern name:device_id then we check only the\n # name and not the device id\n tensor_name = input.name.split(\":\")[0]\n\n if tensor_name in parameter_names:\n output[tensor_name] = input\n else:\n output[parameter_names[i]] = input\n elif isinstance(input, allowed_types) or input is None:\n output[parameter_names[i]] = input\n else:\n raise ValueError(\n f\"Data of type {type(input)} is not allowed only {allowed_types} is accepted for {parameter_names[i]}.\"\n )\n elif isinstance(input_ids, dict):\n if \"inputs\" in input_ids:\n logger.warning(\n \"The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.\",\n FutureWarning,\n )\n\n output[\"input_ids\"] = input_ids.pop(\"inputs\")\n\n if \"decoder_cached_states\" in input_ids:\n logger.warning(\n \"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\n FutureWarning,\n )\n output[\"past_key_values\"] = input_ids.pop(\"decoder_cached_states\")\n\n for k, v in dict(input_ids).items():\n if isinstance(v, allowed_types) or v is None:\n output[k] = v\n elif k not in parameter_names and \"args\" not in parameter_names:\n logger.warning(\n f\"The parameter {k} does not belongs to the parameter list {parameter_names} and will be ignored.\"\n )\n continue\n else:\n raise ValueError(f\"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.\")\n else:\n if isinstance(input_ids, tf.Tensor) or input_ids is None:\n output[parameter_names[0]] = input_ids\n else:\n raise ValueError(\n f\"Data of type {type(input_ids)} is not allowed only {allowed_types} is accepted for {parameter_names[0]}.\"\n )\n\n for name in parameter_names:\n if name not in list(output.keys()) and name != \"args\":\n output[name] = kwargs.pop(name, signature[name].default)\n\n # When creating a SavedModel TF calls the method with LayerCall.__call__(args, **kwargs)\n # So to respect the proper output we have to add this exception\n if \"args\" in output:\n if output[\"args\"] is not None and type(output[\"args\"]) == tf.Tensor:\n tensor_name = output[\"args\"].name.split(\":\")[0]\n output[tensor_name] = output[\"args\"]\n else:\n # `args` in this case is always the first parameter, then `input_ids`\n output[\"input_ids\"] = output[\"args\"]\n\n del output[\"args\"]\n\n if \"kwargs\" in output:\n del output[\"kwargs\"]\n\n boolean_dict = {\n k: v\n for k, v in output.items()\n if k in [\"return_dict\", \"output_attentions\", 
\"output_hidden_states\", \"use_cache\"]\n }\n\n output.update(\n booleans_processing(\n config=config,\n **boolean_dict,\n )\n )\n\n return output", "def input_fn(params):\n batch_size = params[\"batch_size\"]\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d", "def input_fn(params):\n batch_size = self.batch_size\n\n num_examples = len(features)\n\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, max_seq_len],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, max_seq_len],\n dtype=tf.int32),\n \"label_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, max_seq_len],\n dtype=tf.int32),\n \"label_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, max_seq_len],\n dtype=tf.int32),\n })\n\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.batch(batch_size=batch_size)\n return d", "def input_fn(params):\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n batch_size = params[\"train_batch_size\"]\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n else:\n batch_size = params[\"predict_batch_size\"]\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d", "def train_input_fn(self) -> types.FeatureAndLabelTensors:\n return self._input_fn_from_file(self._train_path)", "def input_fn(params):\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. 
The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"segment_ids\":\n tf.constant(\n all_segment_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"label_ids\":\n tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),\n })\n\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)\n return d", "def input_fn_builder(self, features, max_seq_len, batch_size, is_training):\n\n all_input_ids = []\n all_input_mask = []\n all_label_ids = []\n all_label_mask = []\n\n for feature in features:\n all_input_ids.append(feature.input_ids)\n all_input_mask.append(feature.input_mask)\n all_label_ids.append(feature.label_ids)\n all_label_mask.append(feature.label_mask)\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = self.batch_size\n\n num_examples = len(features)\n\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, max_seq_len],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, max_seq_len],\n dtype=tf.int32),\n \"label_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, max_seq_len],\n dtype=tf.int32),\n \"label_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, max_seq_len],\n dtype=tf.int32),\n })\n\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.batch(batch_size=batch_size)\n return d\n\n return input_fn", "def input_fn(params):\n batch_size = params[\"batch_size\"]\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder,\n )\n )\n\n return d", "def input_fn_by_dataset_with_fake_data(input_size, padding_length):\n def _input_fn(params):\n \"\"\"A `input_fn` returning features and labels.\"\"\"\n batch_size = params['batch_size']\n inputs, labels, lengths = sequence_example_lib.get_fake_data_batch(\n batch_size, input_size, padding_length)\n\n features = {\n 'inputs': inputs,\n 'lengths': lengths\n }\n return features, labels\n return _input_fn", "def input_fn(params):\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. 
The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices(\n {\n \"label_ids\": tf.constant(\n all_label_ids, shape=[num_examples], dtype=tf.int32\n ),\n \"input_ids\": tf.constant(\n all_input_ids, shape=[num_examples, seq_length], dtype=tf.int32\n ),\n \"input_mask\": tf.constant(\n all_input_mask, shape=[num_examples, seq_length], dtype=tf.int32\n ),\n \"segment_ids\": tf.constant(\n all_segment_ids, shape=[num_examples, seq_length], dtype=tf.int32\n ),\n }\n )\n\n d = d.batch(batch_size=batch_size, drop_remainder=False)\n return d", "def get_input_fn(options, is_training):\n if not isinstance(options, reader_pb2.VCRReader):\n raise ValueError('options has to be an instance of Reader.')\n\n def _input_fn(input_pipeline_context=None):\n \"\"\"Returns a python dictionary.\n\n Returns:\n A dataset that can be fed to estimator.\n \"\"\"\n return _create_dataset(options, is_training, input_pipeline_context)\n\n return _input_fn", "def __createInputFunction(self):\r\n self.InputFunctionLabel = QLabel(\"f(x)\")\r\n\r\n self.InputFunctionField = QLineEdit(self)\r\n self.InputFunctionField.setPlaceholderText(\"5*x^3 + 2*x\")", "def file_based_input_fn_builder(input_file):\n # 存放解析自TFRecord文件的数据\n name_to_features = {\n \"input_q\":tf.FixedLenFeature([shape],tf.int64),\n \"input_K\":tf.FixedLenFeature([],tf.float32),\n \"input_v\":tf.FixedLenFeature([],tf.float32),\n }\n\n def _decode_record(record,name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record,name_to_features)\n\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size = 100)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record:_decode_record(record, name_to_features),\n batch_size = batch_size,\n drop_remainder=drop_remainder))\n return d\n return input_fn", "def input_fn():\n raw_placeholder_spec = RAW_DATA_METADATA.schema.as_batched_placeholders()\n # remove label key that is not going to be available at seving\n raw_placeholder_spec.pop(LABEL_KEY)\n\n # we are defining the feature_column (raw_featutes) and the tensor\n # (receiver_tensors) for the raw data\n raw_input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(\n raw_placeholder_spec)\n raw_features, receiver_tensors , _ = raw_input_fn()\n\n # we are tranforming the raw_features with the graph written by\n # preprocess.py to transform_fn_io.TRANSFORM_FN_DIR and that was used to\n # write the tf records. 
This helps avoiding training/serving skew\n\n _, transformed_features = (\n saved_transform_io.partially_apply_saved_transform(\n os.path.join(tft_working_dir, transform_fn_io.TRANSFORM_FN_DIR),\n raw_features))\n\n return tf.estimator.export.ServingInputReceiver(\n transformed_features, receiver_tensors)", "def input_fn_builder(self, input_file, seq_length, is_training, drop_remainder):\n\n name_to_features = {\n \"unique_ids\": tf.FixedLenFeature([], tf.int64),\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n }\n\n if is_training:\n name_to_features[\"subject\"] = tf.FixedLenFeature([], tf.int64)\n name_to_features[\"property\"] = tf.FixedLenFeature([], tf.int64)\n name_to_features[\"value\"] = tf.FixedLenFeature([], tf.int64)\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d\n\n return input_fn", "def orig_function(inputs, outputs, mode=None, accept_inplace=False,\r\n name=None, profile=None, on_unused_input=None):\r\n\r\n # Every element of the input list will be upgraded to an `In` instance if\r\n # necessary, using the rules implemented by the `convert_function_input`\r\n # function.\r\n\r\n # Similarly, every element of the output list will be upgraded to an `Out`\r\n # instance if necessary:\r\n\r\n t1 = time.time()\r\n mode = theano.compile.mode.get_mode(mode)\r\n\r\n inputs = map(convert_function_input, inputs)\r\n if outputs is not None:\r\n if isinstance(outputs, (list, tuple)):\r\n outputs = map(FunctionMaker.wrap_out, outputs)\r\n else:\r\n outputs = FunctionMaker.wrap_out(outputs)\r\n\r\n defaults = [getattr(input, 'value', None) for input in inputs]\r\n\r\n if isinstance(mode, (list, tuple)): # \"mode comparison\" semantics\r\n raise Exception(\"We do not support the passing of multiple modes\")\r\n else:\r\n Maker = getattr(mode, 'function_maker', FunctionMaker)\r\n fn = Maker(inputs,\r\n outputs,\r\n mode,\r\n accept_inplace=accept_inplace,\r\n profile=profile,\r\n on_unused_input=on_unused_input).create(\r\n defaults)\r\n\r\n t2 = time.time()\r\n if profile:\r\n profile.compile_time += t2 - t1\r\n\r\n fn.name = name\r\n fn.maker.fgraph.name = name\r\n return fn", "def input_fn(params):\n print(params)\n batch_size = 500\n\n num_examples = len(features)\n\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"segment_ids\":\n tf.constant(\n 
all_segment_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"label_ids\":\n tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),\n })\n\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)\n return d", "def make_serving_input_fn(self, ):\n feature_spec = tf.feature_column.make_parse_example_spec(\n self.example_feature_columns().values())\n return tf.estimator.export.build_parsing_serving_input_receiver_fn(\n feature_spec)", "def generate(func, *inputs):\n # http://blog.kevinastone.com/generate-your-tests.html\n def decorator(testcase):\n for input in inputs:\n test_input = make_method(func, input)\n setattr(testcase, test_input.__name__, test_input)\n return testcase\n\n return decorator", "def get_input(inputs):\n return input(inputs)", "def process_inputs(self, inputs):", "def compile_test_fn(model):\n logger.info(\"Building val_fn\")\n acoustic_input = model.inputs[0]\n network_output = model.outputs[0]\n ctc_input_lengths = K.placeholder(ndim=2, dtype='int32')\n\n\n val_fn = K.function([acoustic_input, ctc_input_lengths,\n K.learning_phase()],\n [network_output])\n return val_fn", "def input_fn(params, is_training):\n features = tf.constant(0)\n labels = tf.constant(0)\n\n return features, labels", "def __init__(self, fn, input_storage, output_storage, indices, outputs,\r\n defaults, unpack_single, return_none, maker):\r\n\r\n self.fn = fn\r\n self.input_storage = input_storage\r\n self.output_storage = output_storage\r\n self.indices = indices\r\n self.outputs = outputs\r\n self.defaults = defaults\r\n self.unpack_single = unpack_single\r\n self.return_none = return_none\r\n self.maker = maker\r\n self.profile = None # reassigned in FunctionMaker.create\r\n self.trust_input = False # If True, we don't check the input parameter\r\n self.name = None\r\n\r\n # We will be popping stuff off this `containers` object. It is a copy.\r\n containers = list(self.input_storage)\r\n finder = {}\r\n inv_finder = {}\r\n\r\n def distribute(indices, cs, value):\r\n input.distribute(value, indices, cs)\r\n for c in cs:\r\n c.provided += 1\r\n #def assign(c, v):\r\n #c.data = v\r\n\r\n # Store the list of names of named inputs.\r\n named_inputs = []\r\n # Count the number of un-named inputs.\r\n n_unnamed_inputs = 0\r\n\r\n #setters = []\r\n # Initialize the storage\r\n # this loop works by modifying the elements (as variable c) of self.input_storage inplace.\r\n for i, ((input, indices, sinputs), (required, refeed, value)) in enumerate(zip(self.indices, defaults)):\r\n if indices is None: # this is true iff input is not a SymbolicInputKit\r\n c = containers[0] #containers is being used as a stack. 
Here we pop off the next one.\r\n c.strict = getattr(input, 'strict', False)\r\n c.allow_downcast = getattr(input, 'allow_downcast', None)\r\n\r\n if value is not None:\r\n # Always initialize the storage.\r\n if isinstance(value, gof.Container):\r\n # There is no point in obtaining the current value\r\n # stored in the container, since the container is\r\n # shared.\r\n # For safety, we make sure 'refeed' is False, since\r\n # there is no need to refeed the defaullt value.\r\n assert not refeed\r\n else:\r\n c.value = value\r\n c.required = required\r\n c.implicit = input.implicit\r\n c.provided = 0 # this is a count of how many times the input has been provided (reinitialized to 0 on __call__)\r\n finder[i] = c\r\n finder[input.variable] = c\r\n if input.name not in finder:\r\n finder[input.name] = c\r\n else:\r\n finder[input.name] = DUPLICATE\r\n if input.name is None:\r\n n_unnamed_inputs += 1\r\n else:\r\n named_inputs.append(input.name)\r\n #backport\r\n #finder[input.name] = c if input.name not in finder else DUPLICATE\r\n # inv_finder maps the container to the input (useful for one error message)\r\n inv_finder[c] = input\r\n #setters.append(partial(assign, c))\r\n containers[:1] = []\r\n else:\r\n # TODO The following code may need to do something to handle\r\n # implicit inputs.\r\n\r\n # The input is a SymbolicInputKit, so we take as many containers as the Kit provides inputs\r\n cs = containers[:len(indices)]\r\n # distribute does the initialization of the containers\r\n input.distribute(value, indices, cs)\r\n f = partial(distribute, indices, cs)\r\n # Like before, we set a finder entry for the kit. Note that\r\n # we are not mapping to a container but to a function which\r\n # can reinitialize all the containers\r\n finder[i] = f\r\n finder[input] = f\r\n if input.name not in finder:\r\n finder[input.name] = f\r\n else:\r\n finder[input.name] = DUPLICATE\r\n #backport\r\n #finder[input.name] = f if input.name not in finder else DUPLICATE\r\n #setters.append(f)\r\n # For each input in the kit and its corresponding container, we put an entry in finder.\r\n # This allows the user to micro-manage elements of the kit if need be.\r\n # All containers inherit the required field and have their own \"provided\" counter\r\n for c, sin in zip(cs, sinputs):\r\n finder[sin.variable] = c\r\n finder[sin.name] = c\r\n if sin.name not in finder:\r\n finder[sin.name] = c\r\n else:\r\n finder[sin.name] = DUPLICATE\r\n #backport\r\n #finder[sin.name] = c if sin.name not in finder else DUPLICATE\r\n inv_finder[c] = input\r\n c.required = required\r\n c.provided = 0\r\n containers[:len(indices)] = []\r\n\r\n self.finder = finder\r\n self.inv_finder = inv_finder\r\n\r\n # this class is important in overriding the square-bracket notation:\r\n # fn.value[x]\r\n # self reference is available via the closure on the class\r\n class ValueAttribute(object):\r\n def __getitem__(self, item):\r\n try:\r\n s = finder[item]\r\n except KeyError:\r\n raise TypeError(\"Unknown input or state: %s\" % str(item))\r\n if s is DUPLICATE:\r\n raise TypeError(\"Ambiguous name: %s - please check the names \"\\\r\n \"of the inputs of your function for duplicates.\" % str(item))\r\n if isinstance(s, gof.Container):\r\n return s.value\r\n else:\r\n raise NotImplementedError\r\n def __setitem__(self, item, value):\r\n try:\r\n s = finder[item]\r\n except KeyError:\r\n # Print informative error message.\r\n msg = get_info_on_inputs(named_inputs, n_unnamed_inputs)\r\n raise TypeError(\"Unknown input or state: %s. 
%s\" %\r\n (str(item), msg))\r\n if s is DUPLICATE:\r\n raise TypeError(\"Ambiguous name: %s - please check the names \"\\\r\n \"of the inputs of your function for duplicates.\" % str(item))\r\n if isinstance(s, gof.Container):\r\n s.value = value\r\n s.provided += 1\r\n else:\r\n s(value)\r\n def __contains__(self, item):\r\n return finder.__contains__(item)\r\n\r\n # this class is important in overriding the square-bracket notation:\r\n # fn.container[x]\r\n # self reference is available via the closure on the class\r\n class ContainerAttribute(object):\r\n def __getitem__(self, item):\r\n return finder[item]\r\n def __contains__(self, item):\r\n return finder.__contains__(item)\r\n # You cannot set the container\r\n\r\n self._value = ValueAttribute()\r\n self._container = ContainerAttribute()\r\n\r\n # Compute self.n_returned_outputs.\r\n # This is used only when fn.need_update_inputs is False\r\n # because we're using one of the VM objects and it is\r\n # putting updates back into the input containers all by itself.\r\n assert len(self.maker.expanded_inputs) == len(self.input_storage)\r\n self.n_returned_outputs = len(self.output_storage)\r\n for input in self.maker.expanded_inputs:\r\n if input.update is not None:\r\n self.n_returned_outputs -= 1", "def build_input_fns(data_dir, batch_size):\n\n # Build an iterator over training batches.\n training_dataset = static_mnist_dataset(data_dir, \"train\")\n training_dataset = training_dataset.shuffle(50000).repeat().batch(batch_size)\n train_input_fn = lambda: training_dataset.make_one_shot_iterator().get_next()\n\n # Build an iterator over the heldout set.\n eval_dataset = static_mnist_dataset(data_dir, \"valid\")\n eval_dataset = eval_dataset.batch(batch_size)\n eval_input_fn = lambda: eval_dataset.make_one_shot_iterator().get_next()\n\n return train_input_fn, eval_input_fn", "def file_based_input_fn_builder(input_files, is_training, batch_size):\n\n name_to_features = {\n \"src_ids\": tf.VarLenFeature(tf.int64),\n \"tgt_ids\": tf.VarLenFeature(tf.int64),\n \"label\": tf.FixedLenFeature([1], tf.int64),\n }\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example[\"src_ids\"].values, example[\"tgt_ids\"].values, example[\"label\"][0]\n\n def input_fn():\n \"\"\"The actual input function.\"\"\"\n bos_id = tf.constant(BOS_ID, tf.int32)\n eos_id = tf.constant(EOS_ID, tf.int32)\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_files)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.map(lambda record: _decode_record(record, name_to_features))\n\n d = d.map(lambda src_ids, tgt_ids, label: (\n tf.concat([[bos_id], src_ids, [eos_id]], 0),\n tf.concat([tgt_ids, [eos_id]], 0),\n label))\n\n d = d.map(lambda src_ids, tgt_ids, label: (\n src_ids[:FLAGS.max_sequence_length],\n tgt_ids[:FLAGS.max_sequence_length],\n label\n ))\n\n d = d.map(lambda src_ids, tgt_ids, label: (\n tf.concat([src_ids, tgt_ids], 0),\n tf.concat([tf.zeros_like(src_ids), tf.ones_like(tgt_ids)], 0),\n label\n ))\n\n d = d.map(lambda input_ids, segment_ids, label_ids: (\n 
input_ids,\n segment_ids,\n tf.ones_like(input_ids),\n label_ids\n ))\n\n def batching_func(x):\n return x.padded_batch(\n batch_size,\n # The entry is the source line rows;\n # this has unknown-length vectors. The last entry is\n # the source row size; this is a scalar.\n padded_shapes=(\n tf.TensorShape([None]), # src\n tf.TensorShape([None]), # tgt\n tf.TensorShape([None]),\n tf.TensorShape([])), # src_len\n # Pad the source sequences with eos tokens.\n # (Though notice we don't generally need to do this since\n # later on we will be masking out calculations past the true sequence.\n padding_values=(\n PAD_ID, # src\n PAD_ID,\n PAD_ID,\n 0)) # src_len -- unused\n\n batched_dataset = batching_func(d)\n features = batched_dataset.map(lambda input_ids, segment_ids, input_mask, label:\n {\n \"input_ids\": input_ids,\n \"segment_ids\": segment_ids,\n \"input_mask\": input_mask,\n \"label_ids\": label\n\n })\n\n return features\n\n return input_fn", "def convert_function_input(input):\r\n if isinstance(input, (SymbolicInput, SymbolicInputKit)):\r\n return input\r\n elif isinstance(input, gof.Constant):\r\n raise TypeError('A Constant instance is not a legal function input',\r\n input)\r\n elif isinstance(input, gof.Variable):\r\n return In(input)\r\n elif isinstance(input, (list, tuple)):\r\n orig = input\r\n if not input:\r\n raise TypeError(\"Nonsensical input specification: %s\" % input)\r\n if isinstance(input[0], basestring):\r\n name = input[0]\r\n input = input[1:]\r\n else:\r\n name = None\r\n if isinstance(input[0], (list, tuple)):\r\n if len(input[0]) != 2 or len(input) != 2:\r\n raise TypeError(\"Invalid input syntax: %s (check \"\r\n \"documentation or use an In instance)\" % orig)\r\n (variable, update), value = input\r\n elif isinstance(input[0], gof.Variable):\r\n if len(input) == 1:\r\n variable, update, value = input[0], None, None\r\n elif len(input) == 2:\r\n (variable, value), update = input, None\r\n else:\r\n raise TypeError(\"Invalid input syntax: %s (check \"\r\n \"documentation or use an In instance)\" % orig)\r\n elif isinstance(input[0], (SymbolicInput, SymbolicInputKit)):\r\n if len(input) == 1:\r\n return input[0]\r\n elif len(input) == 2:\r\n input, value = input\r\n if name is not None:\r\n input.name = name\r\n input.value = value\r\n return input\r\n else:\r\n raise TypeError(\"The input specification is not valid: %s\" % input)\r\n\r\n if not isinstance(variable, gof.Variable):\r\n raise TypeError(\"Unknown input type: %s, expected Variable \"\r\n \"instance\" % type(variable), variable)\r\n if update is not None and not isinstance(update, gof.Variable):\r\n raise TypeError(\"Unknown update type: %s, expected Variable \"\r\n \"instance\" % type(update), update)\r\n if (value is not None and\r\n isinstance(value, (gof.Variable, SymbolicInput))):\r\n raise TypeError(\"The value for input %s should not be a Variable \"\r\n \"or SymbolicInput instance (got: %s)\" %\r\n (variable, value))\r\n\r\n return In(variable, name=name, value=value, update=update)\r\n else:\r\n raise TypeError(\"Unknown input type: %s, expected Variable instance\" %\r\n type(input), input)", "def input_fn(params):\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"segment_ids\":\n tf.constant(\n all_segment_ids,\n 
shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"label_ids\":\n tf.constant(all_label_ids, shape=[num_examples, len(LABEL_COLUMNS)], dtype=tf.int32),\n })\n\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)\n return d", "def call(self, inputs):\n raise NotImplementedError", "def __init__(__self__, *,\n function_name: Optional[pulumi.Input[str]] = None,\n input: Optional[pulumi.Input[str]] = None,\n qualifier: Optional[pulumi.Input[str]] = None,\n result: Optional[pulumi.Input[str]] = None,\n triggers: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n if function_name is not None:\n pulumi.set(__self__, \"function_name\", function_name)\n if input is not None:\n pulumi.set(__self__, \"input\", input)\n if qualifier is not None:\n pulumi.set(__self__, \"qualifier\", qualifier)\n if result is not None:\n pulumi.set(__self__, \"result\", result)\n if triggers is not None:\n pulumi.set(__self__, \"triggers\", triggers)", "def input_fn(params):\n assert params[\"batch_size\"] * num_core_per_host == bsz_per_host\n\n datasets = []\n for files, func in zip(file_list, func_list):\n if files:\n cur_dataset = func(\n params=params,\n num_hosts=num_hosts,\n num_core_per_host=num_core_per_host,\n is_training=split == \"train\",\n file_names=files,\n seq_len=seq_len,\n num_predict=num_predict,\n use_bfloat16=use_bfloat16,\n **kwargs)\n\n datasets.append(cur_dataset)\n\n if len(datasets) > 1:\n dataset = tf.data.experimental.sample_from_datasets(datasets)\n elif len(datasets) == 1:\n dataset = datasets[0]\n\n return dataset", "def run(fn, *input_values, **kwds):\n \n ee = kwds.get('ee', shared_exec_engine)\n input_types = [arg.type for arg in fn.args]\n gv_inputs = [gv_from_python(x, t) \n for (x,t) in \n zip(input_values, input_types)]\n \n return run_with_generic_values(fn, gv_inputs, ee)", "def create_predict_input_fn(features, batch_size):\n def _input_fn():\n # raw_features = {\"MFCCs\": features.values}\n ds = Dataset.from_tensor_slices((features.to_dict('list'))) # warning: 2GB limit\n ds = ds.batch(batch_size)\n # Return the next batch of data.\n feature_batch = ds.make_one_shot_iterator().get_next()\n return feature_batch\n return _input_fn", "def input_fn_builder(dataset_path, max_q_len, max_p_len, doc_type,\n\t\t\t\t\t is_training, is_eval, is_output, max_eval_examples=None):\n\tdef input_fn(params):\n\t\t\"\"\"The actual input function.\"\"\"\n\t\tbatch_size = params[\"batch_size\"]\n\t\toutput_buffer_size = batch_size * 1000\n\t\tmax_seq_len = {0: max_q_len, 1:max_p_len}\n\n\n\t\tdef extract_fn(data_record):\n\t\t\tif is_training:\n\t\t\t\tfeatures = {\n\t\t\t\t\t\t\t\"raw_query_ids\": tf.FixedLenSequenceFeature(\n\t\t\t\t\t\t\t\t[], tf.int64, allow_missing=True),\n\t\t\t\t\t\t\t\"raw_query_mask\": tf.FixedLenSequenceFeature(\n\t\t\t\t\t\t\t\t[], tf.int64, allow_missing=True),\n\t\t\t\t\t\t\t\"raw_query_segment_ids\": tf.FixedLenSequenceFeature(\n\t\t\t\t\t\t\t\t[], tf.int64, allow_missing=True),\n\t\t\t\t\t\t\t\"rewrite_query_ids\": tf.FixedLenSequenceFeature(\n\t\t\t\t\t\t\t\t[], tf.int64, allow_missing=True),\n\t\t\t\t\t\t\t\"rewrite_query_mask\": tf.FixedLenSequenceFeature(\n\t\t\t\t\t\t\t\t[], tf.int64, allow_missing=True),\n\t\t\t\t\t\t\t\"rewrite_query_segment_ids\": tf.FixedLenSequenceFeature(\n\t\t\t\t\t\t\t\t[], tf.int64, allow_missing=True),\n\t\t\t\t\t\t\t\"doc_ids0\": tf.FixedLenSequenceFeature(\n\t\t\t\t\t\t\t\t[], tf.int64, 
allow_missing=True),\n\t\t\t\t\t\t\t\"doc_ids1\": tf.FixedLenSequenceFeature(\n\t\t\t\t\t\t\t\t[], tf.int64, allow_missing=True),\n\t\t\t\t\t\t\t\"label\": tf.FixedLenFeature([], tf.int64),\n\t\t\t\t\t\t\t}\n\t\t\t\tsample = tf.parse_single_example(data_record, features)\n\n\t\t\t\traw_query_ids = tf.cast(sample[\"raw_query_ids\"][:max_q_len], tf.int32)\n\t\t\t\traw_query_segment_ids = tf.cast(sample[\"raw_query_segment_ids\"][:max_q_len], tf.int32)\n\t\t\t\traw_query_mask = tf.ones_like(raw_query_ids)\n\t\t\t\t# effective_raw_query_mask = tf.ones_like(query_ids)\n\t\t\t\teffective_raw_query_mask = tf.cast(sample[\"raw_query_mask\"][:max_q_len], tf.int32)\n\n\t\t\t\trewrite_query_ids = tf.cast(sample[\"rewrite_query_ids\"][:max_q_len], tf.int32)\n\t\t\t\trewrite_query_segment_ids = tf.cast(sample[\"rewrite_query_segment_ids\"][:max_q_len], tf.int32)\n\t\t\t\trewrite_query_mask = tf.ones_like(rewrite_query_ids)\n\t\t\t\teffective_rewrite_query_mask = tf.cast(sample[\"rewrite_query_mask\"][:max_q_len], tf.int32)\n\n\n\t\t\t\tdoc0_ids = tf.cast(sample[\"doc_ids0\"][:max_p_len], tf.int32)\n\t\t\t\tdoc0_segment_ids = tf.zeros_like(doc0_ids)\n\t\t\t\tdoc0_mask = tf.ones_like(doc0_ids)\n\t\t\t\teffective_doc0_mask = tf.ones_like(doc0_ids)\n\n\n\t\t\t\tdoc1_ids = tf.cast(sample[\"doc_ids1\"][:max_p_len], tf.int32)\n\t\t\t\tdoc1_segment_ids = tf.zeros_like(doc1_ids)\n\t\t\t\tdoc1_mask = tf.ones_like(doc1_ids)\n\t\t\t\teffective_doc1_mask = tf.ones_like(doc1_ids)\n\n\n\t\t\t\tlabel = tf.cast(sample[\"label\"], tf.float32)\n\n\n\t\t\t\tfeatures = {\n\t\t\t\t\t\"raw_query_ids\": raw_query_ids,\n\t\t\t\t\t\"raw_query_segment_ids\": raw_query_segment_ids,\n\t\t\t\t\t\"raw_query_mask\": raw_query_mask,\n\t\t\t\t\t\"effective_raw_query_mask\": effective_raw_query_mask,\n\t\t\t\t\t\"rewrite_query_ids\": rewrite_query_ids,\n\t\t\t\t\t\"rewrite_query_segment_ids\": rewrite_query_segment_ids,\n\t\t\t\t\t\"rewrite_query_mask\": rewrite_query_mask,\n\t\t\t\t\t\"effective_rewrite_query_mask\": effective_rewrite_query_mask,\n\t\t\t\t\t\"doc0_ids\": doc0_ids,\n\t\t\t\t\t\"doc0_segment_ids\": doc0_segment_ids,\n\t\t\t\t\t\"doc0_mask\": doc0_mask,\n\t\t\t\t\t\"effective_doc0_mask\": effective_doc0_mask,\n\t\t\t\t\t\"doc1_ids\": doc1_ids,\n\t\t\t\t\t\"doc1_segment_ids\": doc1_segment_ids,\n\t\t\t\t\t\"doc1_mask\": doc1_mask,\n\t\t\t\t\t\"effective_doc1_mask\": effective_doc1_mask,\n\t\t\t\t\t\"label\": label,\n\t\t\t\t\t}\n\t\t\telse:\n\t\t\t\tif is_output:\n\t\t\t\t\tif doc_type==0:\n\t\t\t\t\t\tfeatures = {\n\t\t\t\t\t\t\t\"raw_query_ids\": tf.FixedLenSequenceFeature(\n\t\t\t\t\t\t\t\t\t[], tf.int64, allow_missing=True),\n\t\t\t\t\t\t\t\"raw_query_mask\": tf.FixedLenSequenceFeature(\n\t\t\t\t\t\t\t\t\t[], tf.int64, allow_missing=True),\n\t\t\t\t\t\t\t\"raw_query_segment_ids\": tf.FixedLenSequenceFeature(\n\t\t\t\t\t\t\t\t\t[], tf.int64, allow_missing=True),\n\t\t\t\t\t\t\t\"query_id\": tf.FixedLenFeature([], tf.int64),\n\t\t\t\t\t\t}\n\t\t\t\t\t\tsample = tf.parse_single_example(data_record, features)\n\t\t\t\t\t\tinput_ids = tf.cast(sample[\"raw_query_ids\"][:max_seq_len[doc_type]], tf.int32)\n\t\t\t\t\t\tsegment_ids = tf.cast(sample[\"raw_query_segment_ids\"][:max_seq_len[doc_type]], tf.int32)\n\t\t\t\t\t\tinput_mask = tf.ones_like(input_ids)\n\t\t\t\t\t\teffective_input_mask = tf.cast(sample[\"raw_query_mask\"][:max_seq_len[doc_type]], tf.int32)\n\t\t\t\t\t\tdocid = tf.cast(sample[\"query_id\"], tf.int32)\n\t\t\t\t\t\tlabel = tf.cast(0, tf.int32) #dummy\n\t\t\t\t\t\tfeatures = {\n\t\t\t\t\t\t\t\"input_ids\": 
input_ids,\n\t\t\t\t\t\t\t\"segment_ids\": segment_ids,\n\t\t\t\t\t\t\t\"input_mask\": input_mask,\n\t\t\t\t\t\t\t\"effective_input_mask\": effective_input_mask,\n\t\t\t\t\t\t\t\"docid\": docid,\n\t\t\t\t\t\t\t\"label\": label,\n\t\t\t\t\t\t}\n\t\t\t\t\telif doc_type==1:\n\t\t\t\t\t\tfeatures = {\n\t\t\t\t\t\t\t\"doc_ids\": tf.FixedLenSequenceFeature(\n\t\t\t\t\t\t\t\t[], tf.int64, allow_missing=True),\n\t\t\t\t\t\t\t\"docid\": tf.FixedLenFeature([], tf.int64),\n\t\t\t\t\t\t}\n\t\t\t\t\t\tsample = tf.parse_single_example(data_record, features)\n\t\t\t\t\t\tdoc_ids = sample[\"doc_ids\"][:max_seq_len[doc_type]]\n\n\t\t\t\t\t\tinput_ids = tf.cast(doc_ids, tf.int32)\n\t\t\t\t\t\tsegment_ids = tf.zeros_like(input_ids)\n\t\t\t\t\t\tinput_mask = tf.ones_like(input_ids)\n\t\t\t\t\t\tdocid = tf.cast(sample[\"docid\"], tf.int32)\n\t\t\t\t\t\tlabel = tf.cast(0, tf.int32) #dummy\n\t\t\t\t\t\tfeatures = {\n\t\t\t\t\t\t\t\"input_ids\": input_ids,\n\t\t\t\t\t\t\t\"segment_ids\": segment_ids,\n\t\t\t\t\t\t\t\"input_mask\": input_mask,\n\t\t\t\t\t\t\t\"effective_input_mask\": input_mask,\n\t\t\t\t\t\t\t\"docid\": docid,\n\t\t\t\t\t\t\t\"label\": label,\n\t\t\t\t\t\t}\n\t\t\t\telif is_eval:\n\t\t\t\t\tfeatures = {\n\t\t\t\t\t\t\"query_ids\": tf.FixedLenSequenceFeature(\n\t\t\t\t\t\t\t[], tf.int64, allow_missing=True),\n\t\t\t\t\t\t\"query_segment_ids\": tf.FixedLenSequenceFeature(\n\t\t\t\t\t\t\t[], tf.int64, allow_missing=True),\n\t\t\t\t\t\t\"query_mask\": tf.FixedLenSequenceFeature(\n\t\t\t\t\t\t\t[], tf.int64, allow_missing=True),\n\t\t\t\t\t\t\"doc_ids\": tf.FixedLenSequenceFeature(\n\t\t\t\t\t\t\t[], tf.int64, allow_missing=True),\n\t\t\t\t\t\t\"label\": tf.FixedLenFeature([], tf.int64),\n\t\t\t\t\t}\n\t\t\t\t\tsample = tf.parse_single_example(data_record, features)\n\n\t\t\t\t\tquery_ids = tf.cast(sample[\"query_ids\"][:max_q_len], tf.int32)\n\t\t\t\t\tquery_segment_ids = tf.cast(sample[\"query_segment_ids\"][:max_q_len], tf.int32)\n\t\t\t\t\t# query_segment_ids = tf.zeros_like(query_ids)\n\t\t\t\t\tquery_mask = tf.ones_like(query_ids)\n\t\t\t\t\teffective_query_mask = tf.cast(sample[\"query_mask\"][:max_q_len], tf.int32)\n\t\t\t\t\t# effective_query_mask = tf.ones_like(query_ids)\n\n\t\t\t\t\tdocx_ids = tf.cast(sample[\"doc_ids\"][:max_p_len], tf.int32)\n\t\t\t\t\tdocx_segment_ids = tf.zeros_like(docx_ids)\n\t\t\t\t\tdocx_mask = tf.ones_like(docx_ids)\n\t\t\t\t\teffective_docx_mask = tf.ones_like(docx_ids)\n\n\n\t\t\t\t\tlabel = tf.cast(sample[\"label\"], tf.int32)\n\n\t\t\t\t\tfeatures = {\n\t\t\t\t\t\t\"query_ids\": query_ids,\n\t\t\t\t\t\t\"query_segment_ids\": query_segment_ids,\n\t\t\t\t\t\t\"query_mask\": query_mask,\n\t\t\t\t\t\t\"effective_query_mask\": effective_query_mask,\n\t\t\t\t\t\t\"docx_ids\": docx_ids,\n\t\t\t\t\t\t\"docx_segment_ids\": docx_segment_ids,\n\t\t\t\t\t\t\"docx_mask\": docx_mask,\n\t\t\t\t\t\t\"effective_docx_mask\": effective_docx_mask,\n\t\t\t\t\t\t\"label\": label,\n\t\t\t\t\t}\n\n\n\t\t\treturn features\n\n\t\tdataset = tf.data.TFRecordDataset([dataset_path])\n\t\tdataset = dataset.map(\n\t\t\textract_fn, num_parallel_calls=4).prefetch(output_buffer_size)\n\n\t\tif is_training:\n\t\t\tdataset = dataset.repeat()\n\t\t\tdataset = dataset.shuffle(buffer_size=1000)\n\t\t\tdataset = dataset.padded_batch(\n\t\t\t\t\t\tbatch_size=batch_size,\n\t\t\t\t\t\tpadded_shapes={\n\t\t\t\t\t\t\t\"raw_query_ids\": [max_q_len],\n\t\t\t\t\t\t\t\"raw_query_segment_ids\": [max_q_len],\n\t\t\t\t\t\t\t\"raw_query_mask\": [max_q_len],\n\t\t\t\t\t\t\t\"effective_raw_query_mask\": 
[max_q_len],\n\t\t\t\t\t\t\t\"rewrite_query_ids\": [max_q_len],\n\t\t\t\t\t\t\t\"rewrite_query_segment_ids\": [max_q_len],\n\t\t\t\t\t\t\t\"rewrite_query_mask\": [max_q_len],\n\t\t\t\t\t\t\t\"effective_rewrite_query_mask\": [max_q_len],\n\t\t\t\t\t\t\t\"doc0_ids\": [max_p_len],\n\t\t\t\t\t\t\t\"doc0_segment_ids\": [max_p_len],\n\t\t\t\t\t\t\t\"doc0_mask\": [max_p_len],\n\t\t\t\t\t\t\t\"effective_doc0_mask\": [max_p_len],\n\t\t\t\t\t\t\t\"doc1_ids\": [max_p_len],\n\t\t\t\t\t\t\t\"doc1_segment_ids\": [max_p_len],\n\t\t\t\t\t\t\t\"doc1_mask\": [max_p_len],\n\t\t\t\t\t\t\t\"effective_doc1_mask\": [max_p_len],\n\t\t\t\t\t\t\t\"label\": []\n\t\t\t\t\t\t},\n\t\t\t\t\t\tpadding_values={\n\t\t\t\t\t\t\t\"raw_query_ids\": 0,\n\t\t\t\t\t\t\t\"raw_query_segment_ids\": 0,\n\t\t\t\t\t\t\t\"raw_query_mask\": 0,\n\t\t\t\t\t\t\t\"effective_raw_query_mask\":0,\n\t\t\t\t\t\t\t\"rewrite_query_ids\": 0,\n\t\t\t\t\t\t\t\"rewrite_query_segment_ids\": 0,\n\t\t\t\t\t\t\t\"rewrite_query_mask\": 0,\n\t\t\t\t\t\t\t\"effective_rewrite_query_mask\":0,\n\t\t\t\t\t\t\t\"doc0_ids\": 0,\n\t\t\t\t\t\t\t\"doc0_segment_ids\": 0,\n\t\t\t\t\t\t\t\"doc0_mask\": 0,\n\t\t\t\t\t\t\t\"effective_doc0_mask\": 0,\n\t\t\t\t\t\t\t\"doc1_ids\": 0,\n\t\t\t\t\t\t\t\"doc1_segment_ids\": 0,\n\t\t\t\t\t\t\t\"doc1_mask\": 0,\n\t\t\t\t\t\t\t\"effective_doc1_mask\": 0,\n\t\t\t\t\t\t\t\"label\": 0.0,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tdrop_remainder=True)\n\t\telse:\n\t\t\tif max_eval_examples:\n\t\t\t\t# Use at most this number of examples (debugging only).\n\t\t\t\tdataset = dataset.take(max_eval_examples)\n\t\t\tif is_output:\n\t\t\t\tdataset = dataset.padded_batch(\n\t\t\t\t\t\t\tbatch_size=batch_size,\n\t\t\t\t\t\t\tpadded_shapes={\n\t\t\t\t\t\t\t\t\"input_ids\": [max_seq_len[doc_type]],\n\t\t\t\t\t\t\t\t\"segment_ids\": [max_seq_len[doc_type]],\n\t\t\t\t\t\t\t\t\"input_mask\": [max_seq_len[doc_type]],\n\t\t\t\t\t\t\t\t\"effective_input_mask\": [max_seq_len[doc_type]],\n\t\t\t\t\t\t\t\t\"docid\": [],\n\t\t\t\t\t\t\t\t\"label\": [],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tpadding_values={\n\t\t\t\t\t\t\t\t\"input_ids\": 0,\n\t\t\t\t\t\t\t\t\"segment_ids\": 0,\n\t\t\t\t\t\t\t\t\"input_mask\": 0,\n\t\t\t\t\t\t\t\t\"effective_input_mask\": 0,\n\t\t\t\t\t\t\t\t\"docid\": 0,\n\t\t\t\t\t\t\t\t\"label\": 0,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tdrop_remainder=True)\n\n\t\t\telif is_eval:\n\t\t\t\tdataset = dataset.padded_batch(\n\t\t\t\t\t\t\t\tbatch_size=batch_size,\n\t\t\t\t\t\t\t\tpadded_shapes={\n\t\t\t\t\t\t\t\t\t\"query_ids\": [max_q_len],\n\t\t\t\t\t\t\t\t\t\"query_segment_ids\": [max_q_len],\n\t\t\t\t\t\t\t\t\t\"query_mask\": [max_q_len],\n\t\t\t\t\t\t\t\t\t\"effective_query_mask\": [max_q_len],\n\t\t\t\t\t\t\t\t\t\"docx_ids\": [max_p_len],\n\t\t\t\t\t\t\t\t\t\"docx_segment_ids\": [max_p_len],\n\t\t\t\t\t\t\t\t\t\"docx_mask\": [max_p_len],\n\t\t\t\t\t\t\t\t\t\"effective_docx_mask\": [max_p_len],\n\t\t\t\t\t\t\t\t\t\"label\": []\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tpadding_values={\n\t\t\t\t\t\t\t\t\t\"query_ids\": 0,\n\t\t\t\t\t\t\t\t\t\"query_segment_ids\": 0,\n\t\t\t\t\t\t\t\t\t\"query_mask\": 0,\n\t\t\t\t\t\t\t\t\t\"effective_query_mask\": 0,\n\t\t\t\t\t\t\t\t\t\"docx_ids\": 0,\n\t\t\t\t\t\t\t\t\t\"docx_segment_ids\": 0,\n\t\t\t\t\t\t\t\t\t\"docx_mask\": 0,\n\t\t\t\t\t\t\t\t\t\"effective_docx_mask\":0,\n\t\t\t\t\t\t\t\t\t\"label\": 0,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tdrop_remainder=True)\n\t\treturn dataset\n\treturn input_fn", "def _calc2call(func):\n def _converter(inp, *x):\n if func.n_inputs == 1:\n retvals = func.evaluate(x[0], *inp)\n else:\n 
retvals = func.evaluate(x[0], x[1], *inp)\n return retvals\n return _converter", "def make_input_fn_from_generator(gen, use_static_shapes=False):\n first_ex = six.next(gen)\n flattened = contrib.framework().nest.flatten(first_ex)\n types = [t.dtype for t in flattened]\n if use_static_shapes:\n shapes = [t.shape for t in flattened]\n else:\n shapes = [(None,) * len(t.shape) for t in flattened]\n first_ex_list = [first_ex]\n\n def py_func():\n if first_ex_list:\n example = first_ex_list.pop()\n else:\n example = six.next(gen)\n return contrib.framework().nest.flatten(example)\n\n def input_fn():\n flat_example = tf.py_func(py_func, [], types)\n _ = [t.set_shape(shape) for t, shape in zip(flat_example, shapes)]\n example = contrib.framework().nest.pack_sequence_as(first_ex, flat_example)\n return example\n\n return input_fn", "def __init__(__self__, *,\n function_name: pulumi.Input[str],\n input: pulumi.Input[str],\n qualifier: Optional[pulumi.Input[str]] = None,\n triggers: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n pulumi.set(__self__, \"function_name\", function_name)\n pulumi.set(__self__, \"input\", input)\n if qualifier is not None:\n pulumi.set(__self__, \"qualifier\", qualifier)\n if triggers is not None:\n pulumi.set(__self__, \"triggers\", triggers)", "def input_name_from_func_name(func_name):\n\treturn os.path.join(INPUTS_DIR, ''.join(func_name.split('make_')[1:])) \\\n\t\t\t+ '.%s' % EXTENSION", "def get_input_fn(\n doc_dir,\n semi_dir,\n sent_dir,\n split,\n uncased,\n seq_len,\n num_predict,\n bsz_per_host,\n num_hosts=1,\n num_core_per_host=1,\n use_bfloat16=False,\n **kwargs):\n\n def dir_to_paths(data_dir, data_type):\n \"\"\"Get data file paths in the given dir.\"\"\"\n file_paths = []\n\n if data_dir:\n tf.logging.info(\"=\" * 120)\n\n case_str = \"uncased.\" if uncased else \"\"\n glob_base = \"data.{}.{}.{}tfrecord*\".format(split, data_type, case_str)\n\n for idx, dir_path in enumerate(data_dir.split(\",\")):\n glob = os.path.join(dir_path, glob_base)\n cur_file_paths = sorted(tf.io.gfile.glob(glob))\n file_paths += cur_file_paths\n\n tf.logging.info(\"[%d] Data glob: %s\", idx, glob)\n tf.logging.info(\"[%d] Num of file path: %d\", idx, len(cur_file_paths))\n\n tf.logging.info(\"[%s] Total number of file path: %d\", data_type,\n len(file_paths))\n\n return file_paths\n\n doc_files = dir_to_paths(doc_dir, \"doc\")\n semi_files = dir_to_paths(semi_dir, \"doc\")\n sent_files = dir_to_paths(sent_dir, \"sent\")\n\n file_list = [doc_files, semi_files, sent_files]\n func_list = [doc_mass_dataset, semidoc_mass_dataset, sent_mass_dataset]\n\n def input_fn(params):\n \"\"\"Construct input function for TPUEstimator.\"\"\"\n assert params[\"batch_size\"] * num_core_per_host == bsz_per_host\n\n datasets = []\n for files, func in zip(file_list, func_list):\n if files:\n cur_dataset = func(\n params=params,\n num_hosts=num_hosts,\n num_core_per_host=num_core_per_host,\n is_training=split == \"train\",\n file_names=files,\n seq_len=seq_len,\n num_predict=num_predict,\n use_bfloat16=use_bfloat16,\n **kwargs)\n\n datasets.append(cur_dataset)\n\n if len(datasets) > 1:\n dataset = tf.data.experimental.sample_from_datasets(datasets)\n elif len(datasets) == 1:\n dataset = datasets[0]\n\n return dataset\n\n return input_fn", "def make_input_fn_from_generator(gen):\n first_ex = six.next(gen)\n flattened = tf.contrib.framework.nest.flatten(first_ex)\n types = [t.dtype for t in flattened]\n shapes = [[None] * len(t.shape) for t in flattened]\n first_ex_list = 
[first_ex]\n\n def py_func():\n if first_ex_list:\n example = first_ex_list.pop()\n else:\n example = six.next(gen)\n return tf.contrib.framework.nest.flatten(example)\n\n def input_fn():\n flat_example = tf.py_func(py_func, [], types)\n _ = [t.set_shape(shape) for t, shape in zip(flat_example, shapes)]\n example = tf.contrib.framework.nest.pack_sequence_as(first_ex, flat_example)\n return example\n\n return input_fn", "def _handleInput(self, paramInput):\n super()._handleInput(paramInput)\n dataFilename = paramInput.findFirst('dataFilename')\n if dataFilename != None:\n self.dataFilename = os.path.join(self.workingDir,dataFilename.value)\n else:\n self.raiseAnError(IOError,'<dataFilename> parameter needed for MultiDimensional Distributions!!!!')\n\n functionType = dataFilename.parameterValues['type']\n if functionType != None:\n self.functionType = functionType\n else:\n self.raiseAnError(IOError,'<functionType> parameter needed for MultiDimensional Distributions!!!!')\n\n self.initializeDistribution()", "def addTransform(self,input,output,func):\n import inspect\n from collections import defaultdict\n\n try:\n args, varargs, varkw, defaults = inspect.getargspec(func)\n if len(args)-1 > len(defaults) or varkw:\n raise TypeError('input function must take one argument')\n except TypeError:\n raise TypeError('input func is not a callable')\n\n #make sure we are in the instance\n if '_inputtransforms' not in self.__dict__:\n dd = defaultdict(dict)\n dd.update(self._inputtransforms)\n self._inputtransforms = dd\n\n dd[input][output] = func", "def createTransformFunc(self):\n raise NotImplementedError()", "def input_reg_fn_builder(features, seq_length, is_training, drop_remainder):\n\n all_input_ids = []\n all_input_mask = []\n all_segment_ids = []\n all_label_ids = []\n\n for feature in features:\n all_input_ids.append(feature.input_ids)\n all_input_mask.append(feature.input_mask)\n all_segment_ids.append(feature.segment_ids)\n all_label_ids.append(feature.label_id)\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. 
The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"segment_ids\":\n tf.constant(\n all_segment_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"label_ids\":\n tf.constant(all_label_ids, shape=[num_examples], dtype=tf.float32),\n })\n\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=1000)\n\n d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)\n return d\n\n return input_fn", "def input_fn(params):\n batch_size = params[\"batch_size\"]\n\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size = 100)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record:_decode_record(record, name_to_features),\n batch_size = batch_size,\n drop_remainder=drop_remainder))\n return d", "def json_serving_input_fn():\n inputs = {}\n for feat in INPUT_COLUMNS:\n inputs[feat.name] = tf.placeholder(shape=[None], dtype=feat.dtype)\n\n features = {\n key: tf.expand_dims(tensor, -1)\n for key, tensor in inputs.iteritems()\n }\n return tf.contrib.learn.InputFnOps(features, None, inputs)", "def input_fn():\n features = {\n feature_name: tf.constant(features_np_list[i])\n for i, feature_name in enumerate(feature_names)\n }\n return tf.data.Dataset.zip((tf.data.Dataset.from_tensors(features),\n tf.data.Dataset.from_tensors(label_np),))", "def input_fn():\n # It's important to build all the tensors together in one DataFrame.\n # If we did df.select() for both key sets and then build those, the two\n # resulting DataFrames would be shuffled independently.\n tensors = limited_dataframe.build(**kwargs)\n\n base_input_features = {key: tensors[key] for key in base_input_keys}\n labels = {key: tensors[key] for key in label_keys}\n\n # TODO(soergel): Remove this special case when b/30367437 is fixed.\n if len(labels) == 1:\n labels = list(labels.values())[0]\n\n return base_input_features, labels", "def file_based_input_fn_builder(input_file, seq_length, is_training,\n drop_remainder,batch_size,buffer_size):\n\n name_to_features = {\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"label_ids\": tf.FixedLenFeature([], tf.int64),\n \"is_real_example\": tf.FixedLenFeature([], tf.int64),\n }\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n # batch_size = params[\"batch_size\"]\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=buffer_size)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d\n\n 
return input_fn", "def _serving_input_fn(self):\n seq = tf.placeholder(dtype=tf.float32, shape=[None, None], name='seq')\n features = {'seq': seq}\n return tf.estimator.export.build_raw_serving_input_receiver_fn(features)", "def file_based_input_fn_builder(input_file,\n batch_size,\n seq_length,\n is_training,\n drop_remainder,\n hvd=None):\n\n name_to_features = {\n \"input_ids\": tf.io.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.io.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.io.FixedLenFeature([seq_length], tf.int64),\n \"label_ids\": tf.io.FixedLenFeature([], tf.int64),\n }\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example\n\n \"\"\"The actual input function.\"\"\"\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n if hvd is not None: d = d.shard(hvd.size(), hvd.rank())\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d", "def get_input_fn_dataset(pattern, flags, batch_size):\n print 'Using dataset input fn'\n\n def input_fn(params=None):\n \"\"\"Input function using Dataset for TPU.\"\"\"\n del params\n full_pattern = os.path.join(flags.data_dir, pattern)\n dataset = tf.data.Dataset.list_files(full_pattern)\n\n if flags.initial_shuffle_buffer_size > 0:\n dataset = dataset.shuffle(buffer_size=flags.initial_shuffle_buffer_size)\n dataset = dataset.repeat()\n\n # use interleave() and prefetch() to read many files concurrently\n def prefetch_map_fn(filename):\n return tf.data.TFRecordDataset(filename).prefetch(batch_size)\n\n if flags.prefetch_enabled:\n dataset = dataset.interleave(\n prefetch_map_fn,\n cycle_length=flags.cycle_length,\n block_length=batch_size)\n\n if flags.followup_shuffle_buffer_size > 0:\n dataset = dataset.shuffle(buffer_size=flags.followup_shuffle_buffer_size)\n\n frame_nums = range(0, flags.sequence_length, flags.skip_num)\n\n def parser(_, serialized_example):\n \"\"\"Parses a single example.\"\"\"\n features = {}\n\n for i in frame_nums:\n image_name = 'image_' + str(i)\n if flags.dataset_type == 'robot':\n pose_name = 'state_' + str(i)\n action_name = 'action_' + str(i)\n joint_pos_name = 'joint_positions_' + str(i)\n\n features[pose_name] = tf.FixedLenFeature([flags.pose_dim], tf.float32)\n features[image_name] = tf.FixedLenFeature([1], tf.string)\n features[action_name] = tf.FixedLenFeature([flags.pose_dim],\n tf.float32)\n features[joint_pos_name] = tf.FixedLenFeature([flags.joint_pos_dim],\n tf.float32)\n else:\n features[image_name] = tf.FixedLenFeature([1], tf.string)\n\n parsed_input = tf.parse_single_example(serialized_example, features)\n\n for i in frame_nums:\n image_name = 'image_' + str(i)\n pose_name = 'state_' + str(i)\n action_name = 'action_' + str(i)\n joint_pos_name = 'joint_positions_' + str(i)\n\n # Process image\n image_buffer = tf.reshape(parsed_input[image_name], shape=[])\n image = 
tf.image.decode_jpeg(image_buffer, channels=COLOR_CHAN)\n image = tf.image.resize_images(\n image, (IMG_HEIGHT, IMG_WIDTH),\n method=tf.image.ResizeMethod.BICUBIC)\n image = tf.cast(tf.expand_dims(image, 0), tf.float32) / 255.0\n\n if flags.dataset_type == 'robot':\n pose = tf.reshape(parsed_input[pose_name], shape=[flags.pose_dim])\n pose = tf.expand_dims(pose, 0)\n action = tf.reshape(parsed_input[action_name], shape=[flags.pose_dim])\n action = tf.expand_dims(action, 0)\n joint_pos = tf.reshape(\n parsed_input[joint_pos_name], shape=[flags.joint_pos_dim])\n joint_pos = tf.expand_dims(joint_pos, 0)\n else:\n pose = tf.zeros([1, flags.pose_dim])\n action = tf.zeros([1, flags.pose_dim])\n joint_pos = tf.zeros([1, flags.joint_pos_dim])\n\n if i == 0:\n image_seq = image\n action_seq, pose_seq, joint_pos_seq = action, pose, joint_pos\n else:\n image_seq = tf.concat([image_seq, image], 0)\n action_seq = tf.concat([action_seq, action], 0)\n pose_seq = tf.concat([pose_seq, pose], 0)\n joint_pos_seq = tf.concat([joint_pos_seq, joint_pos], 0)\n\n return image_seq, action_seq, action_seq, joint_pos_seq\n\n dataset = dataset.map(\n parser,\n num_parallel_calls=flags.num_parallel_calls).prefetch(batch_size)\n\n dataset = dataset.batch(batch_size)\n\n # use prefetch to overlap producer and consumer\n dataset = dataset.prefetch(1)\n\n images, actions, poses, joint_pos = dataset.make_one_shot_iterator(\n ).get_next()\n\n images.set_shape([batch_size, len(frame_nums), IMG_HEIGHT, IMG_WIDTH, 3])\n actions.set_shape([batch_size, len(frame_nums), flags.pose_dim])\n poses.set_shape([batch_size, len(frame_nums), flags.pose_dim])\n joint_pos.set_shape([batch_size, len(frame_nums), flags.joint_pos_dim])\n\n joint_poses = tf.concat([joint_pos, poses], 2)\n\n output_features = {\n IMAGE_FEATURE_NAME: images,\n JOINT_POSE_FEATURE_NAME: joint_poses,\n ACTION_FEATURE_NAME: actions\n }\n\n return output_features, None\n\n return input_fn", "def fn(name : str, *, input : 'NET', gnd : 'NET', output : 'NET'):\n return make_component(name, type, {\"in\": input, \"gnd\":gnd, \"out\":output}, [], {'voltage': voltage}, prefix=\"U\")", "def input_fn():\n bos_id = tf.constant(BOS_ID, tf.int32)\n eos_id = tf.constant(EOS_ID, tf.int32)\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_files)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.map(lambda record: _decode_record(record, name_to_features))\n\n d = d.map(lambda src_ids, tgt_ids, label: (\n tf.concat([[bos_id], src_ids, [eos_id]], 0),\n tf.concat([tgt_ids, [eos_id]], 0),\n label))\n\n d = d.map(lambda src_ids, tgt_ids, label: (\n src_ids[:FLAGS.max_sequence_length],\n tgt_ids[:FLAGS.max_sequence_length],\n label\n ))\n\n d = d.map(lambda src_ids, tgt_ids, label: (\n tf.concat([src_ids, tgt_ids], 0),\n tf.concat([tf.zeros_like(src_ids), tf.ones_like(tgt_ids)], 0),\n label\n ))\n\n d = d.map(lambda input_ids, segment_ids, label_ids: (\n input_ids,\n segment_ids,\n tf.ones_like(input_ids),\n label_ids\n ))\n\n def batching_func(x):\n return x.padded_batch(\n batch_size,\n # The entry is the source line rows;\n # this has unknown-length vectors. 
The last entry is\n # the source row size; this is a scalar.\n padded_shapes=(\n tf.TensorShape([None]), # src\n tf.TensorShape([None]), # tgt\n tf.TensorShape([None]),\n tf.TensorShape([])), # src_len\n # Pad the source sequences with eos tokens.\n # (Though notice we don't generally need to do this since\n # later on we will be masking out calculations past the true sequence.\n padding_values=(\n PAD_ID, # src\n PAD_ID,\n PAD_ID,\n 0)) # src_len -- unused\n\n batched_dataset = batching_func(d)\n features = batched_dataset.map(lambda input_ids, segment_ids, input_mask, label:\n {\n \"input_ids\": input_ids,\n \"segment_ids\": segment_ids,\n \"input_mask\": input_mask,\n \"label_ids\": label\n\n })\n\n return features", "def open_input(self, fn):\n\treturn (None, None)", "def validate_input_fn(self) -> types.FeatureAndLabelTensors:\n return self._input_fn_from_file(self._validate_path)", "def _make_training_input_fn(tft_working_dir,\n filebase,\n num_epochs=None,\n shuffle=True,\n batch_size=200,\n buffer_size=None,\n prefetch_buffer_size=1):\n if buffer_size is None:\n buffer_size = 2 * batch_size + 1\n\n # Examples have already been transformed so we only need the feature_columns\n # to parse the single the tf.Record\n\n transformed_metadata = metadata_io.read_metadata(\n os.path.join(\n tft_working_dir, transform_fn_io.TRANSFORMED_METADATA_DIR))\n transformed_feature_spec = transformed_metadata.schema.as_feature_spec()\n\n def parser(record):\n \"\"\"Help function to parse tf.Example\"\"\"\n parsed = tf.parse_single_example(record, transformed_feature_spec)\n label = parsed.pop(LABEL_KEY)\n return parsed, label\n\n def input_fn():\n \"\"\"Input function for training and eval.\"\"\"\n files = tf.data.Dataset.list_files(os.path.join(\n tft_working_dir, filebase + '*'))\n dataset = files.interleave(\n tf.data.TFRecordDataset, cycle_length=4, block_length=16)\n dataset = dataset.map(parser)\n\n if shuffle:\n dataset = dataset.shuffle(buffer_size)\n\n dataset = dataset.repeat(num_epochs)\n dataset = dataset.batch(batch_size)\n\n dataset = dataset.prefetch(prefetch_buffer_size)\n iterator = dataset.make_one_shot_iterator()\n transformed_features, transformed_labels = iterator.get_next()\n\n return transformed_features, transformed_labels\n\n return input_fn", "def input_fn_builder(dataset_path, seq_length, is_training,\n max_eval_examples=None):\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n\n batch_size = params[\"batch_size\"]\n output_buffer_size = batch_size * 1000\n\n def extract_fn(data_record):\n features = {\n \"query_ids\": tf.FixedLenSequenceFeature(\n [], tf.int64, allow_missing=True),\n \"doc_ids\": tf.FixedLenSequenceFeature(\n [], tf.int64, allow_missing=True),\n \"label\": tf.FixedLenFeature([], tf.int64),\n }\n sample = tf.parse_single_example(data_record, features)\n\n query_ids = tf.cast(sample[\"query_ids\"], tf.int32)\n doc_ids = tf.cast(sample[\"doc_ids\"], tf.int32)\n label_ids = tf.cast(sample[\"label\"], tf.int32)\n input_ids = tf.concat((query_ids, doc_ids), 0)\n\n query_segment_id = tf.zeros_like(query_ids)\n doc_segment_id = tf.ones_like(doc_ids)\n segment_ids = tf.concat((query_segment_id, doc_segment_id), 0)\n\n input_mask = tf.ones_like(input_ids)\n\n features = {\n \"input_ids\": input_ids,\n \"segment_ids\": segment_ids,\n \"input_mask\": input_mask,\n \"label_ids\": label_ids,\n }\n return features\n\n dataset = tf.data.TFRecordDataset([dataset_path])\n dataset = dataset.map(\n extract_fn, 
num_parallel_calls=4).prefetch(output_buffer_size)\n\n if is_training:\n dataset = dataset.repeat()\n dataset = dataset.shuffle(buffer_size=1000)\n else:\n if max_eval_examples:\n # Use at most this number of examples (debugging only).\n dataset = dataset.take(max_eval_examples)\n # pass\n\n dataset = dataset.padded_batch(\n batch_size=batch_size,\n padded_shapes={\n \"input_ids\": [seq_length],\n \"segment_ids\": [seq_length],\n \"input_mask\": [seq_length],\n \"label_ids\": [],\n },\n padding_values={\n \"input_ids\": 0,\n \"segment_ids\": 0,\n \"input_mask\": 0,\n \"label_ids\": 0,\n },\n drop_remainder=True)\n\n return dataset\n return input_fn", "def processInputs(self):", "def get_transform_fn():", "def _eval_input_fn():\n features_placeholder = {\n k: tf.compat.v1.placeholder(v.dtype, v.shape)\n for k, v in six.iteritems(features)\n }\n if use_multi_head:\n placeholder = tf.compat.v1.placeholder(labels.dtype, labels.shape)\n labels_placeholder = {\n _PRIMARY_HEAD: placeholder,\n _SECONDARY_HEAD: placeholder,\n }\n else:\n labels_placeholder = tf.compat.v1.placeholder(labels.dtype, labels.shape)\n dataset = tf.data.Dataset.from_tensors(\n (features_placeholder, labels_placeholder))\n iterator = tf.compat.v1.data.make_initializable_iterator(dataset)\n if use_multi_head:\n feed_dict = {\n labels_placeholder[head_name]: labels\n for head_name in labels_placeholder\n }\n else:\n feed_dict = {labels_placeholder: labels}\n\n feed_dict.update(\n {features_placeholder[k]: features[k] for k in features_placeholder})\n iterator_initializer_hook.iterator_initializer_fn = (\n lambda sess: sess.run(iterator.initializer, feed_dict=feed_dict))\n return iterator.get_next()", "def call(self, inputs, **kwargs): # pylint: disable=unused-argument\n return inputs", "def tool_factory_function(sources, alignment_node=None, **parameters):\n if not self.current_workflow:\n raise ValueError(\"No workflow context - use create_workflow first\")\n\n # find matching tools (possibly different parameters)\n matches = [f for f in self.current_workflow.factors if f.tool.__class__ == tool_func]\n # make sure parameters are all the same\n full_matches = [m for m in matches if m.sources == sources\n and m.alignment_node == alignment_node\n and dict(m.tool.parameters_dict) == parameters]\n\n if len(full_matches) == 1:\n tool = full_matches[0].tool\n else:\n tool = tool_func(**parameters)\n\n return dict(\n workflow=self.current_workflow,\n tool=tool,\n sources=sources,\n alignment_node=alignment_node)", "def serve_function(self):\n # Set name attribute of the input TensorSpec.\n input_signature = {\n name: tf.TensorSpec.from_spec(spec, name=name)\n for name, spec in self.features_inputter.input_signature().items()\n }\n\n @tf.function(input_signature=(input_signature,))\n def _run(features):\n features = self.features_inputter.make_features(features=features.copy())\n _, predictions = self(features)\n return predictions\n\n return _run", "def check_inputs(function):\n def decorated(self, data, *args, **kwargs):\n if not (isinstance(data, np.ndarray) and len(data.shape) == 2 and data.shape[1] == 1):\n raise ValueError('The argument `data` must be a numpy.ndarray with shape (n, 1).')\n\n return function(self, data, *args, **kwargs)\n\n decorated.__doc__ = function.__doc__\n return decorated", "def main(fn_input, fn_output):\n # read file\n inter = Interpolator()\n inter.read_file(fn_input)\n inter.write_interpolated(fn_output)" ]
[ "0.77863973", "0.70128167", "0.6983106", "0.6836533", "0.6774335", "0.6678941", "0.66704524", "0.6666463", "0.66138333", "0.66047716", "0.6547102", "0.6434819", "0.6386017", "0.63624465", "0.63570637", "0.63555074", "0.634394", "0.6342538", "0.6278813", "0.62751496", "0.6273507", "0.62570935", "0.62471926", "0.6246571", "0.61696845", "0.61693", "0.61679435", "0.6164851", "0.6162882", "0.61613905", "0.6160645", "0.6156167", "0.6152389", "0.61460483", "0.61389333", "0.6115024", "0.61088234", "0.6099416", "0.6098542", "0.6098507", "0.60968983", "0.609548", "0.609407", "0.6083389", "0.60704404", "0.6064605", "0.606229", "0.60554427", "0.60479563", "0.60207844", "0.6017801", "0.6014474", "0.6006519", "0.5998695", "0.598649", "0.59781826", "0.5974183", "0.59625286", "0.5944767", "0.5931114", "0.5927264", "0.59148574", "0.5870204", "0.5845631", "0.58329904", "0.5805288", "0.5795677", "0.5771705", "0.5763839", "0.5761288", "0.5751368", "0.5747375", "0.5731432", "0.57311994", "0.5727629", "0.57224375", "0.57203835", "0.57182336", "0.5708115", "0.5700431", "0.5696802", "0.5695923", "0.56894314", "0.5685438", "0.56823504", "0.56806064", "0.5676713", "0.5655731", "0.5646867", "0.56466395", "0.564343", "0.56429684", "0.5639546", "0.5635732", "0.56235623", "0.5619358", "0.5617281", "0.56148165", "0.56107485", "0.5606804", "0.5598228" ]
0.0
-1
Parse TFExample records and perform simple data augmentation.
def parse_fn(example):
    example_fmt = {
        "image": tf.FixedLenFeature((), tf.string),
        "target": tf.FixedLenFeature((), tf.float32, -1)
    }
    parsed = tf.parse_single_example(example, example_fmt)
    if return_full_size_image:
        preprocessed_image, full_size_image = _image_preprocess_fn(
            image_buffer=parsed["image"],
            input_height=299,
            input_width=299,
            input_mean=128,
            input_std=128,
            return_full_size_image=True)
        return preprocessed_image, parsed["target"], full_size_image
    preprocessed_image = _image_preprocess_fn(image_buffer=parsed["image"],
                                              input_height=299,
                                              input_width=299,
                                              input_mean=128,
                                              input_std=128)
    return preprocessed_image, parsed["target"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parser(record):\n record_spec = {\n \"input\": tf.FixedLenFeature([seq_len], tf.int64),\n \"labels\": tf.FixedLenFeature([tgt_len], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_len],tf.float32),\n \"target_mask\": tf.FixedLenFeature([tgt_len],tf.float32)\n }\n\n # retrieve serialized example\n example = tf.parse_single_example(\n serialized=record,\n features=record_spec)\n\n _convert_example(example, use_bfloat16)\n\n for k, v in example.items():\n tf.logging.info(\"%s: %s\", k, v)\n\n return example", "def parser(record):\n\n record_spec = {\n \"inputs\": tf.VarLenFeature(tf.int64),\n \"type_id\": tf.FixedLenFeature([1], tf.int64),\n }\n\n # retrieve serialized example\n example = tf.parse_single_example(\n serialized=record,\n features=record_spec)\n\n inputs = example[\"inputs\"]\n inp_len = tf.shape(inputs)[0]\n\n # expand type id to full length\n example[\"type_id\"] = tf.broadcast_to(example[\"type_id\"], [inp_len])\n\n # convert all sparse example to dense\n example = sparse_to_dense(example)\n\n return example", "def _parse_single_example(example, options):\n # Initialize `keys_to_features`.\n keys_to_features = {\n TFExampleFields.img_id: tf.io.FixedLenFeature([], tf.string),\n TFExampleFields.annot_id: tf.io.FixedLenFeature([], tf.string),\n TFExampleFields.answer_label: tf.io.FixedLenFeature([], tf.int64),\n TFExampleFields.img_bbox_label: tf.io.VarLenFeature(tf.string),\n TFExampleFields.img_bbox_score: tf.io.VarLenFeature(tf.float32),\n TFExampleFields.img_bbox_feature: tf.io.VarLenFeature(tf.float32),\n TFExampleFields.question: tf.io.VarLenFeature(tf.string),\n TFExampleFields.question_tag: tf.io.VarLenFeature(tf.int64),\n }\n for bbox_key in TFExampleFields.img_bbox_field_keys:\n bbox_field = os.path.join(TFExampleFields.img_bbox_scope, bbox_key)\n keys_to_features[bbox_field] = tf.io.VarLenFeature(tf.float32)\n for i in range(1, 1 + NUM_CHOICES):\n keys_to_features.update({\n TFExampleFields.cls_bert + '_%i' % i:\n tf.io.VarLenFeature(tf.float32),\n TFExampleFields.question_bert + '_%i' % i:\n tf.io.VarLenFeature(tf.float32),\n TFExampleFields.answer_choice + '_%i' % i:\n tf.io.VarLenFeature(tf.string),\n TFExampleFields.answer_choice_tag + '_%i' % i:\n tf.io.VarLenFeature(tf.int64),\n TFExampleFields.answer_choice_bert + '_%i' % i:\n tf.io.VarLenFeature(tf.float32)\n })\n\n # Initialize `items_to_handlers`.\n items_to_handlers = {\n InputFields.img_id:\n tfexample_decoder.Tensor(tensor_key=TFExampleFields.img_id,\n default_value=''),\n InputFields.annot_id:\n tfexample_decoder.Tensor(tensor_key=TFExampleFields.annot_id,\n default_value=''),\n InputFields.answer_label:\n tfexample_decoder.Tensor(tensor_key=TFExampleFields.answer_label,\n default_value=-1),\n InputFields.object_bboxes:\n tfexample_decoder.BoundingBox(\n keys=TFExampleFields.img_bbox_field_keys,\n prefix=TFExampleFields.img_bbox_scope),\n InputFields.object_labels:\n tfexample_decoder.Tensor(tensor_key=TFExampleFields.img_bbox_label,\n default_value=''),\n InputFields.object_scores:\n tfexample_decoder.Tensor(tensor_key=TFExampleFields.img_bbox_score,\n default_value=0),\n InputFields.question:\n tfexample_decoder.Tensor(tensor_key=TFExampleFields.question,\n default_value=PAD),\n InputFields.question_tag:\n tfexample_decoder.Tensor(tensor_key=TFExampleFields.question_tag,\n default_value=-1),\n TFExampleFields.img_bbox_feature:\n tfexample_decoder.Tensor(tensor_key=TFExampleFields.img_bbox_feature,\n default_value=0),\n }\n\n for i in range(1, 1 + NUM_CHOICES):\n tensor_key = 
TFExampleFields.cls_bert + '_%i' % i\n items_to_handlers[tensor_key] = tfexample_decoder.Tensor(\n tensor_key=tensor_key, default_value=0)\n tensor_key = TFExampleFields.question_bert + '_%i' % i\n items_to_handlers[tensor_key] = tfexample_decoder.Tensor(\n tensor_key=tensor_key, default_value=0)\n tensor_key = TFExampleFields.answer_choice + '_%i' % i\n items_to_handlers[tensor_key] = tfexample_decoder.Tensor(\n tensor_key=tensor_key, default_value=PAD)\n tensor_key = TFExampleFields.answer_choice_tag + '_%i' % i\n items_to_handlers[tensor_key] = tfexample_decoder.Tensor(\n tensor_key=tensor_key, default_value=-1)\n tensor_key = TFExampleFields.answer_choice_bert + '_%i' % i\n items_to_handlers[tensor_key] = tfexample_decoder.Tensor(\n tensor_key=tensor_key, default_value=0)\n if options.decode_jpeg:\n keys_to_features.update({\n TFExampleFields.img_encoded: tf.io.FixedLenFeature([], tf.string),\n TFExampleFields.img_format: tf.io.FixedLenFeature([], tf.string),\n })\n items_to_handlers.update({\n InputFields.img_data:\n tfexample_decoder.Image(image_key=TFExampleFields.img_encoded,\n format_key=TFExampleFields.img_format,\n shape=None)\n })\n\n # Decode example.\n example_decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,\n items_to_handlers)\n\n output_keys = example_decoder.list_items()\n output_tensors = example_decoder.decode(example)\n output_tensors = [\n x if x.dtype != tf.int64 else tf.cast(x, tf.int32) for x in output_tensors\n ]\n decoded_example = dict(zip(output_keys, output_tensors))\n return _update_decoded_example(decoded_example, options)", "def _read_tf_example(self,\n record: tf.Tensor,\n feature_preprocessor: Callable[[str], List[str]]\n ) -> types.FeatureAndLabelTensors:\n\n keys_to_features = {}\n keys_to_features[self._text_feature] = tf.FixedLenFeature([], tf.string)\n for label, dtype in self._labels.items():\n keys_to_features[label] = tf.FixedLenFeature([], dtype)\n parsed = tf.parse_single_example(\n record, keys_to_features) # type: Dict[str, types.Tensor]\n\n text = parsed[self._text_feature]\n # I think this could be a feature column, but feature columns seem so beta.\n preprocessed_text = feature_preprocessor(text)\n features = {self._text_feature: preprocessed_text}\n if self._round_labels:\n labels = {label: tf.round(parsed[label]) for label in self._labels}\n else:\n labels = {label: parsed[label] for label in self._labels}\n\n return features, labels", "def _parse(serialized_example):\n\n feature_map = {\n 'dayofweek': tf.io.FixedLenFeature([], tf.int64),\n 'dropofflat': tf.io.FixedLenFeature([], tf.float32),\n 'dropofflon': tf.io.FixedLenFeature([], tf.float32),\n 'fare_amount': tf.io.FixedLenFeature([], tf.float32),\n 'hourofday': tf.io.FixedLenFeature([], tf.int64),\n 'passengers': tf.io.FixedLenFeature([], tf.float32),\n 'pickuplat': tf.io.FixedLenFeature([], tf.float32),\n 'pickuplon': tf.io.FixedLenFeature([], tf.float32)\n }\n\n # Parse the serialized data into a dictionary.\n parsed_example = tf.io.parse_single_example(\n serialized=serialized_example,\n features=feature_map)\n\n features = add_engineered(parsed_example)\n label = features.pop(\"fare_amount\")\n\n return features, label", "def parser(record):\n # keys_to_features = {\n # \"image_data\": tf.FixedLenFeature((), tf.string, default_value=\"\"),\n # \"date_time\": tf.FixedLenFeature((), tf.int64, default_value=\"\"),\n # \"label\": tf.FixedLenFeature((), tf.int64,\n # default_value=tf.zeros([], dtype=tf.int64)),\n # }\n\n keys_to_features = {\n \"image_data\": 
tf.FixedLenFeature((), tf.float, default_value=\"\"),\n \"label\": tf.FixedLenFeature((), tf.int32,\n default_value=tf.zeros([], dtype=tf.int64)),\n }\n parsed = tf.parse_single_example(record, keys_to_features)\n\n # Perform additional preprocessing on the parsed data.\n image = tf.image.decode_jpeg(parsed[\"image_data\"])\n image = tf.reshape(image, [299, 299, 1])\n label = tf.cast(parsed[\"label\"], tf.int32)\n\n return {\"image_data\": image, \"date_time\": parsed[\"date_time\"]}, label", "def tf_example_parser(example):\n def _get_feature_map():\n \"\"\"Returns data format of the serialized tf record file.\"\"\"\n return {\n # 3 sparse feature with variable length. Use this if you have a\n # variable number or more than 1 feature value per example.\n \"feature_1\":\n tf.io.VarLenFeature(dtype=tf.int64),\n \"feature_2\":\n tf.io.VarLenFeature(dtype=tf.int64),\n \"feature_3\":\n tf.io.VarLenFeature(dtype=tf.int64),\n \"label\":\n tf.io.FixedLenFeature([1], dtype=tf.int64),\n }\n example = tf.io.parse_single_example(example, _get_feature_map())\n return example", "def parse_function_augment(example_proto):\r\n\r\n\t# Parse through features and extract byte string\r\n\tparsed_features = tf.parse_single_example(example_proto,features ={\r\n\t\t'image': tf.FixedLenFeature([],tf.string),\r\n\t\t'joint': tf.FixedLenFeature([],tf.string),\r\n\t\t'offset': tf.FixedLenFeature([],tf.string),\r\n\t\t'handScale': tf.FixedLenFeature([],tf.string)\r\n\t\t},name='features')\r\n\r\n\t# Decode content into correct types\r\n\timage_dec = tf.decode_raw(parsed_features['image'],tf.float32)\r\n\tjoint_dec = tf.decode_raw(parsed_features['joint'],tf.float32)\r\n\toffset_dec = tf.decode_raw(parsed_features['offset'],tf.float32)\r\n\thandScale_dec = tf.decode_raw(parsed_features['handScale'],tf.float32)\r\n\r\n\t# Reshape image to 176x176\r\n\timage_reshaped = tf.reshape(image_dec,[176,176,1])\r\n\r\n\t# Crop 128x128 image around COM\r\n\timage_com_cropped = tf.image.crop_to_bounding_box(image_reshaped,24,24,128,128)\r\n\r\n\t# Data Augmentation\r\n\timage_com_cropped, joint_dec, offset_dec, handScale_dec = tf.py_func(augmentation_cv,[image_com_cropped, joint_dec, offset_dec, handScale_dec],[tf.float32, tf.float32, tf.float32, tf.float32])\r\n\timage_com_cropped = tf.reshape(image_com_cropped,[128,128,1])\r\n\r\n\t# TF IMPLEMENTATION OF DATA AUGMENTATION: MIGHT BE SLOWER WHEN TF IS NOT COMPILED FROM SOURCE\r\n\t# image_reshaped, joint_dec, offset_dec, handScale_dec = augmentation(image_reshaped, joint_dec, offset_dec, handScale_dec)\r\n\r\n\treturn image_com_cropped, joint_dec, offset_dec, handScale_dec", "def parser(_, serialized_example):\n features = {}\n\n for i in frame_nums:\n image_name = 'image_' + str(i)\n if flags.dataset_type == 'robot':\n pose_name = 'state_' + str(i)\n action_name = 'action_' + str(i)\n joint_pos_name = 'joint_positions_' + str(i)\n\n features[pose_name] = tf.FixedLenFeature([flags.pose_dim], tf.float32)\n features[image_name] = tf.FixedLenFeature([1], tf.string)\n features[action_name] = tf.FixedLenFeature([flags.pose_dim],\n tf.float32)\n features[joint_pos_name] = tf.FixedLenFeature([flags.joint_pos_dim],\n tf.float32)\n else:\n features[image_name] = tf.FixedLenFeature([1], tf.string)\n\n parsed_input = tf.parse_single_example(serialized_example, features)\n\n for i in frame_nums:\n image_name = 'image_' + str(i)\n pose_name = 'state_' + str(i)\n action_name = 'action_' + str(i)\n joint_pos_name = 'joint_positions_' + str(i)\n\n # Process image\n image_buffer = 
tf.reshape(parsed_input[image_name], shape=[])\n image = tf.image.decode_jpeg(image_buffer, channels=COLOR_CHAN)\n image = tf.image.resize_images(\n image, (IMG_HEIGHT, IMG_WIDTH),\n method=tf.image.ResizeMethod.BICUBIC)\n image = tf.cast(tf.expand_dims(image, 0), tf.float32) / 255.0\n\n if flags.dataset_type == 'robot':\n pose = tf.reshape(parsed_input[pose_name], shape=[flags.pose_dim])\n pose = tf.expand_dims(pose, 0)\n action = tf.reshape(parsed_input[action_name], shape=[flags.pose_dim])\n action = tf.expand_dims(action, 0)\n joint_pos = tf.reshape(\n parsed_input[joint_pos_name], shape=[flags.joint_pos_dim])\n joint_pos = tf.expand_dims(joint_pos, 0)\n else:\n pose = tf.zeros([1, flags.pose_dim])\n action = tf.zeros([1, flags.pose_dim])\n joint_pos = tf.zeros([1, flags.joint_pos_dim])\n\n if i == 0:\n image_seq = image\n action_seq, pose_seq, joint_pos_seq = action, pose, joint_pos\n else:\n image_seq = tf.concat([image_seq, image], 0)\n action_seq = tf.concat([action_seq, action], 0)\n pose_seq = tf.concat([pose_seq, pose], 0)\n joint_pos_seq = tf.concat([joint_pos_seq, joint_pos], 0)\n\n return image_seq, action_seq, action_seq, joint_pos_seq", "def dataset_parser(self, value):\n keys_to_features = {\n 'image/encoded':\n tf.io.FixedLenFeature((), tf.string, ''),\n 'image/format':\n tf.io.FixedLenFeature((), tf.string, 'jpeg'),\n 'image/class/label':\n tf.io.FixedLenFeature([], tf.int64, -1),\n 'image/class/text':\n tf.io.FixedLenFeature([], tf.string, ''),\n 'image/object/bbox/xmin':\n tf.io.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/ymin':\n tf.io.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/xmax':\n tf.io.VarLenFeature(dtype=tf.float32),\n 'image/object/bbox/ymax':\n tf.io.VarLenFeature(dtype=tf.float32),\n 'image/object/class/label':\n tf.io.VarLenFeature(dtype=tf.int64),\n }\n\n parsed = tf.io.parse_single_example(value, keys_to_features)\n image_bytes = tf.reshape(parsed['image/encoded'], shape=[])\n\n tensors_dict = preprocess_image(\n image_bytes=image_bytes,\n is_training=self.is_training,\n augmentation=self.augmentation,\n use_bfloat16=self.use_bfloat16,\n saturate_uint8=self.saturate_uint8,\n scale_and_center=self.scale_and_center,\n use_default_augment=self.use_default_augment)\n\n # Subtract one so that labels are in [0, 1000).\n label = tf.cast(tf.reshape(parsed['image/class/label'], shape=()) - 1,\n dtype=tf.int32)\n tensors_dict['label'] = label\n\n return tensors_dict", "def parse_example(self, serialized_example):\n # Because of RaggedTensor specs, feature_specs can be a 2-level nested dict,\n # so have to wrap `tf.io.parse_single_example` between\n # `flatten_nest_dict`/`pack_as_nest_dict`.\n # {\n # 'video/image': tf.io.FixedLenSequenceFeature(...),\n # 'video/object/bbox': {\n # 'ragged_flat_values': tf.io.FixedLenSequenceFeature(...),\n # 'ragged_row_lengths_0', tf.io.FixedLenSequenceFeature(...),\n # },\n # }\n example = tf.io.parse_single_example(\n serialized=serialized_example,\n features=self.flat_feature_specs,\n )\n example = utils.pack_as_nest_dict(example, self._nested_feature_specs)\n\n example = { # pylint:disable=g-complex-comprehension\n k: _deserialize_single_field(example_data, tensor_info)\n for k, (example_data, tensor_info) in utils.zip_dict(\n example, self._flat_example_specs\n )\n }\n # Reconstruct all nesting\n example = utils.pack_as_nest_dict(example, self._example_specs)\n return example", "def parse_tfrecord(raw_example, features_name, labels_name):\n feature_spec = {\n name: tf.io.FixedLenSequenceFeature((), 
tf.int64, True)\n for name in [features_name, labels_name]}\n parsed_example = tf.io.parse_single_example(\n serialized=raw_example,\n features=feature_spec)\n labels = parsed_example.pop(labels_name)\n features = parsed_example.pop(features_name)\n with tf.control_dependencies([\n tf.compat.v1.assert_equal(tf.shape(input=features), tf.shape(input=labels))\n ]):\n return features, labels", "def parse_record(raw_record):\n keys_to_features = {\n 'image/height':\n tf.FixedLenFeature((), tf.int64),\n 'image/width':\n tf.FixedLenFeature((), tf.int64),\n 'image/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format':\n tf.FixedLenFeature((), tf.string, default_value='jpeg'),\n 'label/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'label/format':\n tf.FixedLenFeature((), tf.string, default_value='png'),\n }\n\n parsed = tf.parse_single_example(raw_record, keys_to_features)\n\n # height = tf.cast(parsed['image/height'], tf.int32)\n # width = tf.cast(parsed['image/width'], tf.int32)\n\n image = tf.image.decode_image(\n tf.reshape(parsed['image/encoded'], shape=[]), _DEPTH)\n image = tf.to_float(tf.image.convert_image_dtype(image, dtype=tf.uint8))\n image.set_shape([None, None, 3])\n\n label = tf.image.decode_image(\n tf.reshape(parsed['label/encoded'], shape=[]), 1)\n label = tf.to_int32(tf.image.convert_image_dtype(label, dtype=tf.uint8))\n label.set_shape([None, None, 1])\n\n\n return image, label", "def parse_attention_example(tf_example):\n\n # specify features in attention example \n features_map = {\n 'sequence_raw': tf.FixedLenFeature([], tf.string),\n 'label_raw': tf.FixedLenFeature([], tf.string),\n 'annotation_raw': tf.FixedLenFeature([], tf.string)}\n\n # parse tf example for internal tensors\n parsed_example = tf.parse_single_example(tf_example, features_map)\n\n # decode examples\n sequence_raw = tf.decode_raw(parsed_example['sequence_raw'], tf.uint8)\n label_raw = tf.decode_raw(parsed_example['label_raw'], tf.uint8)\n annotation_raw = tf.decode_raw(parsed_example['annotation_raw'], tf.float32)\n\n # parsed tensors are flat so reshape if needed\n # cast to floats for attention task\n sequence = tf.cast(tf.reshape(sequence_raw, SEQUENCE_SHAPE), dtype=tf.float32)\n label = tf.cast(label_raw, dtype=tf.float32)\n annotation = tf.reshape(annotation_raw, ANNOTATION_SHAPE)\n\n return {'sequence': sequence, 'label': label, 'annotation': annotation}", "def _parse_sequence_example_raw(serialized,\n debug_name,\n context,\n feature_list,\n name=None):\n if context.num_features + feature_list.num_features == 0:\n raise ValueError(\"Must provide at least one feature key.\")\n with ops.name_scope(name, \"ParseSequenceExample\", [serialized]):\n debug_name = [] if debug_name is None else debug_name\n\n # Internal\n feature_list_dense_missing_assumed_empty = []\n for k, v in feature_list.dense_defaults.items():\n if v is not None:\n raise ValueError(\"Value feature_list.dense_defaults[%s] must be None\" %\n k)\n feature_list_dense_missing_assumed_empty.append(k)\n\n has_ragged = context.ragged_keys or feature_list.ragged_keys\n serialized = ops.convert_to_tensor(serialized, name=\"serialized\")\n if has_ragged and serialized.shape.ndims is None:\n raise ValueError(\"serialized must have statically-known rank to \"\n \"parse ragged features.\")\n feature_list_dense_missing_assumed_empty_vector = [\n key in feature_list_dense_missing_assumed_empty\n for key in feature_list.dense_keys\n ]\n outputs = gen_parsing_ops.parse_sequence_example_v2(\n # Inputs\n 
serialized=serialized,\n debug_name=debug_name,\n context_sparse_keys=context.sparse_keys,\n context_dense_keys=context.dense_keys,\n context_ragged_keys=context.ragged_keys,\n feature_list_sparse_keys=feature_list.sparse_keys,\n feature_list_dense_keys=feature_list.dense_keys,\n feature_list_ragged_keys=feature_list.ragged_keys,\n feature_list_dense_missing_assumed_empty=(\n feature_list_dense_missing_assumed_empty_vector),\n context_dense_defaults=context.dense_defaults_vec,\n # Attrs\n Ncontext_sparse=len(context.sparse_keys),\n Nfeature_list_sparse=len(feature_list.sparse_keys),\n Nfeature_list_dense=len(feature_list.dense_keys),\n context_sparse_types=context.sparse_types,\n context_ragged_value_types=context.ragged_value_types,\n context_ragged_split_types=context.ragged_split_types,\n feature_list_dense_types=feature_list.dense_types,\n feature_list_sparse_types=feature_list.sparse_types,\n feature_list_ragged_value_types=feature_list.ragged_value_types,\n feature_list_ragged_split_types=feature_list.ragged_split_types,\n context_dense_shapes=context.dense_shapes_as_proto,\n feature_list_dense_shapes=feature_list.dense_shapes,\n name=name)\n (context_sparse_indices, context_sparse_values, context_sparse_shapes,\n context_dense_values, context_ragged_values, context_ragged_row_splits,\n feature_list_sparse_indices, feature_list_sparse_values,\n feature_list_sparse_shapes, feature_list_dense_values,\n feature_list_dense_lengths, feature_list_ragged_values,\n feature_list_ragged_outer_splits,\n feature_list_ragged_inner_splits) = outputs\n # pylint: disable=protected-access\n context_ragged_tensors = parsing_config._build_ragged_tensors(\n serialized.shape, context_ragged_values, context_ragged_row_splits)\n feature_list_ragged_tensors = parsing_config._build_ragged_tensors(\n serialized.shape, feature_list_ragged_values,\n feature_list_ragged_outer_splits, feature_list_ragged_inner_splits)\n\n # pylint: disable=g-complex-comprehension\n context_sparse_tensors = [\n sparse_tensor.SparseTensor(ix, val, shape)\n for (ix, val,\n shape) in zip(context_sparse_indices, context_sparse_values,\n context_sparse_shapes)\n ]\n\n feature_list_sparse_tensors = [\n sparse_tensor.SparseTensor(ix, val, shape)\n for (ix, val, shape\n ) in zip(feature_list_sparse_indices, feature_list_sparse_values,\n feature_list_sparse_shapes)\n ]\n # pylint: enable=g-complex-comprehension\n\n context_output = dict(\n zip(\n context.sparse_keys + context.dense_keys + context.ragged_keys,\n context_sparse_tensors + context_dense_values +\n context_ragged_tensors))\n feature_list_output = dict(\n zip(\n feature_list.sparse_keys + feature_list.dense_keys +\n feature_list.ragged_keys, feature_list_sparse_tensors +\n feature_list_dense_values + feature_list_ragged_tensors))\n feature_list_lengths = dict(\n zip(feature_list.dense_keys, feature_list_dense_lengths))\n\n return (context_output, feature_list_output, feature_list_lengths)", "def parser(record):\n parsed = tf.parse_single_example(record, transformed_feature_spec)\n label = parsed.pop(LABEL_KEY)\n return parsed, label", "def extract_info_from_sequence_example(path_to_tfrecord, from_scratch=False):\n assert(os.path.isfile(path_to_tfrecord))\n\n # The csv file containing extraction result\n output_dir = os.path.dirname(path_to_tfrecord)\n yaml_name = '.do_not_modify.dataset_info.yaml'\n csv_name = '.do_not_modify.example_info.csv'\n yaml_filepath = os.path.join(output_dir, yaml_name)\n csv_filepath = os.path.join(output_dir, csv_name)\n\n if not from_scratch \\\n 
and os.path.isfile(yaml_filepath) \\\n and os.path.isfile(csv_filepath):\n with open(yaml_filepath, 'r') as f:\n dataset_info = yaml.load(f)\n examples_info = pd.read_csv(csv_filepath)\n if verbose:\n print(\"Successfully loaded existing dataset info and examples info.\")\n return dataset_info, examples_info\n else: # from scratch\n if verbose:\n print(\"Extracting dataset info and examples info from scratch\",\n \"(by iterating the sequence examples)...\")\n\n # Some basic information on the dataset\n matrix_bundle_fields = []\n classes = set()\n # For now we only have dataset having 1 single bundle (e.g. no video+audio)\n num_bundles = 1\n num_classes = 0\n num_examples = 0\n sequence_size_max = 0\n sequence_size_min = 0\n sequence_size_median = 0\n is_sparse = None # True or False\n # Domain in ['audio_text_or_time_series', 'image_or_vector', 'video']\n # inferred_dataset_domain = None\n\n # Some basic information on each example\n num_timestamps = []\n num_features = []\n num_labels = []\n\n # Begin extracting\n counter = 0\n for se in tf.python_io.tf_record_iterator(path_to_tfrecord):\n sequence_example = tf.train.SequenceExample.FromString(se)\n\n context_feature = sequence_example.context.feature\n feature_lists_container = sequence_example.feature_lists.feature_list\n # Update num_labels\n labels = list(context_feature['label_index'].int64_list.value)\n num_labels.append(len(labels))\n\n if not matrix_bundle_fields:\n matrix_bundle_fields += list(feature_lists_container)\n else: # Make sure that fields name are consistent (coherent)\n assert(all([x in matrix_bundle_fields for x in feature_lists_container]))\n\n # Update classes\n classes = classes.union(set(labels))\n\n dense_key = '0_dense_input'\n sparse_value = '0_sparse_value'\n if dense_key in feature_lists_container:\n if is_sparse:\n raise ValueError(\"Inconsistent sparsity at index {}!\".format(counter))\n elif is_sparse is None:\n is_sparse = False\n key = dense_key\n elif sparse_value in feature_lists_container:\n if is_sparse is not None:\n if not is_sparse:\n raise ValueError(\"Inconsistent sparsity at index {}!\"\\\n .format(counter))\n else:\n is_sparse = True\n key = sparse_value\n\n # Update num_timestamps\n feature_list = feature_lists_container[key]\n num_timestamps.append(_len_feature_list(feature_list))\n # Update num_features\n feature_vec = _get_first_feature(feature_list)\n num_features.append(_len_feature(feature_vec))\n\n counter += 1\n\n examples_info = pd.DataFrame({'num_timestamps': num_timestamps,\n 'num_features': num_features,\n 'num_labels': num_labels})\n\n sequence_sizes = examples_info['num_timestamps']\n sequence_size_max = int(sequence_sizes.max())\n sequence_size_min = int(sequence_sizes.min())\n sequence_size_median = sequence_sizes.median()\n\n dataset_info = {'matrix_bundle_fields': matrix_bundle_fields,\n 'classes': list(classes),\n 'num_bundles': num_bundles,\n 'num_classes': len(classes),\n 'num_examples': examples_info.shape[0],\n 'sequence_size_max': sequence_size_max,\n 'sequence_size_min': sequence_size_min,\n 'sequence_size_median': sequence_size_median,\n 'is_sparse': is_sparse\n }\n examples_info.to_csv(csv_filepath, index=False)\n with open(yaml_filepath, 'w') as f:\n yaml.dump(dataset_info, f)\n return dataset_info, examples_info", "def _decode_record(self, record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in 
list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n # tf.logging.info(t)\n # t = tf.sparse.to_dense(t)\n # tf.logging.info(t.get_shape().as_list())\n # assert t.get_shape().as_list()[0] is not None\n example[name] = t\n \n del example[\"source_sos_ids\"]\n del example[\"source_sos_mask\"]\n\n return example", "def parse_single_example(serialized_example):\n feature_description = {\n \"immrf/data\": tf.io.FixedLenFeature([], tf.string),\n \"immrf/shape\": tf.io.VarLenFeature(tf.int64),\n \"immrf/path\": tf.io.FixedLenFeature([], tf.string),\n \"tmap/data\": tf.io.FixedLenFeature([], tf.string),\n \"tmap/shape\": tf.io.VarLenFeature(tf.int64),\n \"tmap/path\": tf.io.FixedLenFeature([], tf.string),\n \"mask/data\": tf.io.FixedLenFeature([], tf.string),\n \"mask/shape\": tf.io.VarLenFeature(tf.int64),\n \"mask/path\": tf.io.FixedLenFeature([], tf.string),\n }\n slice = tf.io.parse_single_example(serialized_example, feature_description)\n for key in [\"immrf\", \"tmap\", \"mask\"]:\n slice[key + \"/data\"] = tf.io.decode_raw(slice[key + \"/data\"], out_type=tf.float32)\n slice[key + \"/data\"] = utils.reshape_back(slice, key)\n return slice", "def _parser(serialized_example):\n\n features = tf.compat.v1.parse_single_example(\n serialized_example,\n features={\n 'img_raw': tf.compat.v1.FixedLenFeature([], tf.string),\n 'label': tf.compat.v1.FixedLenFeature([], tf.int64),\n 'category': tf.compat.v1.FixedLenFeature([], tf.int64),\n 'elevation': tf.compat.v1.FixedLenFeature([], tf.int64),\n 'azimuth': tf.compat.v1.FixedLenFeature([], tf.int64),\n 'lighting': tf.compat.v1.FixedLenFeature([], tf.int64),\n })\n\n img = tf.compat.v1.decode_raw(features['img_raw'], tf.float64)\n img = tf.reshape(img, [96, 96, 1])\n img = tf.cast(img, tf.float32) # * (1. 
/ 255) # left unnormalized\n\n lab = tf.cast(features['label'], tf.int32)\n cat = tf.cast(features['category'], tf.int32)\n elv = tf.cast(features['elevation'], tf.int32)\n azi = tf.cast(features['azimuth'], tf.int32)\n lit = tf.cast(features['lighting'], tf.int32)\n\n return img, lab, cat, elv, azi, lit", "def convert_single_example(ex_index, example, label_list, max_seq_length,\n tokenizer):\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n input_ids, segment_ids, input_mask = \\\n tokenizer.encode_text(text_a=example.text_a,\n text_b=example.text_b,\n max_seq_length=max_seq_length)\n\n label_id = label_map[example.label]\n\n # here we disable the verbose printing of the data\n if ex_index < 0:\n logging.info(\"*** Example ***\")\n logging.info(\"guid: %s\", example.guid)\n logging.info(\"input_ids: %s\", \" \".join([str(x) for x in input_ids]))\n logging.info(\"input_ids length: %d\", len(input_ids))\n logging.info(\"input_mask: %s\", \" \".join([str(x) for x in input_mask]))\n logging.info(\"segment_ids: %s\", \" \".join([str(x) for x in segment_ids]))\n logging.info(\"label: %s (id = %d)\", example.label, label_id)\n\n feature = InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id)\n return feature", "def _decode_record(record, name_to_features):\n\t\t\texample = tf.parse_single_example(record, name_to_features)\n\n\t\t\t# tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n\t\t\t# So cast all int64 to int32.\n\t\t\tfor name in list(example.keys()):\n\t\t\t\tt = example[name]\n\t\t\t\tif t.dtype == tf.int64:\n\t\t\t\t\tt = tf.to_int32(t)\n\t\t\t\texample[name] = t\n\n\t\t\treturn example", "def _decode_record(record,name_to_features):\n example = tf.parse_single_example(record,name_to_features)\n\n return example", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n return example", "def _parse_example(self, example, scale_to_0_1: bool = False):\n\n features = {\n 'image': tf.FixedLenFeature([], tf.string),\n 'mask': tf.FixedLenFeature([], tf.string),\n }\n parsed_example = tf.parse_single_example(example, features)\n\n image = tf.decode_raw(parsed_example['image'], self.serialized_image_raw_dtype)\n image = tf.reshape(image, (self.image_width, self.image_width, self.image_channels))\n image = tf.cast(image, tf.float32)\n if scale_to_0_1:\n image /= 255.\n\n mask = tf.decode_raw(parsed_example['mask'], self.serialized_mask_raw_dtype)\n mask = tf.reshape(mask, (self.image_width, self.image_width, self.mask_channels))\n mask = tf.cast(mask, tf.float32) / 255.\n return image, mask", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n print(name)\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def parser(self, serialized_example):\n features = {\n 'image/height': tf.FixedLenFeature([], tf.int64),\n 'image/width': tf.FixedLenFeature([], tf.int64),\n 'image/colorspace': tf.FixedLenFeature([], tf.string),\n 'image/channels': tf.FixedLenFeature([], tf.int64),\n 'image/class/label': tf.FixedLenFeature([], tf.int64),\n 'image/format': tf.FixedLenFeature([], tf.string),\n 'image/encoded': tf.FixedLenFeature([], tf.string),\n 'image/fixation_pt': tf.FixedLenFeature([2], 
tf.float32)}\n parsed_features = tf.parse_single_example(serialized_example, features)\n\n # Get label as a Tensor.\n label = parsed_features['image/class/label']\n\n # Decode the image JPEG string into a Tensor.\n image = tf.image.decode_jpeg(parsed_features['image/encoded'],\n channels=self.DEPTH)\n\n # Convert from uint8 -> float32 and map onto range [0, 1].\n image = tf.cast(image, tf.float32) * (1. / 255)\n\n # Standardize image.\n image = tf.image.per_image_standardization(image)\n\n # Apply data augmentation.\n if (self.mode == tf.estimator.ModeKeys.TRAIN\n and self.params['train_with_distortion']):\n # Randomly flip the image, zero-pad with four pixels along\n # each edge, and take a random 32 x 32 crop.\n image = tf.image.random_flip_left_right(image)\n image = tf.image.resize_image_with_crop_or_pad(image, 40, 40)\n image = tf.image.crop_to_bounding_box(image,\n tf.random_uniform([], minval=0, maxval=8, dtype=tf.int32),\n tf.random_uniform([], minval=0, maxval=8, dtype=tf.int32),\n 32, 32)\n\n return image, label", "def __parser__(self, example_proto):\n # configure feature and label length\n # It is crucial that for tf.string, the length is not specified, as the data is stored as a single string!\n x_config = tf.FixedLenFeature([], tf.string) \\\n if self.x_dtype == tf.string else tf.FixedLenFeature([self.num_features], self.x_dtype)\n if self.num_labels == 0:\n proto_config = {'x': x_config}\n else:\n y_config = tf.FixedLenFeature([], tf.string) \\\n if self.y_dtype == tf.string else tf.FixedLenFeature([self.num_labels], self.y_dtype)\n proto_config = {'x': x_config, 'y': y_config}\n\n # decode examples\n datum = tf.parse_single_example(example_proto, features=proto_config)\n if self.x_dtype == tf.string: # if input is string / bytes, decode it to float32\n if self.decode_jpeg:\n # first decode compressed image string to uint8, as data is stored in this way\n # datum['x'] = tf.image.decode_image(datum['x'], channels=3)\n datum['x'] = tf.image.decode_jpeg(datum['x'], channels=3)\n else:\n # first decode data to uint8, as data is stored in this way\n datum['x'] = tf.decode_raw(datum['x'], tf.uint8)\n # then cast data to tf.float32 or tf.float16\n datum['x'] = tf.cast(datum['x'], tf.float32)\n # cannot use string_to_number as there is only one string for a whole sample\n # datum['x'] = tf.strings.to_number(datum['x'], tf.float32) # this results in possibly a large number\n\n # return data\n if 'y' in datum:\n # y can be present in many ways:\n # 1. a single integer, which requires y to be int32 or int64 (e.g, used in tf.gather in cbn)\n # 2. num-class bool/integer/float variables. This form is more flexible as it allows multiple classes and\n # prior probabilities as targets\n # 3. 
float variables in regression problem.\n # but...\n # y is stored as int (for case 1), string (for other int cases), or float (for float cases)\n # in the case of tf.string and tf.int64, convert to to int32\n if self.y_dtype == tf.string:\n # avoid using string labels like 'cat', 'dog', use integers instead\n datum['y'] = tf.decode_raw(datum['y'], tf.uint8)\n datum['y'] = tf.cast(datum['y'], tf.int32)\n else:\n datum['y'] = tf.cast(datum['y'], self.y_dtype)\n if self.use_one_hot_label:\n datum['y'] = tf.reshape(tf.one_hot(datum['y'], self.num_classes), (-1, ))\n if self.use_smooth_label: # label smoothing\n datum['y'] = 0.9 * datum['y'] + 0.1 / self.num_classes\n return datum['x'], datum['y']\n else:\n return datum['x']", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def parser(serialized_example):\n features = tf.parse_single_example(\n serialized_example,\n features={\n \"image\": tf.FixedLenFeature([], tf.string),\n \"label\": tf.FixedLenFeature([], tf.int64),\n })\n image = tf.decode_raw(features[\"image\"], tf.uint8)\n image.set_shape([CHANNELS * HEIGHT * WIDTH])\n # Reshape from [depth * height * width] to [depth, height, width].\n image = tf.cast(\n tf.transpose(tf.reshape(image, [CHANNELS, HEIGHT, WIDTH]), [1, 2, 0]),\n tf.float32) * (2. / 255) - 1\n\n label = tf.cast(features['label'], tf.int32)\n\n random_noise = tf.random_normal([noise_dim])\n features = {\n 'real_images': image,\n 'random_noise': random_noise}\n\n return features, label", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def _parse_function(self, example_proto):\n\n # Currently only supports jpeg and png.\n # Need to use this logic because the shape is not known for\n # tf.image.decode_image and we rely on this info to\n # extend label if necessary.\n def _decode_image(content, channels):\n return tf.cond(\n tf.image.is_jpeg(content),\n lambda: tf.image.decode_jpeg(content, channels),\n lambda: tf.image.decode_png(content, channels))\n\n features = {\n 'image/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/filename':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format':\n tf.FixedLenFeature((), tf.string, default_value='jpeg'),\n 
'image/height':\n tf.FixedLenFeature((), tf.int64, default_value=0),\n 'image/width':\n tf.FixedLenFeature((), tf.int64, default_value=0),\n 'image/segmentation/class/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/segmentation/class/format':\n tf.FixedLenFeature((), tf.string, default_value='png'),\n }\n\n parsed_features = tf.parse_single_example(example_proto, features)\n\n image = _decode_image(parsed_features['image/encoded'], channels=3)\n\n label = None\n if self.split_name != common.TEST_SET:\n label = _decode_image(\n parsed_features['image/segmentation/class/encoded'], channels=1)\n\n image_name = parsed_features['image/filename']\n if image_name is None:\n image_name = tf.constant('')\n\n sample = {\n common.IMAGE: image,\n common.IMAGE_NAME: image_name,\n common.HEIGHT: parsed_features['image/height'],\n common.WIDTH: parsed_features['image/width'],\n }\n\n if label is not None:\n if label.get_shape().ndims == 2:\n label = tf.expand_dims(label, 2)\n elif label.get_shape().ndims == 3 and label.shape.dims[2] == 1:\n pass\n else:\n raise ValueError('Input label shape must be [height, width], or '\n '[height, width, 1].')\n\n label.set_shape([None, None, 1])\n\n sample[common.LABELS_CLASS] = label\n\n return sample", "def _parse_tfexample(example):\n\n ## parse\n features = tf.parse_single_example(example, KEYS2FEATURES)\n\n image = tf.image.decode_png(features['image/encoded'])\n label = tf.image.decode_png(features['label/encoded'])\n # label is decoded as a 3-D png image\n label = label[..., 0]\n im_path = features['image/path']\n la_path = features['label/path']\n\n return image, label, im_path, la_path", "def parse_func(record):\n keys_to_features = {\n 'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format': tf.FixedLenFeature((), tf.string, default_value='png'),\n 'image/path': tf.FixedLenFeature((), tf.string, default_value=''),\n 'label/encoded': tf.FixedLenFeature((), tf.string, default_value=''),\n 'label/format': tf.FixedLenFeature((), tf.string, default_value='png'),\n 'label/path': tf.FixedLenFeature((), tf.string, default_value=''),\n 'height': tf.FixedLenFeature((), tf.int64),\n 'width': tf.FixedLenFeature((), tf.int64)\n }\n\n features = tf.parse_single_example(record, keys_to_features)\n\n image = tf.image.decode_png(features['image/encoded'], channels=3)\n label_dtype = tf.uint8\n label = tf.image.decode_png(features['label/encoded'], channels=1, dtype=label_dtype)\n label = tf.reshape(label, tf.convert_to_tensor([features['height'], features['width'], 1]))\n label = tf.squeeze(label)\n\n paths = (features['image/path'], features['label/path'])\n return image, label, paths", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def input_fn(filename):\n\n def parse_single_tfrecord(serializer_item):\n features = {\n 'label': tf.FixedLenFeature([], tf.int64),\n 'sentence': tf.FixedLenFeature([], tf.string)\n }\n\n features_var = tf.parse_single_example(serializer_item, features)\n\n labels = tf.cast(features_var['label'], tf.int64)\n sentence = tf.decode_raw(features_var['sentence'], tf.uint8)\n sentence = tf.cast(sentence, tf.int64)\n return sentence, labels\n\n tf_record_filename = filename\n if not 
os.path.exists(tf_record_filename):\n raise FileNotFoundError(\"tfrecord not found\")\n tf_record_reader = tf.data.TFRecordDataset(tf_record_filename)\n\n dataset = tf_record_reader.map(parse_single_tfrecord).shuffle(50000).batch(\n 10).repeat(1)\n iterator = dataset.make_one_shot_iterator()\n data, labels = iterator.get_next()\n return data, labels", "def example_loader(\n data_path: str,\n index_path: typing.Union[str, None],\n description: typing.Union[typing.List[str], typing.Dict[str, str], None] = None,\n shard: typing.Optional[typing.Tuple[int, int]] = None,\n compression_type: typing.Optional[str] = None,\n) -> typing.Iterable[typing.Dict[str, np.ndarray]]:\n\n typename_mapping = {\n \"byte\": \"bytes_list\",\n \"float\": \"float_list\",\n \"int\": \"int64_list\"\n }\n\n record_iterator = tfrecord_iterator(\n data_path=data_path,\n index_path=index_path,\n shard=shard,\n compression_type=compression_type,\n )\n\n for record in record_iterator:\n example = example_pb2.Example()\n example.ParseFromString(record)\n\n yield extract_feature_dict(example.features, description, typename_mapping)", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def _deserialize_example(example_proto, labeled=True):\n if labeled:\n feature_description = {\n 'image': tf.io.FixedLenFeature([], tf.string),\n 'image_name': tf.io.FixedLenFeature([], tf.string),\n 'patient_id': tf.io.FixedLenFeature([], tf.int64),\n 'sex': tf.io.FixedLenFeature([], tf.int64),\n 'age_approx': tf.io.FixedLenFeature([], tf.int64),\n 'anatom_site_general_challenge': tf.io.FixedLenFeature([], tf.int64),\n 'diagnosis': tf.io.FixedLenFeature([], tf.int64),\n 'target': tf.io.FixedLenFeature([], tf.int64)\n }\n else:\n feature_description = {\n 'image': tf.io.FixedLenFeature([], tf.string),\n 'image_name': tf.io.FixedLenFeature([], tf.string)\n }\n\n return tf.io.parse_single_example(example_proto, feature_description)", "def parser(self, serialized_example):\n features = {\n 'image/height': tf.FixedLenFeature([], tf.int64),\n 'image/width': tf.FixedLenFeature([], tf.int64),\n 'image/colorspace': tf.FixedLenFeature([], tf.string),\n 'image/channels': tf.FixedLenFeature([], tf.int64),\n 'image/class/label': tf.FixedLenFeature([], tf.int64),\n 'image/class/synset': tf.FixedLenFeature([], tf.string),\n 'image/class/text': tf.FixedLenFeature([], tf.string),\n 'image/object/bbox/xmin': tf.VarLenFeature(tf.float32),\n 'image/object/bbox/ymin': tf.VarLenFeature(tf.float32),\n 'image/object/bbox/xmax': tf.VarLenFeature(tf.float32),\n 'image/object/bbox/ymax': tf.VarLenFeature(tf.float32),\n 'image/object/bbox/label': tf.VarLenFeature(tf.int64),\n 'image/format': tf.FixedLenFeature([], tf.string),\n 'image/encoded': tf.FixedLenFeature([], tf.string)}\n parsed_features = tf.parse_single_example(serialized_example, features)\n\n # Get label as a Tensor.\n label = parsed_features['image/class/label']\n\n # Decode the image JPEG string into a Tensor.\n image = tf.image.decode_jpeg(parsed_features['image/encoded'],\n channels=self.DEPTH)\n\n # VGG preprocessing borrowed from slim; includes data augmentation so train_with_distortion should be set to True.\n if self.mode == tf.estimator.ModeKeys.TRAIN:\n assert 
self.params['train_with_distortion'] == True\n is_training = True\n else:\n is_training = False\n image = vgg_preprocess_image(image, 224, 224, is_training=is_training)\n\n return image, label", "def _decode_record(record):\n name_to_features = {\n \"input_ids\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n \"input_mask\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n \"stroke_ids\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n \"lmask\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n \"label_ids\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n }\n\n\n example = tf.parse_single_example(record, name_to_features)\n\n #int64 to int32\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n input_ids = example['input_ids']\n input_mask = example['input_mask']\n segment_ids = example['segment_ids']\n stroke_ids = example['stroke_ids']\n label_ids = example['label_ids']\n lmask = example['lmask']\n py_labels = tf.py_func(_get_py_seq, [label_ids], [tf.int32])\n\n return input_ids, input_mask, segment_ids, stroke_ids, lmask, label_ids, py_labels", "def convert_examples_to_features(self):\n features = []\n max_label_len = 0\n # find ou the max label length\n labels_list = []\n for ex_index, example in enumerate(self.examples):\n processor = example.processor\n label_ids = self.tokenizer.text_to_ids(processor.label2string(example.label)) + [self.tokenizer.eos_id]\n max_label_len = max(len(label_ids), max_label_len)\n labels_list.append(label_ids)\n if self.max_seq_length_decoder is None:\n self.max_seq_length_decoder = max_label_len\n else:\n self.max_seq_length_decoder = max(\n self.max_seq_length_decoder, max_label_len\n ) # take the max of the two to be conservative\n for ex_index, example in enumerate(self.examples):\n taskname = example.taskname\n taskname_ids = self.tokenizer.text_to_ids(taskname)\n processor = example.processor\n if ex_index % 10000 == 0:\n logging.info(f\"Writing example {ex_index} of {len(self.examples)}\")\n label_ids = labels_list[ex_index]\n enc_query = processor.get_ptune_query(\n example.content,\n self.pseudo_token_id,\n self.max_seq_length - self.max_seq_length_decoder + 1,\n self.templates,\n self.tokenizer,\n )\n input_ids = enc_query + label_ids[:-1]\n labels = [SMALL_NUM for i in range(len(enc_query) - 1)] + label_ids\n features.append([input_ids, labels, enc_query, taskname_ids])\n return features", "def _convert_raw_example(\n self,\n mode_dict: MutableMapping[str, Any],\n example: Mapping[str, Any]) -> ProcessedExample:\n img_path = example['image_path_or_name']\n base_name = os.path.basename(img_path)\n img_fobj = example.get('image_fobj', tf.io.gfile.GFile(img_path, 'rb'))\n img_bytes, img_shape = image_utils.image_to_jpeg(fobj=img_fobj,\n filename=base_name)\n\n img_format = 'JPEG'\n key = hashlib.sha256(img_bytes.read()).hexdigest()\n img_bytes.seek(0)\n\n bboxes = example['bbox_info']\n processed_bboxes = []\n\n img_height = img_shape[0]\n img_width = img_shape[1]\n\n img_id = example.get('image_id', self._get_id('image'))\n mode_dict['images'].append({\n 'id': img_id,\n 'width': img_width,\n 'height': img_height,\n })\n\n for bbox_info in bboxes:\n annotations_bbox = bbox_info['bbox']\n bbox = bbox_utils.BBox(bbox=annotations_bbox,\n fmt=self.builder_config.bbox_format,\n img_width=img_width,\n img_height=img_height)\n label = bbox_info['label']\n if isinstance(label, int):\n text = 
str(label)\n elif isinstance(label, six.string_types):\n text = label\n label = bbox_info.get('label_id', self._get_label_id(text))\n else:\n raise TypeError(\n 'The provided label was not a string or int. Got: {}'.format(\n type(label)))\n\n if label >= self.builder_config.num_labels:\n raise ValueError('Provided label {} for {} is greater than '\n 'the number of classes specified. num_classes: '\n '{}'.format(label,\n base_name,\n self.builder_config.num_labels))\n\n annotation_id = example.get('annotation_id', self._get_id('annotation'))\n bbox.convert(bbox_utils.BBoxFormat.NORMALIZED_MIN_MAX)\n xmin, xmax, ymin, ymax = bbox.as_tuple()\n bbox = bbox.convert(bbox_utils.BBoxFormat.WIDTH_HEIGHT)\n mode_dict['annotations'].append({\n 'id': annotation_id,\n 'image_id': img_id,\n 'category_id': label,\n 'bbox': annotations_bbox,\n })\n\n processed_bboxes.append({\n 'bbox': tfds.features.BBox(ymin=ymin,\n xmin=xmin,\n ymax=ymax,\n xmax=xmax),\n 'class': {\n 'text': text,\n 'label': label,\n }\n })\n\n return img_id, {\n 'image': {\n 'height': img_width,\n 'width': img_shape[1],\n 'filename': img_path,\n 'source_id': img_id,\n 'encoded': img_bytes,\n 'format': img_format,\n 'key': {\n 'sha256': key,\n },\n 'object': processed_bboxes,\n }\n }", "def _decode_record(record):\r\n example = tf.io.parse_single_example(serialized=record, features=feature_description)\r\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\r\n # So cast all int64 to int32.\r\n for key in [k for k in example.keys() if k not in ['example_id', 'unique_ids']]:\r\n example[key] = tf.cast(example[key], dtype=tf.int32)\r\n if is_training:\r\n features = {\r\n 'input_ids': example['input_ids'],\r\n 'input_mask': example['input_mask'],\r\n 'segment_ids': example['segment_ids']\r\n }\r\n labels = {\r\n 'start_logits_or_probs': tf.one_hot(example['start_positions'],\r\n depth=seq_length, dtype=tf.float32),\r\n 'end_logits_or_probs': tf.one_hot(example['end_positions'],\r\n depth=seq_length, dtype=tf.float32),\r\n 'ans_type': tf.one_hot(example['answer_types'],\r\n depth=len(ANSWER_TYPE_ORDER), dtype=tf.float32)\r\n }\r\n return (features, labels)\r\n else:\r\n return example", "def test_example(self, example_dataset, expected_result):\n\n transformer = PreprocessFeatures()\n result = transformer.fit_transform(example_dataset)\n\n assert (result == expected_result).all()", "def convert_examples_to_features(examples, max_seq_length, tokenizer):\n\n features = []\n for (ex_index, example) in enumerate(examples):\n print(example.text_a)\n tokens_a = tokenizer.tokenize(example.text_a)\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[:(max_seq_length - 2)]\n\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n segment_ids = [0] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [\"[SEP]\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n input_mask = [1] * len(input_ids)\n\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n segment_ids += padding\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n \n labels_ids = []\n for label in example.labels:\n labels_ids.append(int(label))\n \n if ex_index < 0:\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\" % 
(example.guid))\n logger.info(\"tokens: %s\" % \" \".join(\n [str(x) for x in tokens]))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n logger.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n logger.info(\"label: %s (id = %s)\" % (example.labels, labels_ids))\n\n features.append(\n InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_ids=labels_ids))\n return features", "def single_example_parser(serialized_example):\n # Dimensions of the images in the CIFAR-10 dataset.\n # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the\n # input format.\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'image': tf.FixedLenFeature([], tf.string),\n 'label': tf.FixedLenFeature([], tf.int64),\n })\n image = tf.decode_raw(features['image'], tf.uint8)\n image.set_shape([DEPTH * HEIGHT * WIDTH])\n\n # Reshape from [depth * height * width] to [depth, height, width].\n image = tf.cast(\n tf.transpose(tf.reshape(image, [DEPTH, HEIGHT, WIDTH]), [1, 2, 0]),\n tf.float32)\n label = tf.cast(features['label'], tf.int32)\n \n image = train_preprocess_fn(image)\n label = tf.one_hot(label, NUM_CLASSES)\n \n return image, label", "def input_fn():\n files = tf.data.Dataset.list_files(os.path.join(\n tft_working_dir, filebase + '*'))\n dataset = files.interleave(\n tf.data.TFRecordDataset, cycle_length=4, block_length=16)\n dataset = dataset.map(parser)\n\n if shuffle:\n dataset = dataset.shuffle(buffer_size)\n\n dataset = dataset.repeat(num_epochs)\n dataset = dataset.batch(batch_size)\n\n dataset = dataset.prefetch(prefetch_buffer_size)\n iterator = dataset.make_one_shot_iterator()\n transformed_features, transformed_labels = iterator.get_next()\n\n return transformed_features, transformed_labels", "def parse_record(raw_record, is_training):\n keys_to_features = {\n 'image/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format':\n tf.FixedLenFeature((), tf.string, default_value='jpeg'),\n 'image/class/label':\n tf.FixedLenFeature([], dtype=tf.int64, default_value=-1),\n 'image/class/text':\n tf.FixedLenFeature([], dtype=tf.string, default_value=''),\n }\n\n parsed = tf.parse_single_example(raw_record, keys_to_features)\n\n image = tf.image.decode_image(\n tf.reshape(parsed['image/encoded'], shape=[]),\n _NUM_CHANNELS)\n\n # Note that tf.image.convert_image_dtype scales the image data to [0, 1).\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n\n label = tf.cast(\n tf.reshape(parsed['image/class/label'], shape=[]),\n dtype=tf.int32)\n\n return {\"image\": image}, label", "def _decode_record(record, name_to_features):\n example = tf.io.parse_single_example(serialized=record, features=name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.cast(t, dtype=tf.int32)\n example[name] = t\n\n return example", "def input_fn(params=None):\n del params\n full_pattern = os.path.join(flags.data_dir, pattern)\n dataset = tf.data.Dataset.list_files(full_pattern)\n\n if flags.initial_shuffle_buffer_size > 0:\n dataset = dataset.shuffle(buffer_size=flags.initial_shuffle_buffer_size)\n dataset = dataset.repeat()\n\n # use interleave() and prefetch() to read many files concurrently\n def prefetch_map_fn(filename):\n return 
tf.data.TFRecordDataset(filename).prefetch(batch_size)\n\n if flags.prefetch_enabled:\n dataset = dataset.interleave(\n prefetch_map_fn,\n cycle_length=flags.cycle_length,\n block_length=batch_size)\n\n if flags.followup_shuffle_buffer_size > 0:\n dataset = dataset.shuffle(buffer_size=flags.followup_shuffle_buffer_size)\n\n frame_nums = range(0, flags.sequence_length, flags.skip_num)\n\n def parser(_, serialized_example):\n \"\"\"Parses a single example.\"\"\"\n features = {}\n\n for i in frame_nums:\n image_name = 'image_' + str(i)\n if flags.dataset_type == 'robot':\n pose_name = 'state_' + str(i)\n action_name = 'action_' + str(i)\n joint_pos_name = 'joint_positions_' + str(i)\n\n features[pose_name] = tf.FixedLenFeature([flags.pose_dim], tf.float32)\n features[image_name] = tf.FixedLenFeature([1], tf.string)\n features[action_name] = tf.FixedLenFeature([flags.pose_dim],\n tf.float32)\n features[joint_pos_name] = tf.FixedLenFeature([flags.joint_pos_dim],\n tf.float32)\n else:\n features[image_name] = tf.FixedLenFeature([1], tf.string)\n\n parsed_input = tf.parse_single_example(serialized_example, features)\n\n for i in frame_nums:\n image_name = 'image_' + str(i)\n pose_name = 'state_' + str(i)\n action_name = 'action_' + str(i)\n joint_pos_name = 'joint_positions_' + str(i)\n\n # Process image\n image_buffer = tf.reshape(parsed_input[image_name], shape=[])\n image = tf.image.decode_jpeg(image_buffer, channels=COLOR_CHAN)\n image = tf.image.resize_images(\n image, (IMG_HEIGHT, IMG_WIDTH),\n method=tf.image.ResizeMethod.BICUBIC)\n image = tf.cast(tf.expand_dims(image, 0), tf.float32) / 255.0\n\n if flags.dataset_type == 'robot':\n pose = tf.reshape(parsed_input[pose_name], shape=[flags.pose_dim])\n pose = tf.expand_dims(pose, 0)\n action = tf.reshape(parsed_input[action_name], shape=[flags.pose_dim])\n action = tf.expand_dims(action, 0)\n joint_pos = tf.reshape(\n parsed_input[joint_pos_name], shape=[flags.joint_pos_dim])\n joint_pos = tf.expand_dims(joint_pos, 0)\n else:\n pose = tf.zeros([1, flags.pose_dim])\n action = tf.zeros([1, flags.pose_dim])\n joint_pos = tf.zeros([1, flags.joint_pos_dim])\n\n if i == 0:\n image_seq = image\n action_seq, pose_seq, joint_pos_seq = action, pose, joint_pos\n else:\n image_seq = tf.concat([image_seq, image], 0)\n action_seq = tf.concat([action_seq, action], 0)\n pose_seq = tf.concat([pose_seq, pose], 0)\n joint_pos_seq = tf.concat([joint_pos_seq, joint_pos], 0)\n\n return image_seq, action_seq, action_seq, joint_pos_seq\n\n dataset = dataset.map(\n parser,\n num_parallel_calls=flags.num_parallel_calls).prefetch(batch_size)\n\n dataset = dataset.batch(batch_size)\n\n # use prefetch to overlap producer and consumer\n dataset = dataset.prefetch(1)\n\n images, actions, poses, joint_pos = dataset.make_one_shot_iterator(\n ).get_next()\n\n images.set_shape([batch_size, len(frame_nums), IMG_HEIGHT, IMG_WIDTH, 3])\n actions.set_shape([batch_size, len(frame_nums), flags.pose_dim])\n poses.set_shape([batch_size, len(frame_nums), flags.pose_dim])\n joint_pos.set_shape([batch_size, len(frame_nums), flags.joint_pos_dim])\n\n joint_poses = tf.concat([joint_pos, poses], 2)\n\n output_features = {\n IMAGE_FEATURE_NAME: images,\n JOINT_POSE_FEATURE_NAME: joint_poses,\n ACTION_FEATURE_NAME: actions\n }\n\n return output_features, None", "def file_based_convert_examples_to_features(path, label2id, max_seq_length, tokenize_fn, output_file):\n tf.logging.info(\"Create new tfrecord {}.\".format(output_file))\n writer = tf.python_io.TFRecordWriter(output_file)\n df = 
pd.read_csv(path, index_col=0)\n df = shuffle(df)\n count = 0\n for index, row in df.iterrows():\n # label = label2id[row[\"topic\"].strip()]\n feature = convert_single_example(row[config[\"column_name_x1\"]],\n row[config[\"column_name_x2\"]] if config[\"column_name_x2\"] != \"\" else None,\n max_seq_length, tokenize_fn)\n\n def create_int_feature(values):\n f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return f\n\n def create_float_feature(values):\n f = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))\n return f\n\n label = label2id.get(str(row[config[\"column_name_y\"]]))\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(feature[0])\n features[\"input_mask\"] = create_float_feature(feature[1])\n features[\"segment_ids\"] = create_int_feature(feature[2])\n features[\"label_ids\"] = create_int_feature([label])\n count += 1\n if count < 5:\n print(\"*** Example ***\")\n print(\"input_ids: %s\" % \" \".join([str(x) for x in feature[0]]))\n print(\"input_mask: %s\" % \" \".join([str(x) for x in feature[1]]))\n print(\"segment_ids: %s\" % \" \".join([str(x) for x in feature[2]]))\n\n print(\"label: %s (id = %s)\" % (row[config[\"column_name_y\"]], str(label)))\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n writer.write(tf_example.SerializeToString())\n if count % 1000 == 0:\n print(count)\n writer.close()\n print(\"example count:\", count)", "def parse_from_tf_example(serialized,\n context_feature_spec=None,\n example_feature_spec=None,\n size_feature_name=None,\n mask_feature_name=None):\n batch_size = tf.shape(serialized)[0]\n features = tf.io.parse_example(\n serialized, features={\n **context_feature_spec,\n **example_feature_spec\n })\n for feature_key, feature_type in example_feature_spec.items():\n if isinstance(feature_type, tf.io.RaggedFeature):\n features[feature_key] = tf.expand_dims(features[feature_key], axis=1)\n else:\n # feature is either a Tensor or SparseTensor.\n features[feature_key] = utils.reshape_first_ndims(features[feature_key],\n 1, [batch_size, 1])\n if size_feature_name is not None:\n # Populate features with a size feature of value 1, corresponding to only\n # one example per list.\n features[size_feature_name] = tf.ones(shape=[batch_size])\n if mask_feature_name:\n features[mask_feature_name] = tf.sequence_mask(tf.ones(shape=[batch_size]))\n return features", "def file_based_convert_examples_to_features_v2(mode,\n examples, max_seq_length_x, max_seq_length_y, tokenizer, output_file, verbose=False):\n\n writer = tf.python_io.TFRecordWriter(output_file)\n\n for (_, example) in enumerate(examples):\n\n fea_x, fea_y = process_single_example(mode,\n example, max_seq_length_x, max_seq_length_y, tokenizer)\n\n def _create_int_feature(values):\n return tf.train.Feature(\n int64_list=tf.train.Int64List(value=list(values)))\n\n features = collections.OrderedDict()\n features[\"input_ids_x1x2ysx1xx2\"] = _create_int_feature(fea_x[\"input_ids_x1x2ysx1xx2\"])\n features[\"input_mask_x1x2ysx1xx2\"] = _create_int_feature(fea_x[\"input_mask_x1x2ysx1xx2\"])\n features[\"segment_ids_x1x2ysx1xx2\"] = _create_int_feature(fea_x[\"segment_ids_x1x2ysx1xx2\"])\n features[\"input_ids_x1x2ysx1xx2yy\"] = _create_int_feature(fea_x[\"input_ids_x1x2ysx1xx2yy\"])\n features[\"input_mask_x1x2ysx1xx2yy\"] = _create_int_feature(fea_x[\"input_mask_x1x2ysx1xx2yy\"])\n features[\"segment_ids_x1x2ysx1xx2yy\"] = _create_int_feature(fea_x[\"segment_ids_x1x2ysx1xx2yy\"])\n \n 
features[\"input_ids_x1x2\"] = _create_int_feature(fea_x[\"input_ids_x1x2\"])\n features[\"input_mask_x1x2\"] = _create_int_feature(fea_x[\"input_mask_x1x2\"])\n features[\"segment_ids_x1x2\"] = _create_int_feature(fea_x[\"segment_ids_x1x2\"])\n features[\"input_ids_x1xx2\"] = _create_int_feature(fea_x[\"input_ids_x1xx2\"])\n features[\"input_mask_x1xx2\"] = _create_int_feature(fea_x[\"input_mask_x1xx2\"])\n features[\"segment_ids_x1xx2\"] = _create_int_feature(fea_x[\"segment_ids_x1xx2\"])\n \n features[\"input_ids_y1\"] = _create_int_feature(fea_y[\"input_ids_y1\"])\n features[\"input_mask_y1\"] = _create_int_feature(fea_y[\"input_mask_y1\"])\n features[\"segment_ids_y1\"] = _create_int_feature(fea_y[\"segment_ids_y1\"])\n features[\"input_ids_y2\"] = _create_int_feature(fea_y[\"input_ids_y2\"])\n features[\"input_mask_y2\"] = _create_int_feature(fea_y[\"input_mask_y2\"])\n features[\"segment_ids_y2\"] = _create_int_feature(fea_y[\"segment_ids_y2\"])\n features[\"input_ids_y3\"] = _create_int_feature(fea_y[\"input_ids_y3\"])\n features[\"input_mask_y3\"] = _create_int_feature(fea_y[\"input_mask_y3\"])\n features[\"segment_ids_y3\"] = _create_int_feature(fea_y[\"segment_ids_y3\"])\n\n features[\"input_ids_yy1\"] = _create_int_feature(fea_y[\"input_ids_yy1\"])\n features[\"input_mask_yy1\"] = _create_int_feature(fea_y[\"input_mask_yy1\"])\n features[\"segment_ids_yy1\"] = _create_int_feature(fea_y[\"segment_ids_yy1\"])\n features[\"input_ids_yy2\"] = _create_int_feature(fea_y[\"input_ids_yy2\"])\n features[\"input_mask_yy2\"] = _create_int_feature(fea_y[\"input_mask_yy2\"])\n features[\"segment_ids_yy2\"] = _create_int_feature(fea_y[\"segment_ids_yy2\"])\n features[\"input_ids_yy3\"] = _create_int_feature(fea_y[\"input_ids_yy3\"])\n features[\"input_mask_yy3\"] = _create_int_feature(fea_y[\"input_mask_yy3\"])\n features[\"segment_ids_yy3\"] = _create_int_feature(fea_y[\"segment_ids_yy3\"])\n\n features[\"input_ids_y1_gpt\"] = _create_int_feature(fea_y[\"input_ids_y1_gpt\"])\n features[\"input_mask_y1_gpt\"] = _create_int_feature(fea_y[\"input_mask_y1_gpt\"])\n features[\"segment_ids_y1_gpt\"] = _create_int_feature(fea_y[\"segment_ids_y1_gpt\"])\n features[\"input_ids_y2_gpt\"] = _create_int_feature(fea_y[\"input_ids_y2_gpt\"])\n features[\"input_mask_y2_gpt\"] = _create_int_feature(fea_y[\"input_mask_y2_gpt\"])\n features[\"segment_ids_y2_gpt\"] = _create_int_feature(fea_y[\"segment_ids_y2_gpt\"])\n features[\"input_ids_y3_gpt\"] = _create_int_feature(fea_y[\"input_ids_y3_gpt\"])\n features[\"input_mask_y3_gpt\"] = _create_int_feature(fea_y[\"input_mask_y3_gpt\"])\n features[\"segment_ids_y3_gpt\"] = _create_int_feature(fea_y[\"segment_ids_y3_gpt\"])\n\n tf_example = tf.train.Example(\n features=tf.train.Features(feature=features))\n writer.write(tf_example.SerializeToString())", "def input_fn():\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=sample_length)\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: tf.parse_single_example(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n return d", "def convert_examples_to_features(examples,label_list, max_seq_length,tokenizer):\r\n label_map = {}\r\n for (i, label) in enumerate(label_list):\r\n label_map[label] = i\r\n\r\n input_data=[]\r\n for (ex_index, example) in enumerate(examples):\r\n tokens_a = tokenizer.tokenize(example.text_a)\r\n tokens_b = None\r\n if example.text_b:\r\n tokens_b = tokenizer.tokenize(example.text_b)\r\n if 
tokens_b:\r\n # Modifies `tokens_a` and `tokens_b` in place so that the total\r\n # length is less than the specified length.\r\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\r\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\r\n else:\r\n # Account for [CLS] and [SEP] with \"- 2\"\r\n if len(tokens_a) > max_seq_length - 2:\r\n tokens_a = tokens_a[0:(max_seq_length - 2)]\r\n\r\n if ex_index % 10000 == 0:\r\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\r\n\r\n # The convention in BERT is:\r\n # (a) For sequence pairs:\r\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\r\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\r\n # (b) For single sequences:\r\n # tokens: [CLS] the dog is hairy . [SEP]\r\n # type_ids: 0 0 0 0 0 0 0\r\n #\r\n # Where \"type_ids\" are used to indicate whether this is the first\r\n # sequence or the second sequence. The embedding vectors for `type=0` and\r\n # `type=1` were learned during pre-training and are added to the wordpiece\r\n # embedding vector (and position vector). This is not *strictly* necessary\r\n # since the [SEP] token unambigiously separates the sequences, but it makes\r\n # it easier for the model to learn the concept of sequences.\r\n #\r\n # For classification tasks, the first vector (corresponding to [CLS]) is\r\n # used as as the \"sentence vector\". Note that this only makes sense because\r\n # the entire model is fine-tuned.\r\n tokens = []\r\n segment_ids = []\r\n tokens.append(\"[CLS]\")\r\n segment_ids.append(0)\r\n for token in tokens_a:\r\n tokens.append(token)\r\n segment_ids.append(0)\r\n tokens.append(\"[SEP]\")\r\n segment_ids.append(0)\r\n\r\n if tokens_b:\r\n for token in tokens_b:\r\n tokens.append(token)\r\n segment_ids.append(1)\r\n tokens.append(\"[SEP]\")\r\n segment_ids.append(1)\r\n\r\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\r\n\r\n input_mask = [1] * len(input_ids)\r\n\r\n while len(input_ids) < max_seq_length:\r\n input_ids.append(0)\r\n input_mask.append(0)\r\n segment_ids.append(0)\r\n assert len(input_ids) == max_seq_length\r\n assert len(input_mask) == max_seq_length\r\n assert len(segment_ids) == max_seq_length\r\n\r\n label_id = label_map[example.label]\r\n if ex_index < 3:\r\n tf.logging.info(\"*** Example ***\")\r\n tf.logging.info(\"guid: %s\" % (example.guid))\r\n tf.logging.info(\"tokens: %s\" % \" \".join([tokenization.printable_text(x) for x in tokens]))\r\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\r\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\r\n tf.logging.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\r\n tf.logging.info(\"label: %s (id = %d)\" % (example.label, label_id))\r\n\r\n features = collections.OrderedDict()\r\n features[\"input_ids\"] = input_ids\r\n features[\"input_mask\"] = input_mask\r\n features[\"segment_ids\"] = segment_ids\r\n features[\"label_ids\"] =label_id\r\n input_data.append(features)\r\n\r\n return input_data", "def parse_serialized_simulation_example(example_proto, metadata):\n if 'context_mean' in metadata:\n feature_description = _FEATURE_DESCRIPTION_WITH_GLOBAL_CONTEXT\n else:\n feature_description = _FEATURE_DESCRIPTION\n context, parsed_features = tf.io.parse_single_sequence_example(\n example_proto,\n context_features=_CONTEXT_FEATURES,\n sequence_features=feature_description)\n for feature_key, item in parsed_features.items():\n convert_fn = functools.partial(\n convert_to_tensor, 
encoded_dtype=_FEATURE_DTYPES[feature_key]['in'])\n parsed_features[feature_key] = tf.py_function(\n convert_fn, inp=[item.values], Tout=_FEATURE_DTYPES[feature_key]['out'])\n\n # There is an extra frame at the beginning so we can calculate pos change\n # for all frames used in the paper.\n position_shape = [metadata['sequence_length'] + 1, -1, metadata['dim']]\n\n # Reshape positions to correct dim:\n parsed_features['position'] = tf.reshape(parsed_features['position'],\n position_shape)\n # Set correct shapes of the remaining tensors.\n sequence_length = metadata['sequence_length'] + 1\n if 'context_mean' in metadata:\n context_feat_len = len(metadata['context_mean'])\n parsed_features['step_context'] = tf.reshape(\n parsed_features['step_context'],\n [sequence_length, context_feat_len])\n # Decode particle type explicitly\n context['particle_type'] = tf.py_function(\n functools.partial(convert_fn, encoded_dtype=np.int64),\n inp=[context['particle_type'].values],\n Tout=[tf.int64])\n context['particle_type'] = tf.reshape(context['particle_type'], [-1])\n return context, parsed_features", "def parse_fn(self, example_serialized):\n feature_description = {\n 'image_raw': tf.io.FixedLenFeature([], tf.string),\n 'label': tf.io.FixedLenFeature([], tf.int64)\n }\n features = tf.io.parse_single_example(example_serialized, feature_description)\n image = tf.io.decode_raw(features['image_raw'], tf.uint8)\n image = tf.cast(image, dtype='float32') / 255.0\n label = tf.cast(features['label'], dtype=tf.int32)\n image = tf.reshape(image, [32, 32, 3])\n if self.is_training:\n image = tf.image.resize_with_crop_or_pad(image, 32 + 8, 32 + 8)\n image = tf.image.random_crop(image, [32, 32, 3])\n image = tf.image.random_flip_left_right(image)\n return image, label", "def main(unused_argv):\n\n # Create tokenizer based on the training files.\n logging.info(\"Step 1: Loading tokenizer\")\n train_en = FLAGS.data_dir+'/EN_TRAIN_CORPUS_NAME'\n val_en = FLAGS.data_dir+'/EN_VAL_CORPUS_NAME'\n\n VOCAB_FILE = \"VOCAB_NAME\" \n vocab_file = os.path.join(FLAGS.data_dir, VOCAB_FILE)\n subtokenizer = tokenizer.Subtokenizer.init_from_files(\n vocab_file, [train_en], _TARGET_VOCAB_SIZE, _TARGET_THRESHOLD,\n min_count=None if FLAGS.search else _TRAIN_DATA_MIN_COUNT)\n\n compiled_train_files = (train_en, train_en)\n compiled_eval_files = (val_en, val_en)\n\n # Tokenize and save data as Examples in the TFRecord format.\n logging.info(\"Step 3: Preprocessing and saving data\")\n train_tfrecord_files = encode_and_save_files(\n subtokenizer, FLAGS.data_dir, compiled_train_files, _TRAIN_TAG,\n _TRAIN_SHARDS)\n encode_and_save_files(\n subtokenizer, FLAGS.data_dir, compiled_eval_files, _EVAL_TAG,\n _EVAL_SHARDS)\n\n for fname in train_tfrecord_files:\n shuffle_records(fname)", "def to_tfrecord(data_blob):\n\n id = np.array(data_blob['id'], dtype=np.int32).tobytes()\n dim = np.array(data_blob['images'].shape, dtype=np.int32).tobytes()\n\n images = np.array(data_blob['images'], dtype=np.uint8).tobytes()\n poses = np.array(data_blob['poses'], dtype=np.float32).tobytes()\n depth = np.array(data_blob['depth'], dtype=np.float32).tobytes()\n filled = np.array(data_blob['filled'], dtype=np.float32).tobytes()\n intrinsics = np.array(data_blob['intrinsics'], dtype=np.float32).tobytes()\n\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'id': tf.train.Feature(bytes_list=tf.train.BytesList(value=[id])),\n 'dim': tf.train.Feature(bytes_list=tf.train.BytesList(value=[dim])),\n 'images': 
tf.train.Feature(bytes_list=tf.train.BytesList(value=[images])),\n 'poses': tf.train.Feature(bytes_list=tf.train.BytesList(value=[poses])),\n 'depth': tf.train.Feature(bytes_list=tf.train.BytesList(value=[depth])),\n 'filled': tf.train.Feature(bytes_list=tf.train.BytesList(value=[filled])),\n 'intrinsics': tf.train.Feature(bytes_list=tf.train.BytesList(value=[intrinsics])),\n }))\n\n return example", "def _read_and_decode(example_proto,data_shape,dtypes):\n features = {}\n for name in data_shape:\n features[name] = tf.FixedLenFeature([], tf.string)\n parsed_features = tf.parse_single_example(example_proto, features)\n count = 0\n res = {}\n for name in data_shape:\n res[name] = parsed_features[name]\n if dtypes[count]!=str:\n res[name]=tf.decode_raw(res[name],dtypes[count])\n if dtypes[count]==tf.float32 or dtypes[count]==tf.float64:\n res[name]=tf.convert_to_tensor(res[name],dtype=dtypes[count])\n if data_shape[name]:\n res[name]=tf.reshape(res[name],shape=data_shape[name])\n count += 1\n return res", "def build_tfrecord_input(conf, training=True):\n filenames = gfile.Glob(os.path.join(conf['data_dir'], '*'))\n if not filenames:\n raise RuntimeError('No data_files files found.')\n\n index = int(np.floor(conf['train_val_split'] * len(filenames)))\n if training:\n filenames = filenames[:index]\n else:\n filenames = filenames[index:]\n\n if conf['visualize']:\n filenames = gfile.Glob(os.path.join(conf['data_dir'], '*'))\n print 'using input file', filenames\n shuffle = False\n else: shuffle = True\n\n filename_queue = tf.train.string_input_producer(filenames, shuffle=shuffle)\n reader = tf.TFRecordReader()\n _, serialized_example = reader.read(filename_queue)\n\n image_aux1_seq, image_main_seq, endeffector_pos_seq, action_seq, object_pos_seq, init_pix_distrib_seq = [], [], [], [], [], []\n init_pix_pos_seq = []\n\n load_indx = range(0, 30, conf['skip_frame'])\n load_indx = load_indx[:conf['sequence_length']]\n print 'using frame sequence: ', load_indx\n\n for i in load_indx:\n if 'single_view' not in conf:\n image_main_name = str(i) + '/image_main/encoded'\n image_aux1_name = str(i) + '/image_aux1/encoded'\n action_name = str(i) + '/action'\n endeffector_pos_name = str(i) + '/endeffector_pos'\n # state_name = 'move/' +str(i) + '/state'\n\n if 'canon_ex' in conf:\n init_pix_pos_name = '/init_pix_pos'\n init_pix_distrib_name = str(i) +'/init_pix_distrib'\n\n features = {\n\n image_aux1_name: tf.FixedLenFeature([1], tf.string),\n action_name: tf.FixedLenFeature([ACION_DIM], tf.float32),\n endeffector_pos_name: tf.FixedLenFeature([STATE_DIM], tf.float32),\n }\n if 'single_view' not in conf:\n (features[image_main_name]) = tf.FixedLenFeature([1], tf.string)\n\n if 'canon_ex' in conf:\n (features[init_pix_distrib_name]) = tf.FixedLenFeature([1], tf.string)\n (features[init_pix_pos_name]) = tf.FixedLenFeature([2], tf.float32)\n\n features = tf.parse_single_example(serialized_example, features=features)\n\n COLOR_CHAN = 3\n if '128x128' in conf:\n ORIGINAL_WIDTH = 128\n ORIGINAL_HEIGHT = 128\n IMG_WIDTH = 128\n IMG_HEIGHT = 128\n else:\n ORIGINAL_WIDTH = 64\n ORIGINAL_HEIGHT = 64\n IMG_WIDTH = 64\n IMG_HEIGHT = 64\n\n if 'single_view' not in conf:\n image = tf.decode_raw(features[image_main_name], tf.uint8)\n image = tf.reshape(image, shape=[1,ORIGINAL_HEIGHT*ORIGINAL_WIDTH*COLOR_CHAN])\n image = tf.reshape(image, shape=[ORIGINAL_HEIGHT, ORIGINAL_WIDTH, COLOR_CHAN])\n if IMG_HEIGHT != IMG_WIDTH:\n raise ValueError('Unequal height and width unsupported')\n crop_size = min(ORIGINAL_HEIGHT, 
ORIGINAL_WIDTH)\n image = tf.image.resize_image_with_crop_or_pad(image, crop_size, crop_size)\n image = tf.reshape(image, [1, crop_size, crop_size, COLOR_CHAN])\n image = tf.image.resize_bicubic(image, [IMG_HEIGHT, IMG_WIDTH])\n image = tf.cast(image, tf.float32) / 255.0\n image_main_seq.append(image)\n\n image = tf.decode_raw(features[image_aux1_name], tf.uint8)\n image = tf.reshape(image, shape=[1, ORIGINAL_HEIGHT * ORIGINAL_WIDTH * COLOR_CHAN])\n image = tf.reshape(image, shape=[ORIGINAL_HEIGHT, ORIGINAL_WIDTH, COLOR_CHAN])\n if IMG_HEIGHT != IMG_WIDTH:\n raise ValueError('Unequal height and width unsupported')\n crop_size = min(ORIGINAL_HEIGHT, ORIGINAL_WIDTH)\n image = tf.image.resize_image_with_crop_or_pad(image, crop_size, crop_size)\n image = tf.reshape(image, [1, crop_size, crop_size, COLOR_CHAN])\n image = tf.image.resize_bicubic(image, [IMG_HEIGHT, IMG_WIDTH])\n image = tf.cast(image, tf.float32) / 255.0\n image_aux1_seq.append(image)\n\n if 'canon_ex' in conf:\n init_pix_distrib = tf.decode_raw(features[init_pix_distrib_name], tf.uint8)\n init_pix_distrib = tf.reshape(init_pix_distrib, shape=[1, ORIGINAL_HEIGHT * ORIGINAL_WIDTH])\n init_pix_distrib = tf.reshape(init_pix_distrib, shape=[ORIGINAL_HEIGHT, ORIGINAL_WIDTH, 1])\n crop_size = min(ORIGINAL_HEIGHT, ORIGINAL_WIDTH)\n init_pix_distrib = tf.image.resize_image_with_crop_or_pad(init_pix_distrib, crop_size, crop_size)\n init_pix_distrib = tf.reshape(init_pix_distrib, [1, crop_size, crop_size, 1])\n init_pix_distrib = tf.image.resize_bicubic(init_pix_distrib, [IMG_HEIGHT, IMG_WIDTH])\n init_pix_distrib = tf.cast(init_pix_distrib, tf.float32) / 255.0\n init_pix_distrib_seq.append(init_pix_distrib)\n\n init_pix_pos = tf.reshape(features[init_pix_pos_name], shape=[1, 2])\n init_pix_pos_seq.append(init_pix_pos)\n\n endeffector_pos = tf.reshape(features[endeffector_pos_name], shape=[1, STATE_DIM])\n endeffector_pos_seq.append(endeffector_pos)\n action = tf.reshape(features[action_name], shape=[1, ACION_DIM])\n action_seq.append(action)\n\n if 'single_view' not in conf:\n # image_main_seq = tf.concat(values=image_main_seq, axis=0)\n image_main_seq = tf.concat(concat_dim=0, values=image_main_seq)\n\n # image_aux1_seq = tf.concat(values=image_aux1_seq, axis=0)\n image_aux1_seq = tf.concat(concat_dim=0, values=image_aux1_seq)\n\n if conf['visualize']: num_threads = 1\n else: num_threads = np.min((conf['batch_size'], 32))\n\n if 'ignore_state_action' in conf:\n [image_main_batch, image_aux1_batch] = tf.train.batch(\n [image_main_seq, image_aux1_seq],\n conf['batch_size'],\n num_threads=num_threads,\n capacity=100 * conf['batch_size'])\n return image_main_batch, image_aux1_batch, None, None\n elif 'canon_ex' in conf:\n endeffector_pos_seq = tf.concat(endeffector_pos_seq, 0)\n action_seq = tf.concat(action_seq, 0)\n\n init_pix_pos_seq = tf.concat(init_pix_pos_seq, 0)\n init_pix_distrib_seq = tf.concat(init_pix_distrib_seq, 0)\n\n [image_aux1_batch, action_batch, endeffector_pos_batch, init_pix_distrib_batch, init_pix_pos_batch] = tf.train.batch(\n [image_aux1_seq, action_seq, endeffector_pos_seq, init_pix_distrib_seq, init_pix_pos_seq],\n conf['batch_size'],\n num_threads=num_threads,\n capacity=100 * conf['batch_size'])\n return image_aux1_batch, action_batch, endeffector_pos_batch, init_pix_distrib_batch, init_pix_pos_batch\n\n elif 'single_view' in conf:\n # endeffector_pos_seq = tf.concat(endeffector_pos_seq, 0)\n # action_seq = tf.concat(action_seq, 0)\n endeffector_pos_seq = tf.concat(0, endeffector_pos_seq)\n action_seq = 
tf.concat(0, action_seq)\n [image_aux1_batch, action_batch, endeffector_pos_batch] = tf.train.batch(\n [image_aux1_seq, action_seq, endeffector_pos_seq],\n conf['batch_size'],\n num_threads=num_threads,\n capacity=100 * conf['batch_size'])\n return image_aux1_batch, action_batch, endeffector_pos_batch\n\n else:\n # endeffector_pos_seq = tf.concat(endeffector_pos_seq, 0)\n endeffector_pos_seq = tf.concat(0, endeffector_pos_seq)\n # action_seq = tf.concat(action_seq, 0)\n action_seq = tf.concat(0, action_seq)\n [image_main_batch, image_aux1_batch, action_batch, endeffector_pos_batch] = tf.train.batch(\n [image_main_seq,image_aux1_seq, action_seq, endeffector_pos_seq],\n conf['batch_size'],\n num_threads=num_threads,\n capacity=100 * conf['batch_size'])\n return image_main_batch, image_aux1_batch, action_batch, endeffector_pos_batch", "def serve_tf_examples_fn(serialized_tf_examples):\n feature_spec = tf_transform_output.raw_feature_spec()\n feature_spec.pop(label_column)\n parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec)\n transformed_features = model.tft_layer(parsed_features)\n return model(transformed_features)", "def input_fn(params):\n\n batch_size = params[\"batch_size\"]\n output_buffer_size = batch_size * 1000\n\n def extract_fn(data_record):\n features = {\n \"query_ids\": tf.FixedLenSequenceFeature(\n [], tf.int64, allow_missing=True),\n \"doc_ids\": tf.FixedLenSequenceFeature(\n [], tf.int64, allow_missing=True),\n \"label\": tf.FixedLenFeature([], tf.int64),\n }\n sample = tf.parse_single_example(data_record, features)\n\n query_ids = tf.cast(sample[\"query_ids\"], tf.int32)\n doc_ids = tf.cast(sample[\"doc_ids\"], tf.int32)\n label_ids = tf.cast(sample[\"label\"], tf.int32)\n input_ids = tf.concat((query_ids, doc_ids), 0)\n\n query_segment_id = tf.zeros_like(query_ids)\n doc_segment_id = tf.ones_like(doc_ids)\n segment_ids = tf.concat((query_segment_id, doc_segment_id), 0)\n\n input_mask = tf.ones_like(input_ids)\n\n features = {\n \"input_ids\": input_ids,\n \"segment_ids\": segment_ids,\n \"input_mask\": input_mask,\n \"label_ids\": label_ids,\n }\n return features\n\n dataset = tf.data.TFRecordDataset([dataset_path])\n dataset = dataset.map(\n extract_fn, num_parallel_calls=4).prefetch(output_buffer_size)\n\n if is_training:\n dataset = dataset.repeat()\n dataset = dataset.shuffle(buffer_size=1000)\n else:\n if max_eval_examples:\n # Use at most this number of examples (debugging only).\n dataset = dataset.take(max_eval_examples)\n # pass\n\n dataset = dataset.padded_batch(\n batch_size=batch_size,\n padded_shapes={\n \"input_ids\": [seq_length],\n \"segment_ids\": [seq_length],\n \"input_mask\": [seq_length],\n \"label_ids\": [],\n },\n padding_values={\n \"input_ids\": 0,\n \"segment_ids\": 0,\n \"input_mask\": 0,\n \"label_ids\": 0,\n },\n drop_remainder=True)\n\n return dataset", "def parse_record(raw_record, is_training, dtype):\n\n templar_buffer, search_buffer, templar_bbox, search_bbox = parse_example_proto(raw_record)\n templar_img, search_img, score, score_weight, tight_temp_bbox, tight_search_bbox = preprocess_pair(\n templar_buffer=templar_buffer, search_buffer=search_buffer, templar_bbox=templar_bbox,\n search_bbox=search_bbox, num_channels=_NUM_CHANNELS, is_training=is_training)\n\n templar_img = tf.cast(templar_img, dtype)\n search_img = tf.cast(search_img, dtype)\n score = tf.cast(score, tf.int32)\n score_weight = tf.cast(score_weight, dtype)\n tight_temp_bbox = tf.cast(tight_temp_bbox, tf.int32)\n #tight_search_bbox = 
tf.cast(tight_search_bbox, tf.int32)\n\n dict = {'templar': templar_img, 'search': search_img, 'score': score, 'score_weight': score_weight,\n 'tight_temp_bbox': tight_temp_bbox, 'tight_search_bbox': tight_search_bbox}\n\n return dict", "def _convert_example(self, output_file, data_dict):\n print('Generating %s' % output_file)\n with tf.compat.v1.python_io.TFRecordWriter(output_file) as record_writer:\n data = data_dict['data'].astype(np.int8)\n labels = data_dict['label'].astype(np.int64)\n num_entries_in_batch = len(labels)\n for i in tqdm(range(num_entries_in_batch)):\n example = tf.train.Example(features=tf.train.Features(\n feature={\n 'data': _bytes_feature(data[i].tobytes()),\n 'label': _int_feature(labels[i]),\n }))\n record_writer.write(example.SerializeToString())", "def _convert_example(self, output_file, data_dict):\n print('Generating %s' % output_file)\n with tf.compat.v1.python_io.TFRecordWriter(output_file) as record_writer:\n data = data_dict['data'].astype(np.int8)\n labels = data_dict['label'].astype(np.int64)\n num_entries_in_batch = len(labels)\n for i in tqdm(range(num_entries_in_batch)):\n example = tf.train.Example(features=tf.train.Features(\n feature={\n 'data': _bytes_feature(data[i].tobytes()),\n 'label': _int_feature(labels[i]),\n }))\n record_writer.write(example.SerializeToString())", "def convert_single_example(ex_index, example, label_list, max_seq_length,\n tokenizer):\n\n if isinstance(example, PaddingInputExample):\n return InputFeatures(\n input_ids=[0] * max_seq_length,\n input_mask=[0] * max_seq_length,\n segment_ids=[0] * max_seq_length,\n label_id=0,\n is_real_example=False)\n\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n tokens_a = tokenizer.tokenize(example.text_a)\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as the \"sentence vector\". 
Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n label_id = label_map[example.label]\n if ex_index < 5:\n print(\"*** Example ***\")\n print(\"guid: %s\" % (example.guid))\n print(\"tokens: %s\" % \" \".join([str(x) for x in tokens]))\n print(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n print(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n print(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n print(\"label: %s (id = %d)\" % (example.label, label_id))\n\n feature = InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id,\n is_real_example=True)\n return feature", "def convert_single_example(ex_index, example, label_list, max_seq_length,\n tokenizer):\n\n if isinstance(example, PaddingInputExample):\n return InputFeatures(\n input_ids=[0] * max_seq_length,\n input_mask=[0] * max_seq_length,\n segment_ids=[0] * max_seq_length,\n label_id=0,\n is_real_example=False)\n\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n tokens_a = tokenizer.tokenize(example.text_a)\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as the \"sentence vector\". 
Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n label_id = label_map[example.label]\n if ex_index < 5:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"guid: %s\" % (example.guid))\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n tf.logging.info(\"label: %s (id = %d)\" % (example.label, label_id))\n\n feature = InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id,\n is_real_example=True)\n return feature", "def file_based_convert_examples_to_features_v2(\n examples, max_seq_length, encoder, output_file, verbose=False):\n\n writer = tf.python_io.TFRecordWriter(output_file)\n\n for (_, example) in enumerate(examples):\n\n fea = process_single_example(\n example, max_seq_length, encoder)\n\n if verbose:\n print(fea[\"x1x2yx1xx2_len\"])\n\n def _create_int_feature(values):\n return tf.train.Feature(\n int64_list=tf.train.Int64List(value=list(values)))\n\n features = collections.OrderedDict()\n features[\"x1_ids\"] = _create_int_feature(fea[\"x1_ids\"])\n features[\"x1_len\"] = _create_int_feature([fea[\"x1_len\"]])\n features[\"x1x2_ids\"] = _create_int_feature(fea[\"x1x2_ids\"])\n features[\"x1x2_len\"] = _create_int_feature([fea[\"x1x2_len\"]])\n features[\"x1x2y_ids\"] = _create_int_feature(fea[\"x1x2y_ids\"])\n features[\"x1x2y_len\"] = _create_int_feature([fea[\"x1x2y_len\"]])\n features[\"x1xx2_ids\"] = _create_int_feature(fea[\"x1xx2_ids\"])\n features[\"x1xx2_len\"] = _create_int_feature([fea[\"x1xx2_len\"]])\n features[\"x1x2yx1xx2_ids\"] = _create_int_feature(fea[\"x1x2yx1xx2_ids\"])\n features[\"x1x2yx1xx2_len\"] = _create_int_feature([fea[\"x1x2yx1xx2_len\"]])\n features[\"x1x2yx1my_ids\"] = _create_int_feature(fea[\"x1x2yx1my_ids\"])\n features[\"x1x2yx1my_len\"] = _create_int_feature([fea[\"x1x2yx1my_len\"]])\n features[\"x1x2yx1m_len\"] = _create_int_feature([fea[\"x1x2yx1m_len\"]])\n features[\"x1x2yx1xx2yy_ids\"] = _create_int_feature(fea[\"x1x2yx1xx2yy_ids\"])\n features[\"x1x2yx1xx2yy_len\"] = _create_int_feature([fea[\"x1x2yx1xx2yy_len\"]])\n\n tf_example = tf.train.Example(\n features=tf.train.Features(feature=features))\n writer.write(tf_example.SerializeToString())", "def process(self, example):\n self.get_counter(\"examples-total\").inc()\n label = example_util.get_bytes_feature(example, _LABEL_COLUMN)[0]\n self.get_counter(\"examples-{}\".format(label)).inc()\n yield 
example", "def _parse_el_example(array_feats, array_feat_types, quant_feats):\n out_example = []\n d_keys = sorted(array_feats.keys())\n for k in d_keys:\n n_feat = quant_feats[k]\n point_feat = tf.decode_raw(array_feats[k], array_feat_types[k])\n point_feat = tf.reshape(point_feat, [quant_feats[k]])\n out_example.append(point_feat)\n return tuple(out_example)", "def parse_example(example):\n features = {\n 'input_ids': tf.io.VarLenFeature(tf.int64),\n 'label_ids': tf.io.VarLenFeature(tf.int64)\n }\n\n parsed_example = \\\n tf.io.parse_single_example(\n example, features=features)\n\n return {\n k: tf.sparse.to_dense(v) for k, v in\n parsed_example.items()\n }", "def input_fn(params):\n batch_size = params['batch_size']\n data_dir = params['data_dir']\n noise_dim = params['noise_dim']\n def parser(serialized_example):\n \"\"\"Parses a single tf.Example into image and label tensors.\"\"\"\n features = tf.parse_single_example(\n serialized_example,\n features={\n \"image\": tf.FixedLenFeature([], tf.string),\n \"label\": tf.FixedLenFeature([], tf.int64),\n })\n image = tf.decode_raw(features[\"image\"], tf.uint8)\n image.set_shape([CHANNELS * HEIGHT * WIDTH])\n # Reshape from [depth * height * width] to [depth, height, width].\n image = tf.cast(\n tf.transpose(tf.reshape(image, [CHANNELS, HEIGHT, WIDTH]), [1, 2, 0]),\n tf.float32) * (2. / 255) - 1\n\n label = tf.cast(features['label'], tf.int32)\n\n random_noise = tf.random_normal([noise_dim])\n features = {\n 'real_images': image,\n 'random_noise': random_noise}\n\n return features, label\n\n # TODO we should use an eval dataset for EVAL # pylint: disable=fixme\n image_files = [os.path.join(data_dir, 'train.tfrecords')]\n tf.logging.info(image_files)\n dataset = tf.data.TFRecordDataset([image_files])\n dataset = dataset.map(parser, num_parallel_calls=batch_size)\n dataset = dataset.prefetch(4 * batch_size).cache().repeat()\n if USE_ALTERNATIVE:\n dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))\n tf.logging.warning('Old version: Used tf.contrib.data.batch_and_drop_remainder instead of regular batch')\n else:\n dataset = dataset.batch(batch_size, drop_remainder=True)\n # Not sure why we use one_shot and not initializable_iterator\n features, labels = dataset.make_one_shot_iterator().get_next()\n\n return features, labels", "def convert_to_tfrecord(data_files, label_files, output_file, num_steps, test_flag):\n print('Generating %s' % output_file)\n\n with tf.python_io.TFRecordWriter(output_file) as record_writer:\n\n for idx in enumerate(data_files):\n\n print('Working on %s' % data_files[idx[0]])\n print('Working on %s' % label_files[idx[0]])\n\n #data = _read_data(data_files[idx[0]])\n #label = _read_data(label_files[idx[0]])\n\n #data = loadtxt(data_files[idx[0]])\n label = loadtxt(label_files[idx[0]])\n feat = [0,1,2,3]\n feat.extend(range(6,25))\n if test_flag:\n with open(data_files[idx[0]]) as infile:\n data = np.zeros([num_steps, 25])\n cnt = 0\n for line in infile:\n line = line.split()\n data[0:num_steps-1, :]=data[1:num_steps, :]\n data[num_steps-1,:]=line\n data1 = data\n data1[:,0] = signal.detrend(data1[:,0], axis=0)\n write_to_tfrecord(data1[:,feat], label[cnt:cnt+num_steps], num_steps, record_writer)\n cnt+=1\n else:\n with open(data_files[idx[0]]) as infile:\n data = []\n cnt = 1\n for line in infile:\n data.append(line.split())\n if cnt%num_steps==0:\n data = np.array(data, dtype=float)\n data.reshape(data.shape[0], -1)\n #data = signal.detrend(data, axis=0)\n write_to_tfrecord(data[:,feat], 
label[cnt-num_steps:cnt], num_steps, record_writer)\n data = []\n cnt=cnt+1", "def _preprocess_word_test_v1() -> lmp.dataset.AnalogyDataset:\n file_path = os.path.join(f'{lmp.path.DATA_PATH}', 'word-test.v1.txt')\n\n if not os.path.exists(file_path):\n raise FileNotFoundError(f'file {file_path} does not exist.')\n\n with open(file_path, 'r', encoding='utf8') as input_file:\n # Remove first line since it is just copyright.\n samples = [line.strip() for line in input_file.readlines()][1:]\n\n # Parsing.\n category = ''\n parsing_samples = []\n for sample in samples:\n # Category line.\n if re.match(r':', sample):\n category = sample[2:]\n continue\n\n # Word analogy line.\n parsing_samples.append(re.split(r'\\s+', sample) + [category])\n\n return lmp.dataset.AnalogyDataset(samples=parsing_samples)", "def read_examples(input_file):\n examples = []\n unique_id = 0\n with tf.gfile.GFile(input_file, \"r\") as reader:\n while True:\n line = tokenization.convert_to_unicode(reader.readline())\n if not line:\n break\n \n line = line.strip()\n text_a = None\n text_b = None\n m = re.match(r\"^(.*) \\|\\|\\| (.*)$\", line)\n \n if m is None:\n text_a = line\n else:\n text_a = m.group(1)\n text_b = m.group(2)\n examples.append(InputExample(unique_id=unique_id,\n text_a=text_a, \n text_b=text_b))\n unique_id += 1\n return examples", "def parse_record_reid(raw_record):\n keys_to_features = {\n 'image_raw': tf.FixedLenFeature([], tf.string),\n 'height': tf.FixedLenFeature([], tf.int64),\n 'width': tf.FixedLenFeature([], tf.int64),\n 'depth': tf.FixedLenFeature([], tf.int64),\n 'label': tf.FixedLenFeature([], tf.int64)\n }\n parsed = tf.parse_single_example(raw_record, keys_to_features)\n # image = tf.image.decode_image(\n # tf.reshape(parsed['image_raw'], shape=[]), _DEPTH)\n\n image = tf.decode_raw(parsed['image_raw'], tf.uint8)\n # image = tf.to_float(tf.image.convert_image_dtype(image, dtype=tf.uint8))\n image = tf.reshape(image, [_HEIGHT, _WIDTH, 3])\n # image = tf.cast(image, tf.float32) * (1. 
/ 255.0)\n image = tf.cast(image,tf.float32)\n\n label = tf.cast(parsed['label'],tf.int32)\n\n label = tf.one_hot(label, labels_nums, 1, 0)\n # labels={\"seg\":None,\"reid\":label}\n return image, label", "def _parse_function(example_proto):\n # Parse the tf.example according to the features_spec definition\n parsed_features = tf.parse_single_example(example_proto, features_spec)\n sequence = parsed_features[\"sequence\"]\n # Convert the sparse sequence tensor to dense.\n sequence_d = tf.sparse_to_dense(sequence.indices, sequence.dense_shape, sequence.values)\n # Return all the elements\n return parsed_features[\"sequence_length\"], parsed_features[\"label\"], sequence_d", "def _process_single(data):\n \n # Read data from tfrecord\n frame = open_dataset.Frame()\n frame.ParseFromString(bytearray(data.numpy()))\n scan_name = frame.context.name.replace('_','X') + 'FRAMENUM{}'.format(str(uuid4())) \n # process frame into data format we want\n # mesh_vertices, instance_labels, semantic_labels, instance_bboxes = prep_data(frame, 150000)\n\n for result in prep_data(frame, 150000):\n tag, data = result\n mesh_vertices, instance_labels, semantic_labels, instance_bboxes = data \n scan_name = frame.context.name.replace('_','X') + 'FRAMENUM{}'.format(str(uuid4())) \n if tag != 'FRUSTUM':\n FILENAME_TEMPLATE = BASE_OUTPUT_DIR + DEFAULT_OUTPUT_FILE_TEMPLATE\n ## Write mesh verticies\n with open(FILENAME_TEMPLATE.format(scan_name=scan_name, data_type=\"vert\"), 'wb+') as f:\n np.save(f, mesh_vertices)\n \n ## Write instance labels\n with open(FILENAME_TEMPLATE.format(scan_name=scan_name, data_type=\"ins_label\"), 'wb+') as f:\n np.save(f, instance_labels)\n \n ## Write semantic labels\n with open(FILENAME_TEMPLATE.format(scan_name=scan_name, data_type=\"sem_label\"), 'wb+') as f:\n np.save(f, semantic_labels)\n \n ## Write instance_bboxes labels\n with open(FILENAME_TEMPLATE.format(scan_name=scan_name, data_type=\"bbox\"), 'wb+') as f:\n np.save(f, instance_bboxes)\n else:\n FILENAME_TEMPLATE = BASE_OUTPUT_DIR + FRUSTUM_OUTPUT_FILE_TEMPLATE\n ## Write mesh verticies\n with open(FILENAME_TEMPLATE.format(scan_name=scan_name, data_type=\"vert\"), 'wb+') as f:\n np.save(f, mesh_vertices)\n \n ## Write instance labels\n with open(FILENAME_TEMPLATE.format(scan_name=scan_name, data_type=\"ins_label\"), 'wb+') as f:\n np.save(f, instance_labels)\n \n ## Write semantic labels\n with open(FILENAME_TEMPLATE.format(scan_name=scan_name, data_type=\"sem_label\"), 'wb+') as f:\n np.save(f, semantic_labels)\n \n ## Write instance_bboxes labels\n with open(FILENAME_TEMPLATE.format(scan_name=scan_name, data_type=\"bbox\"), 'wb+') as f:\n np.save(f, instance_bboxes)", "def serve_tf_examples_fn(serialized_tf_examples):\n feature_spec = tf_transform_output.raw_feature_spec()\n parsed_features = tf.io.parse_example(\n serialized_tf_examples, feature_spec)\n transformed_features = create_training_data(\n model.tft_layer(parsed_features))\n return model(transformed_features)", "def read_inaturalist(key='train', batch_size=64, image_size=299, target_size=None, do_augment=False, buffer_size=2000):\n\n data_size = {'train': 265213, 'val': 3030, 'test': 35350}\n data_label = {'train': 1, 'val': 1, 'test': 0}\n num_images = data_size[key]\n steps_per_epoch = num_images // batch_size\n skip_count = num_images % batch_size\n num_labels = data_label[key]\n num_classes = 1010\n if target_size is None:\n target_size = image_size\n if image_size != target_size:\n print('Image size {} does not equal target size {}. 
Resize to be done.'.format(image_size, target_size))\n\n filenames = os.listdir(FLAGS.DEFAULT_IN + 'tfrecords_{}/'.format(FLAGS.TARGET_SIZE))\n filenames = [filename.replace('.tfrecords', '') for filename in filenames if key in filename]\n if key == 'test':\n filenames = sorted(filenames) # test tfrecords must be read in order\n print('Reading tfrecords from {}'.format(FLAGS.DEFAULT_IN + 'tfrecords_{}/'.format(FLAGS.TARGET_SIZE)))\n print('The following tfrecords are read: {}'.format(filenames))\n\n dataset = ReadTFRecords(\n filenames, num_labels=num_labels, batch_size=batch_size, buffer_size=buffer_size,\n skip_count=skip_count, num_threads=8, decode_jpeg=True,\n use_one_hot_label=True, use_smooth_label=True if key == 'train' else False, num_classes=num_classes)\n if do_augment:\n from GeneralTools.inception_preprocessing import preprocess_image\n # apply basic data augmentation (random crops, random left-right flipping, color distortion)\n dataset.image_preprocessor(\n 3, image_size, image_size,\n image_augment_fun=lambda x: preprocess_image(\n x, height=target_size, width=target_size,\n is_training=True if key == 'train' else False, fast_mode=False))\n if target_size != image_size:\n dataset.batch_shape = [batch_size, target_size, target_size, 3] \\\n if FLAGS.IMAGE_FORMAT == 'channels_last' else [batch_size, 3, target_size, target_size]\n else:\n dataset.image_preprocessor(\n 3, image_size, image_size,\n resize=None if target_size == image_size else [target_size, target_size])\n dataset.scheduler(shuffle_data=False if key == 'test' else True)\n\n return dataset, steps_per_epoch", "def prepare_example(image_path, annotations, label_map_dict):\n print(\"encoding %s\" % image_path)\n with tf.gfile.GFile(image_path, 'rb') as fid:\n encoded_png = fid.read()\n encoded_png_io = io.BytesIO(encoded_png)\n image = pil.open(encoded_png_io)\n\n if image.format != 'PNG':\n raise ValueError('Image format error')\n\n key = hashlib.sha256(encoded_png).hexdigest()\n # obtain attributes\n width, height = image.size\n img_filename = image_path.split('/')[-1]\n\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n classes = []\n classes_text = []\n truncated = []\n occlud = []\n\n xmin.append(int(annotations[2]) / width)\n ymin.append(int(annotations[3]) / height)\n xmax.append(int(annotations[4]) / width)\n ymax.append(int(annotations[5]) / height)\n class_name = annotations[1]\n classes_text.append(class_name)\n classes.append(label_map_dict[class_name])\n classes_text = [class_text.encode('utf-8') for class_text in classes_text]\n trun, occ = annotations[6].split(',')\n truncated.append(int(trun))\n occlud.append(int(occ))\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(width),\n 'image/filename': dataset_util.bytes_feature(img_filename.encode('utf8')),\n 'image/source_id': dataset_util.bytes_feature(img_filename.encode('utf8')),\n 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),\n 'image/encoded': dataset_util.bytes_feature(encoded_png),\n 'image/format': dataset_util.bytes_feature('png'.encode('utf8')),\n 'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),\n 'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),\n 'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),\n 'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),\n 'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n 
'image/object/class/label': dataset_util.int64_list_feature(classes),\n 'image/object/truncated': dataset_util.int64_list_feature(truncated),\n 'image/object/view': dataset_util.int64_list_feature(occlud),\n }))\n return example", "def file_based_convert_examples_to_features(\n examples, max_seq_length, tokenizer, output_file\n):\n\n writer = tf.python_io.TFRecordWriter(output_file)\n\n for (ex_index, example) in enumerate(examples):\n feature = convert_single_example(ex_index, example, max_seq_length, tokenizer)\n\n def create_int_feature(values):\n f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return f\n\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n features[\"is_real_example\"] = create_int_feature([int(feature.is_real_example)])\n if isinstance(feature.label_ids, list):\n label_ids = feature.label_ids\n else:\n label_ids = feature.label_ids[0]\n features[\"label_ids\"] = create_int_feature(label_ids)\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n writer.write(tf_example.SerializeToString())\n writer.close()", "def dict_to_tf_example(data, label_map_dict):\n\n encoded_jpg_io = io.BytesIO()\n image = data['image']\n image.save(encoded_jpg_io, \"JPEG\", quality=80)\n encoded_jpg = encoded_jpg_io.getvalue()\n key = hashlib.sha256(encoded_jpg).hexdigest()\n\n width, height = image.size\n\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n rotation = []\n classes = []\n classes_text = []\n truncated = []\n poses = []\n masks = []\n difficult_obj = []\n for obj in data['object']:\n difficult = bool(int(obj['difficult']))\n difficult_obj.append(int(difficult))\n\n xmin.append(float(obj['bndbox']['xmin']) / width)\n ymin.append(float(obj['bndbox']['ymin']) / height)\n xmax.append(float(obj['bndbox']['xmax']) / width)\n ymax.append(float(obj['bndbox']['ymax']) / height)\n rotation.append(float(obj['rotation']))\n masks.append(obj['mask'])\n classes_text.append(obj['name'].encode('utf8'))\n classes.append(label_map_dict[obj['name']])\n truncated.append(int(obj['truncated']))\n poses.append(obj['pose'].encode('utf8'))\n\n mask = np.stack(masks)\n encoded_mask = pn_encode(mask.flatten()).tolist()\n print('mask encode:', mask.shape, '->', len(encoded_mask)) ###\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(width),\n 'image/filename': dataset_util.bytes_feature(\n data['filename'].encode('utf8')),\n 'image/source_id': dataset_util.bytes_feature(\n data['filename'].encode('utf8')),\n 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),\n 'image/encoded': dataset_util.bytes_feature(encoded_jpg),\n 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),\n 'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),\n 'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),\n 'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),\n 'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),\n 'image/object/rotation': dataset_util.float_list_feature(rotation),\n 'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n 'image/object/class/label': dataset_util.int64_list_feature(classes),\n 
'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),\n 'image/object/truncated': dataset_util.int64_list_feature(truncated),\n 'image/object/view': dataset_util.bytes_list_feature(poses),\n 'image/segmentation/object': dataset_util.int64_list_feature(encoded_mask),\n 'image/segmentation/object/class': dataset_util.int64_list_feature(classes),\n }))\n return example", "def file_based_convert_examples_to_features(examples,label_list,output_file):\n writer = tf.python_io.TFRecordWriter(output_file)\n for(ex_index, example) in enumerate(examples):\n if ex_index%10000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature = convert_single_example(ex_index, example, label_list)\n \n def create_int_feature(values):\n f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return f\n features = {\n \"\":tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))),\n }\n\n tf_example = tf.train.Example(features=tf.train.Features(features=features))\n writer.write(tf_example.SerializeToString())", "def convert_examples_to_features(self, examples, tfrecord_file, label_names):\n writer = tf.io.TFRecordWriter(tfrecord_file)\n\n label_to_id = dict((name, i) for i, name in enumerate(label_names))\n for example in examples:\n features = collections.OrderedDict()\n\n label_id = label_to_id[example.label]\n input_ids = [label_id] * self.seq_len\n\n features['input_ids'] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=list(input_ids)))\n features['label_ids'] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=list([label_id])))\n tf_example = tf.train.Example(\n features=tf.train.Features(feature=features))\n writer.write(tf_example.SerializeToString())\n writer.close()", "def parse_single_example(serialized, features, name=None, example_names=None):\n return parse_single_example_v2(serialized, features, example_names, name)", "def process_example(self):\n\n name_files, transition_funcs = self.treat.get_transition_functions()\n for name_file, transition_func in zip(name_files, transition_funcs):\n print(f\"Name file: {name_file}\")\n self.afd(transition_func, self.q0, self.qfs, self.words)\n print('-'*50)", "def from_tfrecords(self) -> tf.data.Dataset:\n dataset = self.parser()\n\n def decode_rawdata(input_records):\n return self.decoder(input_records) # pylint: enable=g-long-lambda\n\n dataset = dataset.map(decode_rawdata, num_parallel_calls=self.AUTOTUNE)\n dataset = dataset.prefetch(self.config.batch_size)\n\n if self._training:\n dataset = dataset.map(\n lambda image_id, image, bbox, classes: self.augmenter(\n image, bbox, image_id, classes, return_image_label=False\n )\n )\n\n dataset = dataset.map(lambda *args: self.encoder(*args))\n\n # pad to fixed length.\n dataset = dataset.map(\n self.pad_to_fixed_len,\n num_parallel_calls=self.AUTOTUNE,\n )\n\n # make batches.\n dataset = dataset.batch(\n self.config.batch_size, drop_remainder=self._drop_remainder\n )\n dataset = self.pretraining(dataset)\n return dataset", "def parse_example(example):\n metadata, data = example.strip().split('\\n\\n')\n metadata = pytoml.loads(metadata)\n metadata['success'] = metadata['result'] == 'success'\n metadata['name'] = re.sub(r'[ -]', '_', metadata['name'].lower())\n del metadata['result']\n return Example(data=data.strip(), **metadata)", "def parser(serialized_example):\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'env': tf.FixedLenFeature([1, 4], tf.int64),\n # 'env_segment_number': 
tf.FixedLenFeature([], tf.int64),\n # 'env_segment_cpu': tf.FixedLenFeature([], tf.int64),\n # 'env_segment_mem': tf.FixedLenFeature([], tf.int64),\n # 'query_plan_ops': tf.VarLenFeature(tf.string),\n # 'query_table_size': tf.VarLenFeature(tf.float32),\n # 'segment_cpu_usage': tf.VarLenFeature(tf.float32),\n 'label': tf.FixedLenFeature([], tf.float32)\n })\n env = tf.cast(features['env'], tf.float32)\n # image.set_shape([DEPTH * HEIGHT * WIDTH])\n\n # # Reshape from [depth * height * width] to [depth, height, width].\n # image = tf.cast(\n # tf.transpose(tf.reshape(image, [DEPTH, HEIGHT, WIDTH]), [1, 2, 0]),\n # tf.float32)\n label = tf.cast(features['label'], tf.float32)\n reshape_label = tf.reshape(features['label'], (1,1))\n return env, reshape_label", "def parse_fn(serialized_example):\n\n features = {\n 'video': tf.io.FixedLenFeature([], tf.string),\n 'label': tf.io.FixedLenFeature([], tf.int64),\n 'seq_len': tf.io.FixedLenFeature([], tf.int64),\n 'height': tf.io.FixedLenFeature([], tf.int64),\n 'width': tf.io.FixedLenFeature([], tf.int64),\n 'channels': tf.io.FixedLenFeature([], tf.int64)\n }\n\n # Parse the input tf.Example proto using the dictionary above.\n parsed = tf.io.parse_single_example(serialized_example, features)\n\n # Decodes and reshapes video\n seq_len = tf.cast(parsed['seq_len'], tf.uint32)\n height = tf.cast(parsed['height'], tf.uint32)\n width = tf.cast(parsed['width'], tf.uint32)\n channels = tf.cast(parsed['channels'], tf.uint32)\n video = tf.io.decode_raw(parsed['video'], tf.uint8)\n video = tf.reshape(video, shape=[seq_len, height, width, channels])\n\n # Normalizes video frames, label\n video = tf.cast(video, tf.float32) / 255\n label = tf.cast(parsed['label'], tf.float32)\n return video, label", "def serve_tf_examples_fn(serialized_tf_examples):\n feature_spec = tf_transform_output.raw_feature_spec()\n feature_spec.pop('income_bracket')\n parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec)\n transformed_features = model.tft_layer(parsed_features)\n return model(transformed_features)", "def file_based_convert_examples_to_features(\n examples, label_list, output_mode, max_seq_length, max_predictions_per_seq, tokenizer, output_file):\n\n writer = tf.python_io.TFRecordWriter(output_file)\n\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature = convert_single_example(ex_index, example, label_list, output_mode,\n max_seq_length, max_predictions_per_seq, tokenizer)\n\n def create_int_feature(values):\n f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return f\n\n def create_float_feature(values):\n f = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))\n return f\n\n\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n features[\"masked_lm_positions\"] = create_int_feature(feature.masked_lm_positions)\n features[\"masked_lm_ids\"] = create_int_feature(feature.masked_lm_ids)\n features[\"masked_lm_weights\"] = create_float_feature(feature.masked_lm_weights)\n if output_mode == 'classification':\n features[\"label_ids\"] = create_int_feature([feature.label_id])\n elif output_mode == 'regression':\n features[\"label_ids\"] = create_float_feature([feature.label_id])\n else:\n raise KeyError(mode)\n 
features[\"is_real_example\"] = create_int_feature(\n [int(feature.is_real_example)])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n writer.write(tf_example.SerializeToString())\n writer.close()", "def parse_sequence_example(serialized, image_feature, caption_feature):\n\tcontext, sequence = tf.parse_single_sequence_example(\n\t\t\tserialized,\n\t\t\tcontext_features={\n\t\t\t\t\timage_feature: tf.FixedLenFeature([], dtype=tf.string)\n\t\t\t},\n\t\t\tsequence_features={\n\t\t\t\t\tcaption_feature: tf.FixedLenSequenceFeature([], dtype=tf.int64),\n\t\t\t})\n\n\tencoded_image = context[image_feature]\n\tcaption = sequence[caption_feature]\n\treturn encoded_image, caption", "def load_tfrecord(fname: str, logger_tag: str) -> tf.data.Dataset:\n logger = logging.getLogger(logger_tag)\n logger.info('Start loading dataset for file %s', fname)\n raw_dataset = tf.data.TFRecordDataset([fname])\n\n def _parse(example_proto):\n feature_description = {\n KEY_IMAGE_BYTES: tf.io.FixedLenFeature([], tf.string, default_value=''),\n KEY_CLASS: tf.io.FixedLenFeature([], tf.int64, default_value=-1),\n }\n return collections.OrderedDict(\n tf.io.parse_single_example(example_proto, feature_description)\n )\n\n ds = raw_dataset.map(_parse)\n\n def _transform(item):\n return collections.OrderedDict([\n (KEY_IMAGE_DECODED, tf.io.decode_jpeg(item[KEY_IMAGE_BYTES])),\n (KEY_CLASS, tf.reshape(item[KEY_CLASS], [1])),\n ])\n\n ds = ds.map(_transform)\n logger.info('Finished loading dataset for file %s', fname)\n return ds", "def convert_example(example, tokenizer):\n\n feature = tokenizer(\n text=example['question'],\n text_pair=example['answer'],\n max_seq_len=args.max_seq_length)\n feature['labels'] = example['labels']\n feature['id'] = example['id']\n\n return feature" ]
[ "0.684948", "0.6609394", "0.6605501", "0.6414771", "0.6374184", "0.63535416", "0.6350793", "0.63333046", "0.63030624", "0.6258336", "0.62369555", "0.6210575", "0.6175964", "0.61759615", "0.6129125", "0.61166155", "0.6116493", "0.6086684", "0.60770196", "0.6074543", "0.6071042", "0.6033146", "0.60206956", "0.6011301", "0.6000911", "0.5988553", "0.5987053", "0.598548", "0.5967405", "0.5957685", "0.59435374", "0.59435374", "0.59435374", "0.5940338", "0.5914137", "0.5895183", "0.58835936", "0.58824855", "0.58812433", "0.58711296", "0.5857881", "0.58363825", "0.5827167", "0.58229834", "0.58203435", "0.5808087", "0.57930875", "0.57859945", "0.5783976", "0.5783169", "0.5766456", "0.5765272", "0.5761306", "0.5741451", "0.5731082", "0.5720027", "0.5719679", "0.5715224", "0.5703166", "0.5701501", "0.5678712", "0.56775373", "0.5676691", "0.56708765", "0.5668256", "0.56567186", "0.564752", "0.5641986", "0.5641986", "0.5640632", "0.5640632", "0.5639226", "0.5636407", "0.56360734", "0.5634506", "0.5630603", "0.5627476", "0.5625184", "0.5623052", "0.56203073", "0.5604895", "0.5599973", "0.55928874", "0.5592577", "0.55751294", "0.5571051", "0.5570918", "0.5563897", "0.5554011", "0.55524343", "0.55523545", "0.55390525", "0.5517161", "0.5513479", "0.5511363", "0.5510697", "0.5507934", "0.54937315", "0.5491583", "0.5489204" ]
0.5721495
55
3x3 convolution with padding
def conv3x3(in_planes, out_planes, stride=1): return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
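A short usage sketch for the padded 3x3 convolution helper above, assuming PyTorch; the tensor sizes are illustrative. With kernel_size=3 and padding=1 the spatial size is preserved at stride 1 and halved at stride 2:

    import torch
    import torch.nn as nn

    def conv3x3(in_planes, out_planes, stride=1):
        # padding=1 keeps H and W unchanged when stride=1
        return nn.Conv2d(in_planes, out_planes, kernel_size=3,
                         stride=stride, padding=1, bias=False)

    x = torch.randn(1, 64, 56, 56)              # N, C, H, W (illustrative sizes)
    print(conv3x3(64, 64)(x).shape)             # torch.Size([1, 64, 56, 56])
    print(conv3x3(64, 128, stride=2)(x).shape)  # torch.Size([1, 128, 28, 28])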
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def conv3x3(in_planes, out_planes, stride=1, dilation=1, padding=1):\n return nn.Conv2d(in_planes,\n out_planes,\n kernel_size=3,\n stride=stride,\n padding=padding,\n dilation=dilation,\n bias=False)", "def conv3x3(in_planes, out_planes, stride=1, padding=1, dilation=1, groups=1, bias=False):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=padding, dilation=dilation, groups=groups,bias=bias)", "def conv2d(args):\n inp_ = args[0]\n kernel = args[1]\n stride = args[2]\n padding = args[3]\n (batch_size, in_channels, H, W) = inp_.shape\n (out_channels, in_channels_t, Hk, Wk) = kernel.shape\n Hc = int((H - Hk)/stride)+1\n Wc = int((W - Wk)/stride)+1\n conv_layer = np.zeros((batch_size, out_channels, Hc, Wc))\n for batch_i in range(batch_size):\n for o_chann_i in range(out_channels):\n for in_chann_i in range(in_channels):\n curr_ker = kernel[o_chann_i, in_chann_i, :, :]\n curr_inp = inp_[batch_i, in_chann_i, :, :]\n h_ind = 0\n while h_ind + Hk <= H:\n w_ind = 0\n while w_ind + Wk <= W:\n inp_patch = curr_inp[h_ind:h_ind+Hk, w_ind:w_ind+Wk]\n # Sum the conv_value of all the inp_channels\n conv_layer[batch_i, o_chann_i, h_ind//stride, w_ind//stride] += np.sum(inp_patch*curr_ker)\n w_ind+=stride\n h_ind+=stride\n return conv_layer", "def conv3x3_with_neigh(in_planes, out_planes, stride=1, padding=1, bias=False):\n return nn.Conv2d(in_planes,\n out_planes,\n kernel_size=3,\n stride=stride,\n padding=padding,\n bias=bias)", "def conv3x3(in_planes, out_planes, stride=1) -> nn.Conv2d:\n return nn.Conv2d(\n in_planes,\n out_planes,\n kernel_size=3,\n stride=stride,\n padding=1,\n bias=False,\n )", "def conv3x3(in_planes, out_planes, stride=1, dilation=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, bias=False, dilation=dilation)", "def conv3x3(in_planes, out_planes, stride=1, dilation=1):\n return nn.Conv2d(\n in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, dilation=dilation, bias=False\n )", "def conv3x3(in_ch, out_ch, stride=1):\n return nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=stride, padding=1)", "def clConvolution(self, size, mask):", "def convolve(images, kernels, padding='same', stride=(1, 1)):\n m = images.shape[0]\n h = images.shape[1]\n w = images.shape[2]\n c = images.shape[3]\n kh = kernels.shape[0]\n kw = kernels.shape[1]\n nc = kernels.shape[3]\n sh = stride[0]\n sw = stride[1]\n\n if padding == 'same':\n ph = max((h - 1) * sh + kh - h, 0)\n pt = int(np.ceil(ph / 2))\n pb = pt\n pw = max((w - 1) * sw + kw - w, 0)\n pl = int(np.ceil(pw / 2))\n pr = pl\n elif padding == 'valid':\n pt, pb, pl, pr = 0, 0, 0, 0\n else:\n pt, pb = padding[0], padding[0]\n pl, pr = padding[1], padding[1]\n\n oh = ((h - kh + pt + pb) // sh) + 1\n ow = ((w - kw + pl + pr) // sw) + 1\n\n images = np.pad(images, pad_width=((0, 0), (pt, pb), (pl, pr), (0, 0)),\n mode='constant', constant_values=0)\n\n conv = np.zeros((m, oh, ow, nc))\n for k in range(nc):\n for i in range(oh):\n for j in range(ow):\n aux = images[:, i * sh:i * sh + kh, j * sw:j * sw + kw] \\\n * kernels[:, :, :, k]\n conv[:, i, j, k] = np.sum(aux, axis=(1, 2, 3))\n return conv", "def convolution(img, kernel, padding=True):\n result = np.zeros_like(img)\n p_size_i = kernel.shape[0] // 2\n p_size_j = kernel.shape[1] // 2\n\n if padding:\n padded_img = np.zeros((img.shape[0] + 2 * p_size_i, img.shape[1] + 2 * p_size_j))\n i_first = p_size_i\n i_last = padded_img.shape[0] - p_size_i - 1\n j_first = p_size_j\n j_last = padded_img.shape[1] - 
p_size_j - 1\n padded_img[i_first: i_last + 1, j_first: j_last + 1] = img\n else:\n padded_img = img.copy()\n i_first = p_size_i\n i_last = padded_img.shape[0] - p_size_i - 1\n j_first = p_size_j\n j_last = padded_img.shape[1] - p_size_j - 1\n \n for i in range(i_first, i_last):\n for j in range(j_first, j_last):\n window = padded_img[i - p_size_i: i + p_size_i + 1, j - p_size_j: j + p_size_j + 1]\n res_pix = np.sum(window * kernel)\n result[i - p_size_i, j - p_size_j] = res_pix\n return result", "def conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1)", "def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, groups=groups, bias=False, dilation=dilation)", "def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, groups=groups, bias=False, dilation=dilation)", "def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, groups=groups, bias=False, dilation=dilation)", "def conv3x3(in_channels, out_channels, stride=1, \n padding=1, bias=True, groups=1):\n return nn.Conv2d(\n in_channels, \n out_channels, \n kernel_size=3, \n stride=stride,\n padding=padding,\n bias=bias,\n groups=groups)", "def conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)", "def conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)", "def conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)", "def conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)", "def conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)", "def conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)", "def conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)", "def conv3x3(in_planes, out_planes, stride=1):\n\treturn nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n\t\t\t\t\t padding=1, bias=False)", "def conv3x3(in_planes, out_planes, stride=1):\n\treturn nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n\t\t\t\t\t padding=1, bias=False)", "def conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes,\n out_planes,\n kernel_size=3,\n stride=stride,\n padding=1,\n bias=False)", "def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, groups=groups, \\\n dilation=dilation, padding=dilation, bias=False)", "def conv3x3(in_planes, out_planes, stride = 1):\n return nn.Conv2d(in_planes, out_planes, kernel_size = 3, stride = stride,\n padding = 1, bias = False)", "def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)", "def 
conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)", "def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)", "def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)", "def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)", "def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)", "def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)", "def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)", "def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)", "def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)", "def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)", "def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)", "def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)", "def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)", "def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)", "def conv3x3(in_planes, out_planes, stride=1,num_group=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False,groups=num_group)", "def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation,\n groups=groups, bias=False, dilation=dilation)", "def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n return nn.Conv2d(\n in_planes,\n out_planes,\n kernel_size=3,\n stride=stride,\n padding=dilation,\n groups=groups,\n bias=False,\n dilation=dilation,\n )", "def conv3x3(in_planes, 
out_planes, stride=1, groups=1, dilation=1):\r\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\r\n padding=dilation, groups=groups, bias=False, dilation=dilation)", "def conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=(3,3), stride=stride, padding = 1,\n bias=False)", "def conv3d(value, weights, padding=\"SYMMETRIC\"):\n return tf.nn.conv3d(value, weights, strides=[1, 1, 1, 1, 1], padding=\"SAME\")", "def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)", "def conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv2d(\n in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False\n )", "def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)", "def conv3x3(in_planes, out_planes, stride=1, bias=False, group=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,padding=1, groups=group, bias=bias)", "def convolve_channels(images, kernel, padding='same', stride=(1, 1)):\n m = images.shape[0]\n image_h = images.shape[1]\n image_w = images.shape[2]\n filter_h = kernel.shape[0]\n filter_w = kernel.shape[1]\n s1 = stride[0]\n s2 = stride[1]\n\n if padding == 'valid':\n pad_h = 0\n pad_w = 0\n\n if padding == 'same':\n pad_h = int(((image_h - 1) * s1 + filter_h - image_h) / 2) + 1\n pad_w = int(((image_w - 1) * s2 + filter_w - image_w) / 2) + 1\n\n if type(padding) == tuple:\n pad_h = padding[0]\n pad_w = padding[1]\n\n n_dim1 = int((image_h + 2 * pad_h - filter_h) / stride[0]) + 1\n n_dim2 = int((image_w + 2 * pad_w - filter_w) / stride[1]) + 1\n convolve = np.zeros((m, n_dim1, n_dim2))\n new_images = np.pad(images, ((0, 0), (pad_h, pad_h), (pad_w, pad_w),\n (0, 0)), mode='constant')\n for x in range(n_dim1):\n for y in range(n_dim2):\n mini_matrix = new_images[:, x * s1: x * s1 + filter_h,\n y * s2: y * s2 + filter_w, :]\n values = np.sum(mini_matrix * kernel,\n axis=1).sum(axis=1).sum(axis=1)\n convolve[:, x, y] = values\n return (convolve)", "def convolve_channels(images, kernel, padding='same', stride=(1, 1)):\n m, h, w, c = images.shape\n KernelHeight, kernelWidth, c = kernel.shape\n StrideHeight, StrideWidth = stride\n\n if padding == 'valid':\n PaddingHeight = 0\n PaddingWidth = 0\n elif padding == 'same':\n PaddingHeight = int(\n (((h - 1) * StrideHeight + KernelHeight - h) / 2) + 1)\n PaddingWidth = int((((w - 1) * StrideWidth + kernelWidth - w) / 2) + 1)\n else:\n PaddingHeight, PaddingWidth = padding\n\n OutputH = int(((h + 2 * PaddingHeight - KernelHeight) / StrideHeight) + 1)\n OutputW = int(((w + 2 * PaddingWidth - kernelWidth) / StrideWidth) + 1)\n\n ImagePadded = np.pad(\n images,\n ((0, 0), (PaddingHeight, PaddingHeight),\n (PaddingWidth, PaddingWidth), (0, 0)),\n 'constant'\n )\n\n output = np.zeros((m, OutputH, OutputW))\n ImageRange = np.arange(m)\n\n for i_OutputH in range(OutputH):\n for i_OutputW in range(OutputW):\n s_i_OutputH = i_OutputH * StrideHeight\n s_i_OutputW = i_OutputW * StrideWidth\n flt = ImagePadded[ImageRange,\n s_i_OutputH:KernelHeight + s_i_OutputH,\n s_i_OutputW:kernelWidth + s_i_OutputW,\n :]\n output[ImageRange, i_OutputH, i_OutputW] = np.sum(\n flt * kernel, axis=(1, 2, 3))\n return output", "def 
conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv1d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)", "def conv2d_forward(x, w, b, pad, stride):\n #raise NotImplementedError\n \n\n \n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################\n ba,h,wd,c=x.shape\n f,fh,fw,c=w.shape\n n_h=((h-fh+2*pad)//stride)+1\n n_w=((wd-fw+2*pad)//stride)+1\n x_paded=np.pad(x,pad,'constant')\n temp_dim=x_paded.shape[3]\n #print(temp_dim)\n out=np.zeros((ba,n_h,n_w,f))\n for m in range(0,ba):\n for i in range(0,n_h):\n for j in range(0,n_w):\n for n in range(0,f):\n h_t=i*stride\n h_t2=i*stride+fh\n w_t=j*stride\n w_t2=j*stride+fw\n temp=x_paded[pad+m,h_t:h_t2,w_t:w_t2,pad:temp_dim-pad] \n out[m,i,j,n]=np.sum(temp*w[n,:,:,:])+b[n]\n \n return out", "def compute_conv(in_size, kernel, stride, padding):\n return (in_size + 2 * padding - kernel) // stride + 1", "def conv3x3(in_planes, out_planes, stride=1, groups=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, groups=groups,\n padding=1, dilation=1, bias=False)", "def convolution(img, kernel, padding='fill'):\n kernel = np.rot90(kernel, 2)\n h,w = kernel.shape[:2]\n t,b,l,r = (h-1)//2, h//2, (w-1)//2, w//2 # Use numpy padding because it works for >2d\n padshape = [(t,b),(l,r)]+[(0,0)]*(len(img.shape[2:]))\n padded_img = np.pad(img, padshape, mode={'fill':'constant','replicate':'edge'}[padding])\n conved_img = np.zeros_like(img)\n for i in 1+np.arange(-h//2,h//2):\n for j in 1+np.arange(-w//2,w//2):\n if kernel[t+i,l+j]==0: continue\n conved_img += kernel[t+i,l+j]*padded_img[t+i:-b+i or None,l+j:-r+j or None]\n return conved_img", "def CustomConv3D(x_in, nf, strides=1, kernel_size = 3):\r\n\tx_out = Conv3D(nf, kernel_size=3, padding='same',kernel_initializer='he_normal', strides=strides)(x_in)\r\n\t#print(\"AAAAA\", x_out.shape)\r\n\tx_out = BatchNormalization()(x_out)\r\n\tx_out = LeakyReLU(0.2)(x_out)\r\n\treturn x_out", "def convolve(images, kernels, padding='same', stride=(1, 1)):\n m, h, w = images.shape[:3]\n kh, kw, c, nc = kernels.shape\n sh, sw = stride\n if type(padding) is tuple:\n ph, pw = padding\n elif padding == 'valid':\n ph, pw = 0, 0\n else:\n ph = (((h - 1) * sh + kh - h) // 2) + 1\n pw = (((w - 1) * sw + kw - w) // 2) + 1\n out_images = np.zeros((m, (h - kh + (2 * ph))//sh + 1,\n (w - kw + (2 * pw))//sw + 1, nc))\n images = np.pad(images, ((0, 0), (ph, ph), (pw, pw), (0, 0)), 'constant')\n for i in range((h - kh + (2 * ph))//sh + 1):\n for j in range((w - kw + (2 * pw))//sw + 1):\n for n in range(nc):\n out_images[:, i, j, n] = np.sum(kernels[:, :, :, n] * images[\n :, i*sh: i*sh + kh, j*sw: j*sw + kw, :], axis=(1, 2, 3))\n return out_images", "def conv3x3(in_planes, out_planes, stride=1, **kwargs):\r\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\r\n padding=1, bias=False, **kwargs)", "def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\r\n return nn.Conv3d(in_planes, out_planes, kernel_size=3, stride=stride,\r\n padding=dilation, groups=groups, bias=False, dilation=dilation)", "def upconv_block(i, filters, shape, activation='relu', padding='same',\n data_format='channels_first'):\n c1 = Conv3D(filters, shape, activation=activation,\n padding=padding, data_format=data_format)(i)\n c2 = Conv3D(filters, shape, activation=activation,\n padding=padding, data_format=data_format)(c1)\n u = 
UpSampling3D(size=(1, 2, 2), data_format=data_format)(c2)\n c3 = Conv3D(int(filters / 2),\n (1, 2, 2),\n activation=activation,\n padding=padding,\n data_format=data_format)(u)\n return c3", "def conv2D(null,channels,X,stride,kernel_shape,padding = False,initialize_weights = True,*args):\n # filters = dimensionality of output space\n # If padding is enabled, we pad the input with zeros such that the input size\n # remains the same if weights with stride 1 are applied to the input\n if initialize_weights:\n kernel = np.random.normal(size = (kernel_shape[0],kernel_shape[1],kernel_shape[2]))*math.sqrt(1/(kernel_shape[0]*kernel_shape[1]*kernel_shape[2])) # Our input\n kernel = torch.FloatTensor(kernel)\n kernel.requires_grad = False\n else:\n kernel = args[0] # weights and bias must be given if initialise weights is disabled\n bias = args[1]\n kernel_shape = kernel.shape\n \n X = X.detach().numpy()\n if padding: # Can only pad during initialization -> weights and input shapes cannot change during feedforward and backpropagation\n if kernel_shape[1] % 2 == 0 and kernel_shape[2] % 2 == 0:\n X = np.pad(X,((0,0),(math.floor(kernel_shape[1]/2)-1,math.floor(kernel_shape[1]/2)),(math.floor(kernel_shape[2]/2),math.floor(kernel_shape[2]/2)-1)), 'symmetric')\n elif kernel_shape[1] % 2 != 0 and kernel_shape[2] % 2 == 0:\n X = np.pad(X,((0,0),(math.floor(kernel_shape[1]/2),math.floor(kernel_shape[1]/2)),(math.floor(kernel_shape[2]/2),math.floor(kernel_shape[2]/2)-1)), 'symmetric')\n elif kernel_shape[1] % 2 == 0 and kernel_shape[2] % 2 != 0:\n X = np.pad(X,((0,0),(math.floor(kernel_shape[1]/2)-1,math.floor(kernel_shape[1]/2)),(math.floor(kernel_shape[2]/2),math.floor(kernel_shape[2]/2))), 'symmetric')\n else:\n X = np.pad(X,((0,0),(math.floor(kernel_shape[1]/2),math.floor(kernel_shape[1]/2)),(math.floor(kernel_shape[2]/2),math.floor(kernel_shape[2]/2))), 'symmetric')\n \n X = torch.FloatTensor(X)\n \n img_shape = X.shape\n \n output_size1 = math.floor((img_shape[1] - kernel_shape[1])/(stride)) + 1\n output_size2 = math.floor((img_shape[2] - kernel_shape[2])/(stride)) + 1\n output_shape = [channels,output_size1,output_size2]\n \n X_im2col,im = im2col(X,kernel,stride)\n \n \n if initialize_weights:\n weight = torch.reshape(kernel,(kernel_shape[0]*kernel_shape[1]*kernel_shape[2],1))\n # weight consists of only one weight vector. But the dimensionality of output space has to be\n # num_filters. 
So we need to stack weight vectors horizontally and create num_filters number of\n # feature maps\n for i in range(channels-1):\n weight2 = np.random.normal(size = (kernel_shape[0]*kernel_shape[1]*kernel_shape[2],1))*math.sqrt(1/(kernel_shape[0]*kernel_shape[1]*kernel_shape[2])) # Our input\n weight2 = torch.FloatTensor(weight2)\n weight2.requires_grad = False\n weight = torch.cat((weight2, weight),1) # do this num_filters - 1 number of times\n conv_output = torch.t(X_im2col).mm(weight)\n bias = torch.Tensor(np.random.normal(size = conv_output.shape))\n conv_output += bias\n conv_output = torch.reshape(conv_output,(output_shape))\n return torch.nn.Parameter(conv_output), torch.nn.Parameter(weight),X_im2col,im, output_shape,bias\n else:\n # Since weights are already initialised, the relevant channels are already dictated in the architecture.\n # Therefore, conv output is just a matmul\n conv_output = torch.t(X_im2col).mm(kernel) + bias\n return torch.nn.Parameter(conv_output),X_im2col", "def conv3x3(in_planes, out_planes, stride=1, groups=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, groups=groups, bias=False)" ]
[ "0.75487494", "0.7377383", "0.7326011", "0.7300764", "0.72812986", "0.7276984", "0.725113", "0.72361106", "0.723129", "0.72272843", "0.71892864", "0.7178802", "0.71645725", "0.71645725", "0.71645725", "0.7158782", "0.7156602", "0.7156602", "0.7156602", "0.7156602", "0.7156602", "0.7156602", "0.7156602", "0.7129435", "0.7129435", "0.71225184", "0.7115465", "0.70966864", "0.70940775", "0.70940775", "0.70940775", "0.70940775", "0.70940775", "0.70940775", "0.70940775", "0.70940775", "0.70940775", "0.70940775", "0.70940775", "0.70940775", "0.70940775", "0.70940775", "0.70940775", "0.70838654", "0.7083813", "0.7082816", "0.7081188", "0.7076807", "0.70698804", "0.7046064", "0.70459145", "0.7038423", "0.7019238", "0.6994352", "0.69907147", "0.69762117", "0.69692105", "0.69595623", "0.69565064", "0.6954264", "0.6941116", "0.6934428", "0.6895985", "0.68941146", "0.6889336", "0.6881835", "0.68738455" ]
document_score: 0.7081058
document_rank: 74
query: Constructs a BiRealNet18 model.
def birealnet18(pretrained=False, **kwargs):
    model = BiRealNet(BasicBlock, [4, 4, 4, 4], **kwargs)
    return model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model", "def birealnet34(pretrained=False, **kwargs):\n model = BiRealNet(BasicBlock, [6, 8, 12, 6], **kwargs)\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)\n return model", "def resnet18(pretrained=False):\n model = ResNet(BasicBlock, [2, 2, 2, 2])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(bitW, bitA, pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], bitW, bitA, **kwargs)\n if pretrained == True:\n load_dict = torch.load('./full_precision_records/weights/model_best.pth.tar')['state_dict']\n model_dict = model.state_dict()\n model_keys = model_dict.keys()\n for name, param in load_dict.items():\n if name.replace('module.', '') in model_keys:\n model_dict[name.replace('module.', '')] = param \n model.load_state_dict(model_dict) \n return model", "def rl_modelrl_l1_tiny():\n hparams = rl_modelrl_tiny()\n hparams.generative_model_params = \"basic_conv_l1\"\n return hparams", "def resnet18(**kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model", "def resnext18( **kwargs):\n model = ResNeXt(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model", "def rl_modelrl_l1_base():\n hparams = rl_modelrl_base()\n hparams.generative_model_params = \"basic_conv_l1\"\n return hparams", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n\n return model", "def __init__(self, n_lm, n_ang):\n super(MVCNet, self).__init__()\n self.convM1_sag = conv_bn_prelu_dropout(1, 64, 4, 2, 1, 64, 64, 0.25)\n self.convM1_cor = conv_bn_prelu_dropout(1, 64, 4, 2, 1, 64, 64, 0.25)\n self.xModule1 = xModule([64, 128, 64], 64, 4, 2, 1, 128, 128, 0.25)\n self.xModule2 = xModule([128, 64, 32], 128, 4, 2, 1, 256, 256, 0.25)\n self.xModule3 = xModule([256, 32, 16], 256, 4, 2, 1, 512, 512, 0.25)\n self.SLE_sag = SLE([512, 16, 8], 512, n_lm)\n self.SLE_cor = SLE([512, 16, 8], 512, n_lm)\n self.CAE_sag = CAE(512, 
n_lm, n_ang)\n self.CAE_cor = CAE(512, n_lm, n_ang)", "def rl_modelrl_l2_tiny():\n hparams = rl_modelrl_tiny()\n hparams.generative_model_params = \"basic_conv_l2\"\n return hparams", "def rl_modelrl_l2_base():\n hparams = rl_modelrl_base()\n hparams.generative_model_params = \"basic_conv_l2\"\n return hparams", "def __init__(self):\n self.device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n \n self.model = models.resnet101() \n self.model.load_state_dict(torch.load(WEIGHTS_DIR))\n \n self.model.to(self.device)\n \n self.model.eval()\n \n # labels\n with open(LABELS, 'r') as f:\n self.labels = ast.literal_eval(f.read())\n \n # define data transform\n self.transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])", "def ResNet18(num_classes=10):\n return ResNet(BasicBlock, \n [2, 2, 2, 2],\n num_classes=num_classes)", "def resnet18(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18'], model_dir='.'), strict=False)\n return model", "def resnet18(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18'], model_dir='.'), strict=False)\n return model", "def resnet18(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18'], model_dir='.'), strict=False)\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNetFeatures(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n _load_pretrained(model, model_zoo.load_url(model_urls['resnet18']))\n return model", "def raw_model():\n model = cobra.Model(id_or_model=\"raw_model\", name=\"raw_model\")\n rxn_1 = cobra.Reaction(\"BIOMASS_TEST\")\n rxn_2 = cobra.Reaction(\"RXN2\")\n rxn_3 = cobra.Reaction(\"RXN3\")\n rxn_4 = cobra.Reaction(\"RXN4\")\n model.add_reactions([rxn_1, rxn_2, rxn_3, rxn_4])\n model.objective = rxn_3\n return model", "def newModel(self, model_name):\n model = super().newModel(model_name)\n model.Params.Method = self.getint(CC.L2_GRB_METHOD, section=CC.GUROBI, default=-1)\n model.Params.Presolve = self.getint(CC.L2_GRB_PRESOLVE, section=CC.GUROBI, default=-1)\n model.Params.PreSparsify = self.getint(CC.L2_GRB_PRESPARSIFY, section=CC.GUROBI, default=-1)\n return model", "def build_model():", "def _create_model(self):\n config = {\n \"input_features\": self.input_features,\n \"output_features\": self.output_features,\n \"combiner\": {\"type\": \"concat\", \"output_size\": 14},\n TRAINER: {\"epochs\": 2, BATCH_SIZE: 128},\n }\n return LudwigModel(config, logging_level=logging.WARN)", "def build_sys_rec_model():\n print(\"building model...\")\n model = Merchant2VecModel()\n model.train(final_training=True)\n model.save_model()", "def __init__(self, units):\n super(BahdanauAttention, self).__init__()\n self.W1 = tf.keras.layers.Dense(units)\n self.W2 = tf.keras.layers.Dense(units)\n self.V = tf.keras.layers.Dense(1)", "def resnet18_custom(input_channels):\n model = ResNet(input_channels, BasicBlock, [2])\n\n return model", "def resnet18(shortcut_type, num_classes, in_channels):\n model = ResNet(BasicBlock, [2, 2, 2, 2], shortcut_type, num_classes, 
in_channels)\n return model", "def rl_modelrl_ae_l1_base():\n hparams = rl_modelrl_ae_base()\n hparams.generative_model_params = \"basic_conv_l1\"\n return hparams", "def rl_modelrl_l1_short():\n hparams = rl_modelrl_short()\n hparams.generative_model_params = \"basic_conv_l1\"\n return hparams", "def rl_modelrl_l2_short():\n hparams = rl_modelrl_short()\n hparams.generative_model_params = \"basic_conv_l2\"\n return hparams", "def ResNetLN18(num_classes=10):\n return ResNet(BasicLNBlock, \n [2, 2, 2, 2], \n num_classes=num_classes,\n norm_type=\"LN\")", "def resnet18_origin(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def build_model(cls, args, task):\n global PAD_IDX, EOS_IDX\n # make sure all arguments are present in older models\n w2v_lm_architecture2(args)\n\n if not hasattr(args, \"max_source_positions\"):\n args.max_source_positions = 2048\n if not hasattr(args, \"max_target_positions\"):\n args.max_target_positions = 2048\n\n tgt_dict = task.target_dictionary\n PAD_IDX = tgt_dict.pad()\n EOS_IDX = tgt_dict.eos()\n\n encoder = cls.build_encoder(args)\n assigner = cls.build_assigner(args, encoder.d)\n lm = cls.build_lm(args, task)\n\n return cls(args, encoder, assigner, lm)", "def create_model():\n\n class Net(nn.Cell):\n def construct(self, x, y):\n return x\n\n net = Net()\n model_simple = Model(net)\n\n return model_simple", "def model_wrapper(self):\n original = self.args.rnn_type\n if(self.args.rnn_type=='DeepCoNN'):\n self.args.rnn_type = 'RAW_MSE_MAX_CNN_FM'\n self.args.base_encoder = 'Flat'\n elif(self.args.rnn_type=='TRANSNET'):\n self.args.rnn_type = 'RAW_MSE_MAX_CNN_FM_TNET'\n self.args.base_encoder = 'Flat'\n elif(self.args.rnn_type=='DATT'):\n self.args.rnn_type ='RAW_MSE_DUAL_DOT'\n self.args.base_encoder = 'Flat'\n elif(self.args.rnn_type=='MPCN'):\n self.args.rnn_type = 'RAW_MSE_MPCN_FN_FM'\n self.args.base_encoder = 'NBOW'\n\n print(\"Conversion to {} | base:{}\".format(\n self.args.rnn_type,\n self.args.base_encoder))", "def rl_modelrl_ae_l2_base():\n hparams = rl_modelrl_ae_base()\n hparams.generative_model_params = \"basic_conv_l2\"\n return hparams", "def build_mobilenetv2_backbone(cfg, input_shape):\n stem = MobileStem(\n input_shape.channels,\n cfg.MODEL.MOBILENET.STEM_OUT_CHANNELS,\n cfg.MODEL.MOBILENET.NORM,\n cfg.MODEL.MOBILENET.ACTIVATION\n )\n\n model = MobileNetV2(\n stem,\n cfg.MODEL.MOBILENET.INVERTED_RESIDUAL_SETTING,\n cfg.MODEL.MOBILENET.NORM,\n cfg.MODEL.MOBILENET.ACTIVATION,\n cfg.MODEL.MOBILENET.NUM_CLASSES,\n cfg.MODEL.MOBILENET.OUT_FEATURES,\n )\n\n model.freeze(cfg.MODEL.BACKBONE.FREEZE_AT)\n return model", "def __init__(self,\n trn: str = None,\n dev: str = None,\n tst: str = None,\n sampler_builder: SamplerBuilder = None,\n dependencies: str = None,\n scalar_mix: ScalarMixWithDropoutBuilder = None,\n use_raw_hidden_states=False,\n lr=1e-3, separate_optimizer=False,\n cls_is_bos=True,\n sep_is_eos=True,\n delimiter=None,\n max_seq_len=None, sent_delimiter=None, char_level=False, hard_constraint=False,\n transform=None,\n tagging_scheme='BMES',\n crf=False,\n token_key='token',\n dict_force: Union[DictInterface, Union[Dict[str, Any], Set[str]]] = None,\n dict_combine: Union[DictInterface, Union[Dict[str, Any], Set[str]]] = None,\n **kwargs) -> None:\n super().__init__(**merge_locals_kwargs(locals(), kwargs, excludes=(\n 'self', 'kwargs', '__class__', 'dict_force', 'dict_combine'))) # avoid to config\n 
self.transform = transform\n self.vocabs = VocabDict()\n self.dict_force = dict_force\n self.dict_combine = dict_combine", "def dilated_resnet18(**kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n\n return model", "def ffc_resnet18(pretrained=False, **kwargs):\n model = FFCResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model", "def create_model_eg_bin3orig(my_learning_rate):\n # This is a first try to get a simple model that works\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Conv2D(\n filters=128, kernel_size=(3,3), input_shape=(8,8,15), strides=(1, 1), padding='same'))\n model.add(tf.keras.layers.MaxPooling2D((2, 2)))\n model.add(tf.keras.layers.Conv2D(\n filters=128, kernel_size=(3,3), strides=(1, 1), padding='same'))\n model.add(tf.keras.layers.MaxPooling2D((2, 2)))\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(units=64, activation='relu'))\n model.add(tf.keras.layers.Dense(units=33))\n\n model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=my_learning_rate),\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\n return model", "def __init__(self, elementLabels: tuple):\n super().__init__(DEFAULT_MODEL)\n pass", "def BuildModel(ANNSetup,model):\n\n if(isinstance(ANNSetup.Activ,str)):\n model.add(Dense(ANNSetup.Neurons[0], kernel_regularizer=l2(ANNSetup.Regu), activation=ANNSetup.Activ, kernel_initializer=Winit(ANNSetup.Winit), input_dim=ANNSetup.InputDim))\n if(ANNSetup.Dropout != None):\n model.add(Dropout(ANNSetup.Dropout))\n for i in range(1,len(ANNSetup.Neurons)):\n if(i == len(ANNSetup.Neurons)-1):\n model.add(Dense(ANNSetup.Neurons[i], kernel_initializer=Winit(ANNSetup.Winit), activation='sigmoid'))\n else:\n model.add(Dense(ANNSetup.Neurons[i], kernel_initializer=Winit(ANNSetup.Winit), activation=ANNSetup.Activ))\n else:\n model.add(Dense(ANNSetup.Neurons[0], kernel_regularizer=l2(ANNSetup.Regu), kernel_initializer=Winit(ANNSetup.Winit), input_dim=ANNSetup.InputDim))\n model.add(LeakyReLU(alpha=ANNSetup.Activ))\n if(ANNSetup.Dropout != None):\n model.add(Dropout(ANNSetup.Dropout))\n for i in range(1,len(ANNSetup.Neurons)):\n if(i == len(ANNSetup.Neurons)-1):\n model.add(Dense(ANNSetup.Neurons[i], kernel_initializer=Winit(ANNSetup.Winit), activation='sigmoid'))\n else:\n model.add(Dense(ANNSetup.Neurons[i], kernel_initializer=Winit(ANNSetup.Winit)))\n model.add(LeakyReLU(alpha=ANNSetup.Activ))\n\n return model", "def MakeModel(self):\n pass", "def create_model() -> Model:\n # Create a neural network model that includes several dense layers with hyperbolic tangent activations, L2 regularization, and batch normalization\n regularizer = l2(0)\n dropout = 0\n activation = 'tanh'\n model = Sequential([\n InputLayer(input_shape=(16,)),\n BatchNormalization(),\n Dense(12, activation=activation, kernel_regularizer=regularizer),\n Dropout(dropout),\n Dense(8, activation=activation, kernel_regularizer=regularizer),\n Dropout(dropout),\n Dense(1, kernel_regularizer=regularizer)\n ])\n # Output a summary of the model's architecture\n print(model.summary())\n # Use a mean squared error loss function and an Adam optimizer; do not print accuracy because this is a regression task\n model.compile(\n optimizer='adam',\n loss='mse',\n metrics=['mae']\n )\n # Return the untrained model\n return model", "def make_model(self):\n onnx_graph = onnx.helper.make_graph(\n self._nodes, self._name, self._inputs, self._outputs, self._initializers\n )\n kwargs = {}\n 
kwargs[\"opset_imports\"] = self._get_opsets()\n kwargs[\"producer_name\"] = \"TVM Relay\"\n kwargs[\"producer_version\"] = tvm.__version__\n\n return onnx.helper.make_model(onnx_graph, **kwargs)", "def _create_base_model(self, modality):\n\n if modality == \"RGB\":\n in_channels = 3\n elif modality == \"Flow\":\n in_channels = 10\n elif modality == \"Audio\":\n in_channels = 1\n\n model_dir = os.path.dirname(\n os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n )\n model_dir = os.path.join(model_dir, \"weights\")\n\n is_audio = True if modality == \"Audio\" else False\n\n if \"vgg\" in self.base_model_name:\n base_model = VGG(self.cfg.model.vgg.type, modality, in_channels)\n elif \"resnet\" in self.base_model_name:\n base_model = Resnet(self.cfg.model.resnet.depth, modality, in_channels)\n elif self.base_model_name == \"bninception\":\n pretrained = \"kinetics\" if modality == \"Flow\" else \"imagenet\"\n base_model = bninception(\n in_channels,\n modality,\n model_dir=model_dir,\n pretrained=pretrained,\n is_audio=is_audio,\n attend=self.use_attention,\n )\n\n return base_model", "def create_model(max_seq_len, adapter_size=64):\n\n # adapter_size = 64 # see - arXiv:1902.00751\n\n # create the bert layer\n with tf.io.gfile.GFile(bert_config_file, \"r\") as reader:\n bc = StockBertConfig.from_json_string(reader.read())\n bert_params = map_stock_config_to_params(bc)\n bert_params.adapter_size = adapter_size\n bert = BertModelLayer.from_params(bert_params, name=\"bert\")\n\n input_ids = keras.layers.Input(shape=(max_seq_len,), dtype='int32', name=\"input_ids\")\n # token_type_ids = keras.layers.Input(shape=(max_seq_len,), dtype='int32', name=\"token_type_ids\")\n # output = bert([input_ids, token_type_ids])\n output = bert(input_ids)\n\n print(\"bert shape\", output.shape)\n cls_out = keras.layers.Lambda(lambda seq: seq[:, 0, :])(output)\n cls_out = keras.layers.Dropout(0.5)(cls_out)\n logits = keras.layers.Dense(units=1024, activation=\"tanh\")(cls_out)\n logits = keras.layers.Dropout(0.5)(logits)\n logits = keras.layers.Dense(units=2, activation=\"softmax\")(logits)\n\n # model = keras.Model(inputs=[input_ids, token_type_ids], outputs=logits)\n # model.build(input_shape=[(None, max_seq_len), (None, max_seq_len)])\n model = keras.Model(inputs=input_ids, outputs=logits)\n model.build(input_shape=(None, max_seq_len))\n\n # load the pre-trained model weights\n load_stock_weights(bert, bert_ckpt_file)\n\n # freeze weights if adapter-BERT is used\n if adapter_size is not None:\n freeze_bert_layers(bert)\n\n model.compile(optimizer=keras.optimizers.Adam(),\n loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=[keras.metrics.SparseCategoricalAccuracy(name=\"acc\")])\n\n model.summary()\n\n return model", "def __init__(self, model: Model1D):\n super().__init__(model=model)", "def __init__(self, model: Model1D):\n super().__init__(model=model)", "def __init__(self, model: Model1D):\n super().__init__(model=model)", "def __init__(self, model: Model1D):\n super().__init__(model=model)", "def __init__(self, model: Model1D):\n super().__init__(model=model)", "def build_model(cls, args, task):\n # print(\"In build_model !!!\")\n default_architecture(args)\n assert args.load_hf_bert_from != ''\n encoder = HuggingFaceBertEncoder(args, task.dictionary)\n\n return cls(args, encoder, task)", "def resnet18(pretrained: bool = False, include_top: bool = False, freeze: bool = False):\n model = torchvision.models.resnet18(pretrained=pretrained)\n if freeze:\n 
set_parameter_requires_grad(model, \"fc\")\n if not include_top:\n output_size = model.fc.in_features\n model.fc = nn.Identity()\n return BackboneModule(model, output_size)\n else:\n return model", "def build_model():\n model = Sequential()\n model.add(Dense(beer_emb.EMB_DIM, activation=\"relu\",\n input_dim=beer_emb.EMB_DIM))\n model.add(Dropout(0.5))\n model.add(Dense(64, activation=\"relu\"))\n model.add(Dropout(0.5))\n model.add(Dense(32, activation=\"relu\"))\n model.add(Dense(1, activation='sigmoid'))\n model.compile(loss='binary_crossentropy',\n metrics=['accuracy'], optimizer='adam')\n\n return model", "def build_model(cls, args, task):\n\n # make sure all arguments are present in older models\n base_lm_architecture(args)\n\n if args.decoder_layers_to_keep:\n args.decoder_layers = len(args.decoder_layers_to_keep.split(\",\"))\n\n if getattr(args, \"max_target_positions\", None) is None:\n args.max_target_positions = getattr(\n args, \"tokens_per_sample\", DEFAULT_MAX_TARGET_POSITIONS\n )\n\n if args.character_embeddings:\n embed_tokens = CharacterTokenEmbedder(\n task.source_dictionary,\n eval(args.character_filters),\n args.character_embedding_dim,\n args.decoder_embed_dim,\n args.char_embedder_highway_layers,\n )\n elif args.adaptive_input:\n embed_tokens = AdaptiveInput(\n len(task.source_dictionary),\n task.source_dictionary.pad(),\n args.decoder_input_dim,\n args.adaptive_input_factor,\n args.decoder_embed_dim,\n options.eval_str_list(args.adaptive_input_cutoff, type=int),\n args.quant_noise_pq,\n args.quant_noise_pq_block_size,\n )\n else:\n embed_tokens = cls.build_embedding(\n args, task.source_dictionary, args.decoder_input_dim\n )\n\n if args.tie_adaptive_weights:\n assert args.adaptive_input\n assert args.adaptive_input_factor == args.adaptive_softmax_factor\n assert (\n args.adaptive_softmax_cutoff == args.adaptive_input_cutoff\n ), \"{} != {}\".format(\n args.adaptive_softmax_cutoff, args.adaptive_input_cutoff\n )\n assert args.decoder_input_dim == args.decoder_output_dim\n\n decoder = LinearTransformerDecoder(\n args, task.target_dictionary, embed_tokens, no_encoder_attn=True\n )\n return cls(decoder)", "def create_model(self):\n # Create the generator and discriminators\n self.generator_lungs = self.generator_model()\n self.generator_organs = self.generator_model()\n\n self.disc_lungs = self.discriminator_model_lungs()\n self.disc_organs = self.discriminator_model_organs()\n\n # Initialize the optimizer and backend\n self.generator_optimizer = tf.keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5)\n self.discriminator_optimizer = tf.keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5)\n self.set_backend = tf.keras.backend.set_floatx('float32')\n\n # Create the summary writer\n self.create_summary_writer()\n print('Models are created.')\n return self", "def createModel(self):\n outputs, inputs = baseUNet(self.input_shape,\n self.conv_depth,\n self.n_classes,\n self.init_w,\n self.dropout)\n \n if self.regression == True:\n outputs = Lambda(getPropOfGround)(outputs)\n \n model = Model(inputs = inputs,outputs = outputs)\n \n model.compile(optimizer = self.optimizer,\n loss=self.loss_function,\n metrics=self.metrics)\n\n if self.old_weights != None:\n model.set_weights(self.old_weights)\n self.model = model", "def rl_modelrl_l1_medium():\n hparams = rl_modelrl_medium()\n hparams.generative_model_params = \"basic_conv_l1\"\n return hparams", "def __init__(self, nodeLabels: tuple):\n super().__init__(DEFAULT_MODEL)\n pass", "def init_model(\n sample_length: int, 
base_model: str, num_classes: int = None\n ) -> torchvision.models.video.resnet.VideoResNet:\n if base_model not in (\"ig65m\", \"kinetics\"):\n raise ValueError(\n f\"Not supported model {base_model}. Should be 'ig65m' or 'kinetics'\"\n )\n\n # Decide if to use pre-trained weights for DNN trained using 8 or for 32 frames\n model_name = f\"r2plus1d_34_{sample_length}_{base_model}\"\n\n print(f\"Loading {model_name} model\")\n\n model = torch.hub.load(\n TORCH_R2PLUS1D,\n model_name,\n num_classes=MODELS[model_name],\n pretrained=True,\n )\n\n # Replace head\n if num_classes is not None:\n model.fc = nn.Linear(model.fc.in_features, num_classes)\n\n return model, model_name", "def make_model():\n m = model_class(*argv[2:-1])\n modelobj[\"model\"] = m", "def __init__(self, model: MT):\n self.model: Final[MT] = model", "def __init__(self, opt):\n BaseModel.__init__(self, opt)\n\n self.loss_names = ['G_SH']\n self.visual_names = ['input', 'pr_SH', 'gt_SH']\n self.model_names = ['G1']\n\n if not opt.no_brightness:\n self.loss_names += ['G_BA', 'G_BC']\n self.visual_names += ['pr_BA', 'gt_BA']\n self.model_names += ['G3']\n\n if opt.reg_LTM:\n self.loss_names += ['LTMReg']\n\n self.light_res = opt.light_res\n\n\n # Intrinsic network\n if opt.latent_Ls or opt.latent_Lt:\n netG1name = 'unet_256_latent_inL'\n else:\n netG1name = 'unet_256_latent'\n\n input_nc = opt.input_nc\n if opt.in_Ls:\n input_nc += 1\n if opt.in_Lt:\n input_nc += 1\n\n if opt.LTM:\n self.dim_LTM = self.light_res**2\n if self.opt.enc_LTM:\n self.dim_LTM = opt.dim_LTM\n use_hidden = True if not opt.enc_ill_hid==-1 else False\n self.enc_LTM = networks.init_net(networks.IlluminationEncoder(self.light_res**2, opt.enc_ill_hid, self.dim_LTM, use_hidden), opt.init_type, opt.init_gain, self.gpu_ids)\n\n self.netG1 = networks.define_G(input_nc, self.dim_LTM, opt.ngf, netG1name, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, True, self.gpu_ids)\n\n else:\n if opt.no_latent_color:\n output_nc = 3\n else:\n output_nc = 1\n self.netG1 = networks.define_G(input_nc, output_nc, opt.ngf, netG1name, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, False, self.gpu_ids)\n\n # Brightness network\n g3_input_nc = 3\n if opt.cas and opt.cat_In:\n g3_input_nc = g3_input_nc + opt.input_nc\n if not opt.cas:\n if opt.in_Ls:\n g3_input_nc += 1\n if opt.in_Lt:\n g3_input_nc += 1\n self.netG3 = networks.define_G(g3_input_nc, 1, opt.ngf, 'resnet_9blocks_latent', opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, False, self.gpu_ids)\n if self.isTrain:\n # define loss functions\n self.criterionS = torch.nn.MSELoss()\n self.criterionBA = torch.nn.MSELoss()\n # self.criterionBP = torch.nn.MSELoss()\n self.criterionBC = torch.nn.MSELoss()\n self.criterionReg = torch.nn.MSELoss()\n # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.\n self.optimizer_G1 = torch.optim.Adam(self.netG1.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n # self.optimizer_G2 = torch.optim.Adam(self.netG2.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizer_G3 = torch.optim.Adam(self.netG3.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizers.append(self.optimizer_G1)\n # self.optimizers.append(self.optimizer_G2)\n self.optimizers.append(self.optimizer_G3)", "def __init__(self):\n # Initializing the Model with the class\n super(Model, self).__init__()\n # torch.nn.Linear applies a Linear transformation. The first parameter is the size of each input sample. 
The second is the size of the output sample\n self.linear = torch.nn.Linear(1, 1)", "def resnet34(bitW, bitA, pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], bitW, bitA, **kwargs)\n if pretrained == True:\n load_dict = torch.load('./full_precision_records/weights/model_best.pth.tar')['state_dict']\n model_dict = model.state_dict()\n model_keys = model_dict.keys()\n for name, param in load_dict.items():\n if name.replace('module.', '') in model_keys:\n model_dict[name.replace('module.', '')] = param \n model.load_state_dict(model_dict) \n return model", "def load_model():\n\t# Load model options\n\tprint ('Loading model parameters...')\n\twith open('%s.pkl'%path_to_umodel, 'rb') as f:\n\t\tuoptions = pkl.load(f)\n\twith open('%s.pkl'%path_to_bmodel, 'rb') as f:\n\t\tboptions = pkl.load(f)\n\n\t# Load parameters\n\tuparams = init_params(uoptions)\n\tuparams = load_params(path_to_umodel, uparams)\n\tutparams = init_tparams(uparams)\n\tbparams = init_params_bi(boptions)\n\tbparams = load_params(path_to_bmodel, bparams)\n\tbtparams = init_tparams(bparams)\n\n\t# Extractor functions\n\tprint ('Compiling encoders...')\n\tembedding, x_mask, ctxw2v = build_encoder(utparams, uoptions)\n\tf_w2v = theano.function([embedding, x_mask], ctxw2v, name='f_w2v')\n\tembedding, x_mask, ctxw2v = build_encoder_bi(btparams, boptions)\n\tf_w2v2 = theano.function([embedding, x_mask], ctxw2v, name='f_w2v2')\n\n\t# Tables\n\tprint ('Loading tables...')\n\tutable, btable = load_tables()\n\n\t# Store everything we need in a dictionary\n\tprint ('Packing up...')\n\tmodel = {}\n\tmodel['uoptions'] = uoptions\n\tmodel['boptions'] = boptions\n\tmodel['utable'] = utable\n\tmodel['btable'] = btable\n\tmodel['f_w2v'] = f_w2v\n\tmodel['f_w2v2'] = f_w2v2\n\n\treturn model", "def load_fasttext_format(cls, model_file, encoding='utf8'):\n model = cls()\n if not model_file.endswith('.bin'):\n model_file += '.bin'\n model.file_name = model_file\n model.load_binary_data(encoding=encoding)\n return model", "def resnet10(**kwargs):\n model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)\n return model", "def build_unet_backbone(name: str,\r\n input_tensor: tf.Tensor,\r\n n_levels: int = 4,\r\n weights: Optional[str] = None) -> tf.keras.Model:\r\n if name not in _MODELS:\r\n supported_models = list_supported_models()\r\n supported_models = '\\n'.join(f'- {o}' for o in supported_models)\r\n raise ValueError(f\"Backbone {name} is not supported. 
\"\r\n f\"Supported backbones are: \\n {supported_models}\")\r\n\r\n model_cls, _ = _MODELS[name]\r\n model = model_cls(input_tensor=input_tensor,\r\n include_top=False,\r\n weights=weights)\r\n\r\n outputs = [model.get_layer(o).output\r\n for o in _DEFAULT_FEATURE_LAYERS[name][:n_levels]]\r\n\r\n return tf.keras.Model(inputs=model.inputs,\r\n outputs=outputs[::-1],\r\n name=name)", "def __init__(self, model: Model1D):\n self._model = model", "def __init__(self, backbone_name, config):\n\n backbone_config = Schema(\n {\n Required(\"input_shape\"): Schema((int, int, int)),\n Required(\"include_top\"): bool,\n Required(\"weights\"): str,\n Optional(\"alpha\"): float,\n }\n )\n\n config = backbone_config(config)\n\n if backbone_name == \"MobileNetV2\":\n self.model = tf.keras.applications.MobileNetV2(**config)\n elif backbone_name == \"ResNet50\":\n self.model = tf.keras.applications.ResNet50(**config)\n elif backbone_name == \"InceptionV3\":\n self.model = tf.keras.applications.InceptionV3(**config)\n\n # Remove Layers until Conv4\n for i, layer in enumerate(reversed(self.model.layers)):\n if backbone_name == \"ResNet50\" and layer._name == \"conv4_block6_out\":\n break\n elif (\n backbone_name == \"MobileNetV2\" and layer._name == \"block_13_expand_relu\"\n ):\n break\n else:\n self.model._layers.pop()\n\n self.model.layers[-1]._name = \"feature_map\"\n\n self.model = Model(\n self.model.input, self.model.layers[-1].output, name=\"Backbone\"\n )", "def load_model():\n logging.info(\"Load language model...\")\n ngram_arpa_t = pkg_resources.resource_filename(\"hwrt\", \"misc/ngram.arpa.tar.bz2\")\n with tarfile.open(ngram_arpa_t, \"r:bz2\") as tar:\n tarfolder = tempfile.mkdtemp()\n tar.extractall(path=tarfolder)\n ngram_arpa_f = os.path.join(tarfolder, \"ngram.arpa\")\n with open(ngram_arpa_f) as f:\n content = f.read()\n ngram_model = NgramLanguageModel()\n ngram_model.load_from_arpa_str(content)\n return ngram_model", "def __init__(self, config: BertConfig):\r\n super().__init__(config)\r\n ### YOUR CODE HERE\r\n self.num_labels = config.num_labels # [0, 1] (start or end)\r\n self.bert = BertModel(config)\r\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # TODO: Not a separate FFN ? (For Start_FFN and End_FFN)\r\n\r\n ### END YOUR CODE\r\n\r\n # Don't forget initializing the weights\r\n self.init_weights()", "def __init__(self):\n self.scaler = None\n self.model = None\n self.encoder = {}\n\n self._load_model()\n return", "def build_model(self):\n import tensorflow as tf\n \n y = tf.nn.relu(tf.matmul(self.variables[\"input_observation\"], self.variables[\"W1\"]) + \n self.variables[\"b1\"], name=\"y1\")\n \n for i in range(self.n_hidden-1):\n y = tf.nn.relu(tf.matmul(y, self.variables[\"W\"+str(i+2)]) + \n self.variables[\"b\"+str(i+2)], name=\"y\"+str(i+2))\n \n self.variables[\"y\"] = [tf.matmul(y, self.variables[\"Wo_0\"]) + self.variables[\"bo_0\"]]\n for i in range(1, len(self.output_size)):\n self.variables[\"y\"] += [tf.matmul(y, self.variables[\"Wo_%s\"%i]) + self.variables[\"bo_%s\"%i]]", "def __init__(self):\n self.name = \"Osyczka\"\n objectives = [ob_os_1, ob_os_2]\n constraints = [con_os_1, con_os_2, con_os_3, con_os_4, con_os_5, con_os_6]\n decisions = [Decision(0, 10), Decision(0, 10), Decision(1, 5), Decision(0, 6), Decision(1, 5), Decision(0, 10)]\n Model.__init__(self, objectives, constraints, decisions)", "def model_build(self):\n\n # Define the input placeholder as a tensor with shape input_shape. 
Think of this as your input image!\n X_input = Input(self.inputData[0].shape)\n\n '''\n # CONV -> BN -> RELU Block applied to X\n X = Conv2D(8, (8, 8), name='conv0')(X_input)\n X = BatchNormalization(name='bn0')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 4), name='max_pool0')(X)\n X = Dropout(0.1, name='dropout0')(X)\n\n X = Conv2D(16, (16, 16), name='conv1')(X)\n X = BatchNormalization(name='bn1')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 4), name='max_pool1')(X)\n X = Dropout(0.1, name='dropout1')(X)\n\n X = Conv2D(16, (32, 32), name='conv2')(X)\n X = BatchNormalization(name='bn2')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 4), name='max_pool2')(X)\n X = Dropout(0.1, name='dropout2')(X)\n' '''\n\n X = Dense(500, activation='relu', name='fc0')(X_input)\n X = Dropout(0.1, name='dropout1')(X)\n X = Dense(500, activation='relu', name='fc1')(X)\n X = Dropout(0.1, name='dropout2')(X)\n X = Dense(3, activation='softmax', name='fc2')(X)\n\n # Create model. This creates your Keras model instance, you'll use this instance to train/test the model.\n self.model = Model(inputs=X_input, outputs=X, name='acouModel')", "def __init__(self):\n self.model = self._get_model()\n\n # NOTE: The order of this list hardcoded here, and needs to be changed when re-training the model!\n # When exporting the model in tflite format, the model_spec is lost, so we cannot do it like that:\n # classes = ['???'] * model.model_spec.config.num_classes\n # label_map = model.model_spec.config.label_map\n # for label_id, label_name in label_map.as_dict().items():\n # classes[label_id-1] = label_name\n self.classes = ['Baked Goods', 'Salad', 'Cheese', 'Seafood', 'Tomato']", "def build_model_mobilenet(num_classes):", "def rl_modelrl_l2_medium():\n hparams = rl_modelrl_medium()\n hparams.generative_model_params = \"basic_conv_l2\"\n return hparams", "def bl_resnet50(pretrained=False, **kwargs):\n model = bL_ResNet([2, 3, 5, 3], **kwargs)\n # print ('model created')\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model", "def build_model(self) -> nn.Module:\n pass", "def create_model():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--DISC_LR', type=float, default=1e-4)\r\n parser.add_argument('--GEN_LR', type=float, default=1e-3)\r\n parser.add_argument('--GEN_BETA1', type=float, default=0.9)\r\n parser.add_argument('--GEN_BETA2', type=float, default=0.999)\r\n parser.add_argument('--IMAGE_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_EMBED_SIZE', type=int, default=2048)\r\n parser.add_argument('--WORD_EMBED_SIZE', type=int, default=512)\r\n parser.add_argument('--VOCAB_SIZE', type=int, default=1004)\r\n args, task_args = parser.parse_known_args()\r\n override_if_not_in_args('--max_steps', '1000', task_args)\r\n override_if_not_in_args('--batch_size', '64', task_args)\r\n override_if_not_in_args('--eval_set_size', '370', task_args)\r\n override_if_not_in_args('--eval_interval_secs', '2', task_args)\r\n override_if_not_in_args('--log_interval_secs', '2', task_args)\r\n override_if_not_in_args('--min_train_eval_rate', '2', task_args)\r\n\r\n return Model(args.DISC_LR, args.GEN_LR, args.GEN_BETA1, args.GEN_BETA2,\r\n args.IMAGE_SIZE, args.QUES_EMBED_SIZE, args.WORD_EMBED_SIZE,\r\n args.QUES_SIZE, args.VOCAB_SIZE), task_args", "def resnet18(num_classes, pretrained=False):\n model = ResNet(BasicBlock, [2, 2, 2, 2], num_classes=1000)\n if pretrained:\n 
logger.info('Resnet18: Loading pretrained')\n _model_loaded = try_load_model('resnet18')\n model.load_state_dict(_model_loaded)\n if num_classes != 1000:\n model.reinit_fc(num_classes)\n\n layers = [model.fc, model.layer4, model.layer3]\n\n return model, layers", "def create_base_network(NumberOfFeatures, NumberOfClasses,init_mode='glorot_normal'):\n network = Sequential()\n network.add(Dense(44, activation='sigmoid', kernel_initializer=init_mode,input_dim=NumberOfFeatures))\n# network.add(Dense(22, activation='sigmoid',kernel_initializer=init_mode))\n network.add(Dense(NumberOfClasses, activation='softmax',kernel_initializer=init_mode))\n return network", "def create_model(self):\r\n model = self.model_fn(self.flags)\r\n print(model)\r\n return model", "def build_backbone(config):\n assert config.MODEL.BACKBONE in ['resnet50', 'resnet101'], \"backbone name is not supported!\"\n backbone_name = config.MODEL.BACKBONE\n dilation = False\n train_backbone = not config.EVAL\n return_interm_layers = False #TODO: impl case True for segmentation\n\n position_embedding = build_position_encoding(config.MODEL.TRANS.HIDDEN_SIZE)\n backbone = Backbone(backbone_name, train_backbone, return_interm_layers, dilation)\n model = Joiner(backbone, position_embedding)\n model.num_channels = backbone.num_channels\n\n return model", "def from_config(cls,config):\n ## find labels in list\n label_list = load_label_list(config.label_list)\n use_cuda = True if torch.cuda.is_available() else False\n\n global_args = {\n \"fp16\" : False,\n \"classification_report\" : True,\n \"tensorboard_dir\" : config.tensorboard_dir,\n \"wandb_project\" : config.wandb_project,\n \"wandb_kwargs\" : {\n \"name\" : config.wandb_name,\n \"entity\" : config.wandb_entity,\n }\n }\n\n model = NERModel(\n config.model_name,\n config.model_type,\n use_cuda=use_cuda,\n labels=label_list,\n args=global_args,\n )\n return cls(model,config)", "def __init__(self, received_bits, conv_code, prev_weights):\n\n # TOOD determine whether the init method also needs a dictionary\n # of valid connections and generated parity bits.\n\n # Determine rate and width from inputs\n self.rate = len(received_bits);\n self.width = len(conv_code[0]);\n\n # Make sure that window width is consistent across inputs\n #assert self.width == len(prev_weights);\n for term in conv_code:\n assert self.width == len(term);\n\n # Store received bits for time step and previous weights\n self.received_bits = received_bits\n self.prev_weights = prev_weights\n self.conv_code = conv_code\n self.trellis_keys = []\n\n # Initializes Unit Trellis Dictionary which is filled by generate_unit_tresllis\n # Keys are states and values are possible next bits based on the state indicated by the key\n self.unit_trellis = {}", "def test_resnet18():\n model = RestNet18()\n assert type(model) == RestNet18", "def init_model(self):\n cxnlib.CXNNetInitModel(self.handle)" ]
[ "0.6268504", "0.620634", "0.6204012", "0.61781776", "0.616232", "0.61510354", "0.61510354", "0.61510354", "0.61510354", "0.61510354", "0.6100227", "0.6087697", "0.6079006", "0.6012798", "0.5975343", "0.59738714", "0.59638786", "0.5960628", "0.5948427", "0.5936224", "0.5920398", "0.591646", "0.591646", "0.591646", "0.58933043", "0.5810627", "0.5810202", "0.57819504", "0.5714675", "0.5711785", "0.5694667", "0.56945586", "0.5673437", "0.56615764", "0.56475353", "0.56441593", "0.5642551", "0.5637632", "0.56299496", "0.5625149", "0.5622241", "0.5607465", "0.5595718", "0.55884254", "0.55782694", "0.556173", "0.5556331", "0.55388635", "0.5533531", "0.5531785", "0.55313766", "0.5507816", "0.54975307", "0.54965174", "0.5491685", "0.5491685", "0.5491685", "0.5491685", "0.5491685", "0.54852617", "0.5481227", "0.54781723", "0.5460092", "0.54597485", "0.54505295", "0.5445276", "0.5435036", "0.5416328", "0.54160935", "0.54147524", "0.5408445", "0.54064006", "0.540249", "0.5400191", "0.53923506", "0.5391556", "0.5388657", "0.53865445", "0.5383021", "0.5373817", "0.5370645", "0.5368076", "0.5366006", "0.5365216", "0.53612363", "0.5360358", "0.5352866", "0.5347377", "0.5341634", "0.5337998", "0.5336846", "0.5336775", "0.5335858", "0.53304625", "0.53273845", "0.532275", "0.531069", "0.5294119", "0.5292129" ]
document_score: 0.70269394
document_rank: 1
query: Constructs a BiRealNet34 model.
def birealnet34(pretrained=False, **kwargs):
    model = BiRealNet(BasicBlock, [6, 8, 12, 6], **kwargs)
    return model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def birealnet18(pretrained=False, **kwargs):\n model = BiRealNet(BasicBlock, [4, 4, 4, 4], **kwargs)\n return model", "def birealnet18(pretrained=False, **kwargs):\n model = BiRealNet(BasicBlock, [4, 4, 4, 4], **kwargs)\n return model", "def resnet34(bitW, bitA, pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], bitW, bitA, **kwargs)\n if pretrained == True:\n load_dict = torch.load('./full_precision_records/weights/model_best.pth.tar')['state_dict']\n model_dict = model.state_dict()\n model_keys = model_dict.keys()\n for name, param in load_dict.items():\n if name.replace('module.', '') in model_keys:\n model_dict[name.replace('module.', '')] = param \n model.load_state_dict(model_dict) \n return model", "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model", "def __init__(self, pretrained=True, freeze_weights=True):\n super(RaisinNet34, self).__init__()\n # Define the model's name for it's output files\n # Load a pre-trained ResNet-34 model and turn off autograd\n # so its weights won't change.\n architecture = resnet34(pretrained=pretrained)\n if freeze_weights:\n for layer in architecture.parameters():\n layer.requires_grad = False\n # Copy the convolutional layers of the model.\n self.conv1 = architecture.conv1\n self.bn1 = architecture.bn1\n self.relu = architecture.relu\n self.maxpool = architecture.maxpool\n self.layer1 = architecture.layer1\n self.layer2 = architecture.layer2\n self.layer3 = architecture.layer3\n self.layer4 = architecture.layer4\n # Copy the average pooling layer of the model.\n self.avgpool = architecture.avgpool\n # Redefine the classification block of ResNet-34.\n # Use LeakyReLU units instead of ReLU units.\n # Output layer has 2 nodes only for the 2 classes in the PCam dataset.\n in_ftrs = architecture.fc.in_features\n self.fc = nn.Linear(in_features=in_ftrs, out_features=2, bias=True)\n # Define a LogSoftmax layer for converting outputs to probabilities\n # Not needed in `forward()` because included in nn.CrossEntropyLoss\n self.log_softmax = nn.LogSoftmax(dim=1)", "def create_model():\n\n class Net(nn.Cell):\n def construct(self, x, y):\n return x\n\n net = Net()\n model_simple = Model(net)\n\n return model_simple", "def resnet34(pretrained=False):\n model = ResNet(BasicBlock, [3, 4, 6, 3])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "def resnet34(**kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n\n return model", "def resnet34(**kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model", "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "def construct(self):\n self.input_size = self.numplanes * self.boardsize**2\n \n if self.hidden:\n layers = [\n torch.nn.Linear(self.input_size, self.hidden), \n torch.nn.ReLU(),\n 
torch.nn.Linear(self.hidden, self.boardsize**2)\n ]\n else:\n layers = [torch.nn.Linear(self.input_size, self.boardsize**2)]\n\n self.layers = torch.nn.ModuleList(layers)\n self.optimizer = torch.optim.Adam(self.parameters(), lr=1e-5)\n logging.info(\"Model initialized: %s\", self)", "def build_model(self) -> nn.Module:\n pass", "def new(self):\n self.define_layers()\n self.model = nn.Sequential(*self.layers)\n self.model.cuda()\n self.model = orthogonal_init(self.model)\n\n # Re-count N\n self.count_params()", "def ResNet34(num_classes=10):\n return ResNet(BasicBlock, \n [3,4,6,3], \n num_classes=num_classes)", "def build_model(self):\n cfg = self.cfg\n\n print('Building model')\n self.model = SimpleNet(cfg, cfg.MODEL, 0, **cfg.MODEL.BACKBONE.PARAMS)\n self.model.to(self.device)\n print('# params: {:,}'.format(count_num_param(self.model)))\n self.optim = build_optimizer(self.model, cfg.OPTIM)\n self.sched = build_lr_scheduler(self.optim, cfg.OPTIM)\n self.register_model('model', self.model, self.optim, self.sched)\n\n fdim = self.model.fdim\n self.classifier = nn.Linear(fdim, self.num_classes)\n print('# params: {:,}'.format(count_num_param(self.classifier)))\n self.classifier.to(self.device)\n self.optim_classifier = build_optimizer(self.classifier, cfg.OPTIM)\n self.sched_classifier = build_lr_scheduler(self.optim_classifier, cfg.OPTIM)\n self.register_model('classifier', self.classifier, self.optim_classifier, self.sched_classifier)", "def resnet34(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34'], model_dir='.'), strict=False)\n return model", "def resnet34(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34'], model_dir='.'), strict=False)\n return model", "def resnet34(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34'], model_dir='.'), strict=False)\n return model", "def __init__(self, n_lm, n_ang):\n super(MVCNet, self).__init__()\n self.convM1_sag = conv_bn_prelu_dropout(1, 64, 4, 2, 1, 64, 64, 0.25)\n self.convM1_cor = conv_bn_prelu_dropout(1, 64, 4, 2, 1, 64, 64, 0.25)\n self.xModule1 = xModule([64, 128, 64], 64, 4, 2, 1, 128, 128, 0.25)\n self.xModule2 = xModule([128, 64, 32], 128, 4, 2, 1, 256, 256, 0.25)\n self.xModule3 = xModule([256, 32, 16], 256, 4, 2, 1, 512, 512, 0.25)\n self.SLE_sag = SLE([512, 16, 8], 512, n_lm)\n self.SLE_cor = SLE([512, 16, 8], 512, n_lm)\n self.CAE_sag = CAE(512, n_lm, n_ang)\n self.CAE_cor = CAE(512, n_lm, n_ang)", "def create_model(self):\n # Create the generator and discriminators\n self.generator_lungs = self.generator_model()\n self.generator_organs = self.generator_model()\n\n self.disc_lungs = self.discriminator_model_lungs()\n self.disc_organs = self.discriminator_model_organs()\n\n # Initialize the optimizer and backend\n self.generator_optimizer = tf.keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5)\n self.discriminator_optimizer = tf.keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5)\n self.set_backend = tf.keras.backend.set_floatx('float32')\n\n # Create the summary writer\n self.create_summary_writer()\n print('Models are created.')\n return self", "def model_creator(config):\n return nn.Linear(1, 1)", "def 
resnet34(pretrained: bool = False, include_top: bool = False, freeze: bool = False):\n model = torchvision.models.resnet34(pretrained)\n if freeze:\n set_parameter_requires_grad(model, \"fc\")\n if not include_top:\n output_size = model.fc.in_features\n model.fc = nn.Identity()\n return BackboneModule(model, output_size)\n else:\n return model", "def resnet34(pretrained=False, **kwargs):\n model = ResNetFeatures(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n _load_pretrained(model, model_zoo.load_url(model_urls['resnet34']))\n return model", "def bl_resnet50(pretrained=False, **kwargs):\n model = bL_ResNet([2, 3, 5, 3], **kwargs)\n # print ('model created')\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model", "def build_model():\n model = Sequential()\n model.add(Dense(beer_emb.EMB_DIM, activation=\"relu\",\n input_dim=beer_emb.EMB_DIM))\n model.add(Dropout(0.5))\n model.add(Dense(64, activation=\"relu\"))\n model.add(Dropout(0.5))\n model.add(Dense(32, activation=\"relu\"))\n model.add(Dense(1, activation='sigmoid'))\n model.compile(loss='binary_crossentropy',\n metrics=['accuracy'], optimizer='adam')\n\n return model", "def make_model(self):\n if self.model_type=='densenet_121':\n model = self.make_densenet_121(self.weights)\n\n\n return model", "def resnet18(bitW, bitA, pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], bitW, bitA, **kwargs)\n if pretrained == True:\n load_dict = torch.load('./full_precision_records/weights/model_best.pth.tar')['state_dict']\n model_dict = model.state_dict()\n model_keys = model_dict.keys()\n for name, param in load_dict.items():\n if name.replace('module.', '') in model_keys:\n model_dict[name.replace('module.', '')] = param \n model.load_state_dict(model_dict) \n return model", "def getModel(config: configuration.Configuration) -> torch.nn.Module:\n if config.modelName == ModelName.DENSE:\n return DenseGenerator(1, 1, n_blocks=config.blockCount)\n elif config.modelName == ModelName.SHALLOW:\n return Shallow(1, 1, )\n elif config.modelName == ModelName.TIRAMISU:\n model = Tiramisu(1, 1, structure=(\n config.down, # Down blocks\n config.bottleneck, # bottleneck layers\n config.up, # Up blocks\n ), checkpoint=False)\n\n model.initialize_kernels(torch.nn.init.kaiming_uniform_, conv=True)\n return model\n else:\n return SimpleCNN()", "def create_base_network(NumberOfFeatures, NumberOfClasses,init_mode='glorot_normal'):\n network = Sequential()\n network.add(Dense(44, activation='sigmoid', kernel_initializer=init_mode,input_dim=NumberOfFeatures))\n# network.add(Dense(22, activation='sigmoid',kernel_initializer=init_mode))\n network.add(Dense(NumberOfClasses, activation='softmax',kernel_initializer=init_mode))\n return network", "def resnet34(pretrained=False, mode='rgb', **kwargs):\n if mode == 'flow':\n model = ResNet(BasicBlock, [3, 4, 6, 3], inp=20, **kwargs)\n else:\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n\n return model", "def initModel(self):\n input_shape = (self.params[\"nb_features\"],)\n x = input_tensor = Input(input_shape)\n x = Dense(self.params[\"nb_neurons\"], activation=\"relu\")(x)\n for i in range(2, self.params[\"nb_layers\"] + 1):\n x = Dense(self.params[\"nb_neurons\"], activation=\"relu\")(x)\n if self.params[\"dropout\"]:\n x = Dropout(self.params[\"dropout\"])(x)\n x = output_tensor = Dense(4)(x)\n model = Model(input_tensor, output_tensor)\n return model", "def resnext34(**kwargs):\n model = ResNeXt(BasicBlock, [3, 4, 6, 3], **kwargs)\n 
return model", "def create_model(self): # noqa: D103\n # reference for creation of the model https://yilundu.github.io/2016/12/24/Deep-Q-Learning-on-Space-Invaders.html\n model=Sequential()\n model.add(Flatten( input_shape=(84,84,4)))\n model.add(Dense(self.num_actions)) \n\n return model", "def create_model() -> Model:\n # Create a neural network model that includes several dense layers with hyperbolic tangent activations, L2 regularization, and batch normalization\n regularizer = l2(0)\n dropout = 0\n activation = 'tanh'\n model = Sequential([\n InputLayer(input_shape=(16,)),\n BatchNormalization(),\n Dense(12, activation=activation, kernel_regularizer=regularizer),\n Dropout(dropout),\n Dense(8, activation=activation, kernel_regularizer=regularizer),\n Dropout(dropout),\n Dense(1, kernel_regularizer=regularizer)\n ])\n # Output a summary of the model's architecture\n print(model.summary())\n # Use a mean squared error loss function and an Adam optimizer; do not print accuracy because this is a regression task\n model.compile(\n optimizer='adam',\n loss='mse',\n metrics=['mae']\n )\n # Return the untrained model\n return model", "def build_backbone(config):\n assert config.MODEL.BACKBONE in ['resnet50', 'resnet101'], \"backbone name is not supported!\"\n backbone_name = config.MODEL.BACKBONE\n dilation = False\n train_backbone = not config.EVAL\n return_interm_layers = False #TODO: impl case True for segmentation\n\n position_embedding = build_position_encoding(config.MODEL.TRANS.HIDDEN_SIZE)\n backbone = Backbone(backbone_name, train_backbone, return_interm_layers, dilation)\n model = Joiner(backbone, position_embedding)\n model.num_channels = backbone.num_channels\n\n return model", "def build_model(cls, args, task):\n\n # make sure all arguments are present in older models\n base_lm_architecture(args)\n\n if args.decoder_layers_to_keep:\n args.decoder_layers = len(args.decoder_layers_to_keep.split(\",\"))\n\n if getattr(args, \"max_target_positions\", None) is None:\n args.max_target_positions = getattr(\n args, \"tokens_per_sample\", DEFAULT_MAX_TARGET_POSITIONS\n )\n\n if args.character_embeddings:\n embed_tokens = CharacterTokenEmbedder(\n task.source_dictionary,\n eval(args.character_filters),\n args.character_embedding_dim,\n args.decoder_embed_dim,\n args.char_embedder_highway_layers,\n )\n elif args.adaptive_input:\n embed_tokens = AdaptiveInput(\n len(task.source_dictionary),\n task.source_dictionary.pad(),\n args.decoder_input_dim,\n args.adaptive_input_factor,\n args.decoder_embed_dim,\n options.eval_str_list(args.adaptive_input_cutoff, type=int),\n args.quant_noise_pq,\n args.quant_noise_pq_block_size,\n )\n else:\n embed_tokens = cls.build_embedding(\n args, task.source_dictionary, args.decoder_input_dim\n )\n\n if args.tie_adaptive_weights:\n assert args.adaptive_input\n assert args.adaptive_input_factor == args.adaptive_softmax_factor\n assert (\n args.adaptive_softmax_cutoff == args.adaptive_input_cutoff\n ), \"{} != {}\".format(\n args.adaptive_softmax_cutoff, args.adaptive_input_cutoff\n )\n assert args.decoder_input_dim == args.decoder_output_dim\n\n decoder = LinearTransformerDecoder(\n args, task.target_dictionary, embed_tokens, no_encoder_attn=True\n )\n return cls(decoder)", "def create_model():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--DISC_LR', type=float, default=1e-4)\r\n parser.add_argument('--GEN_LR', type=float, default=1e-3)\r\n parser.add_argument('--GEN_BETA1', type=float, default=0.9)\r\n parser.add_argument('--GEN_BETA2', 
type=float, default=0.999)\r\n parser.add_argument('--IMAGE_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_EMBED_SIZE', type=int, default=2048)\r\n parser.add_argument('--WORD_EMBED_SIZE', type=int, default=512)\r\n parser.add_argument('--VOCAB_SIZE', type=int, default=1004)\r\n args, task_args = parser.parse_known_args()\r\n override_if_not_in_args('--max_steps', '1000', task_args)\r\n override_if_not_in_args('--batch_size', '64', task_args)\r\n override_if_not_in_args('--eval_set_size', '370', task_args)\r\n override_if_not_in_args('--eval_interval_secs', '2', task_args)\r\n override_if_not_in_args('--log_interval_secs', '2', task_args)\r\n override_if_not_in_args('--min_train_eval_rate', '2', task_args)\r\n\r\n return Model(args.DISC_LR, args.GEN_LR, args.GEN_BETA1, args.GEN_BETA2,\r\n args.IMAGE_SIZE, args.QUES_EMBED_SIZE, args.WORD_EMBED_SIZE,\r\n args.QUES_SIZE, args.VOCAB_SIZE), task_args", "def init_model(self):\n model = Sequential()\n model.add(Dense(units=24, input_dim=self.input_shape[0],\n activation='relu'))\n model.add(Dense(units=24, activation='relu'))\n # We want rewards instead of probability, so use linear here\n model.add(Dense(units=self.output_num, activation='linear'))\n model.compile(loss='mse', optimizer=Adam(lr=self.eta))\n return model", "def build_model():", "def build_model(cls, args, task):\n global PAD_IDX, EOS_IDX\n # make sure all arguments are present in older models\n w2v_lm_architecture2(args)\n\n if not hasattr(args, \"max_source_positions\"):\n args.max_source_positions = 2048\n if not hasattr(args, \"max_target_positions\"):\n args.max_target_positions = 2048\n\n tgt_dict = task.target_dictionary\n PAD_IDX = tgt_dict.pad()\n EOS_IDX = tgt_dict.eos()\n\n encoder = cls.build_encoder(args)\n assigner = cls.build_assigner(args, encoder.d)\n lm = cls.build_lm(args, task)\n\n return cls(args, encoder, assigner, lm)", "def __init__(self, opt):\n BaseModel.__init__(self, opt)\n\n self.loss_names = ['G_SH']\n self.visual_names = ['input', 'pr_SH', 'gt_SH']\n self.model_names = ['G1']\n\n if not opt.no_brightness:\n self.loss_names += ['G_BA', 'G_BC']\n self.visual_names += ['pr_BA', 'gt_BA']\n self.model_names += ['G3']\n\n if opt.reg_LTM:\n self.loss_names += ['LTMReg']\n\n self.light_res = opt.light_res\n\n\n # Intrinsic network\n if opt.latent_Ls or opt.latent_Lt:\n netG1name = 'unet_256_latent_inL'\n else:\n netG1name = 'unet_256_latent'\n\n input_nc = opt.input_nc\n if opt.in_Ls:\n input_nc += 1\n if opt.in_Lt:\n input_nc += 1\n\n if opt.LTM:\n self.dim_LTM = self.light_res**2\n if self.opt.enc_LTM:\n self.dim_LTM = opt.dim_LTM\n use_hidden = True if not opt.enc_ill_hid==-1 else False\n self.enc_LTM = networks.init_net(networks.IlluminationEncoder(self.light_res**2, opt.enc_ill_hid, self.dim_LTM, use_hidden), opt.init_type, opt.init_gain, self.gpu_ids)\n\n self.netG1 = networks.define_G(input_nc, self.dim_LTM, opt.ngf, netG1name, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, True, self.gpu_ids)\n\n else:\n if opt.no_latent_color:\n output_nc = 3\n else:\n output_nc = 1\n self.netG1 = networks.define_G(input_nc, output_nc, opt.ngf, netG1name, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, False, self.gpu_ids)\n\n # Brightness network\n g3_input_nc = 3\n if opt.cas and opt.cat_In:\n g3_input_nc = g3_input_nc + opt.input_nc\n if not opt.cas:\n if opt.in_Ls:\n g3_input_nc += 1\n if opt.in_Lt:\n g3_input_nc += 1\n self.netG3 = 
networks.define_G(g3_input_nc, 1, opt.ngf, 'resnet_9blocks_latent', opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, False, self.gpu_ids)\n if self.isTrain:\n # define loss functions\n self.criterionS = torch.nn.MSELoss()\n self.criterionBA = torch.nn.MSELoss()\n # self.criterionBP = torch.nn.MSELoss()\n self.criterionBC = torch.nn.MSELoss()\n self.criterionReg = torch.nn.MSELoss()\n # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.\n self.optimizer_G1 = torch.optim.Adam(self.netG1.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n # self.optimizer_G2 = torch.optim.Adam(self.netG2.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizer_G3 = torch.optim.Adam(self.netG3.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizers.append(self.optimizer_G1)\n # self.optimizers.append(self.optimizer_G2)\n self.optimizers.append(self.optimizer_G3)", "def create_model_eg_bin3orig(my_learning_rate):\n # This is a first try to get a simple model that works\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Conv2D(\n filters=128, kernel_size=(3,3), input_shape=(8,8,15), strides=(1, 1), padding='same'))\n model.add(tf.keras.layers.MaxPooling2D((2, 2)))\n model.add(tf.keras.layers.Conv2D(\n filters=128, kernel_size=(3,3), strides=(1, 1), padding='same'))\n model.add(tf.keras.layers.MaxPooling2D((2, 2)))\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(units=64, activation='relu'))\n model.add(tf.keras.layers.Dense(units=33))\n\n model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=my_learning_rate),\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\n return model", "def __init__(self, num_models: int, num_classes: int):\n self.nun_models = num_models\n self.num_classes = num_classes\n self.model: keras.Model = self.init_model()", "def generate_model(**kwargs):\n model = ResNet3D(Bottleneck, [3, 4, 6, 3], [64, 128, 256, 512], **kwargs)\n return model", "def ffc_resnet34(pretrained=False, **kwargs):\n model = FFCResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model", "def BuildModel(ANNSetup,model):\n\n if(isinstance(ANNSetup.Activ,str)):\n model.add(Dense(ANNSetup.Neurons[0], kernel_regularizer=l2(ANNSetup.Regu), activation=ANNSetup.Activ, kernel_initializer=Winit(ANNSetup.Winit), input_dim=ANNSetup.InputDim))\n if(ANNSetup.Dropout != None):\n model.add(Dropout(ANNSetup.Dropout))\n for i in range(1,len(ANNSetup.Neurons)):\n if(i == len(ANNSetup.Neurons)-1):\n model.add(Dense(ANNSetup.Neurons[i], kernel_initializer=Winit(ANNSetup.Winit), activation='sigmoid'))\n else:\n model.add(Dense(ANNSetup.Neurons[i], kernel_initializer=Winit(ANNSetup.Winit), activation=ANNSetup.Activ))\n else:\n model.add(Dense(ANNSetup.Neurons[0], kernel_regularizer=l2(ANNSetup.Regu), kernel_initializer=Winit(ANNSetup.Winit), input_dim=ANNSetup.InputDim))\n model.add(LeakyReLU(alpha=ANNSetup.Activ))\n if(ANNSetup.Dropout != None):\n model.add(Dropout(ANNSetup.Dropout))\n for i in range(1,len(ANNSetup.Neurons)):\n if(i == len(ANNSetup.Neurons)-1):\n model.add(Dense(ANNSetup.Neurons[i], kernel_initializer=Winit(ANNSetup.Winit), activation='sigmoid'))\n else:\n model.add(Dense(ANNSetup.Neurons[i], kernel_initializer=Winit(ANNSetup.Winit)))\n model.add(LeakyReLU(alpha=ANNSetup.Activ))\n\n return model", "def create_model(hparams, mode):\n\n graph = tf.Graph()\n\n with graph.as_default():\n with tf.name_scope(\"input_pipe\"):\n dataset = 
create_dataset(hparams, mode)\n iterator = dataset.make_initializable_iterator()\n model = LMandBDRNNModel(hparams=hparams,\n iterator=iterator,\n mode=mode)\n\n sess = tf.Session(graph=graph)\n\n modeltuple = ModelTuple(graph=graph, iterator=iterator,\n model=model, session=sess)\n\n return modeltuple", "def make_model(self, inputs, is_training):\n with tf.variable_scope('ResNet50'):\n x = conv2d(inputs, 64, [7, 7], strides=[1, 2, 2, 1], name='conv1') # size 1/2\n x = bn(x, is_training)\n x = relu(x)\n x = max_pool(x, ksize=[1, 3, 3, 1], name='pool1') # size 1/4\n\n x = self.conv_block(x, [64, 64, 256], '2_1', is_training, s=1)\n x = self.identity_block(x, [64, 64, 256], '2_2', is_training)\n x = self.identity_block(x, [64, 64, 256], '2_3', is_training)\n\n x = self.conv_block(x, [128, 128, 512], '3_1', is_training)\n x = self.identity_block(x, [128, 128, 512], '3_2', is_training)\n x = self.identity_block(x, [128, 128, 512], '3_3', is_training)\n\n x = self.atrous_conv_block(x, [256, 256, 1024], '4_1', 2, is_training, s=1)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_2', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_3', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_4', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_5', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_6', 2, is_training)\n\n x = self.atrous_conv_block(x, [512, 512, 2048], '5_1', 4, is_training, s=1)\n x = self.atrous_identity_block(x, [512, 512, 2048], '5_2', 4, is_training)\n x = self.atrous_identity_block(x, [512, 512, 2048], '5_3', 4, is_training)\n\n\n\n \"\"\"\n Astrous Pyrimid Pooling. Decoder\n \"\"\"\n with tf.variable_scope('ASPP'):\n feature_map_shape = x.get_shape().as_list()\n\n # global average pooling\n # feature 맵의 height, width를 평균을 낸다.\n feature_map = tf.reduce_mean(x, [1, 2], keepdims=True)\n\n feature_map = conv2d(feature_map, 256, [1, 1], name='gap_feature_map')\n feature_map = tf.image.resize_bilinear(feature_map, [feature_map_shape[1], feature_map_shape[2]])\n\n rate1 = conv2d(x, 256, [1, 1], name='rate1')\n rate6 = atrous_conv2d(x, 256, [3, 3], rate=6, name='rate6')\n rate12 = atrous_conv2d(x, 256, [3, 3], rate=12, name='rate12')\n rate18 = atrous_conv2d(x, 256, [3, 3], rate=18, name='rate18')\n\n concated = tf.concat([feature_map, rate1, rate6, rate12, rate18], axis=3)\n\n net = conv2d(concated, 256, [1, 1], name='net')\n\n logits = conv2d(net, self.N_CLASS, [1, 1], name='logits')\n logits = tf.image.resize_bilinear(logits, size=[self.RESIZE, self.RESIZE], name='out')\n\n pred = tf.argmax(logits, axis=3)\n pred = tf.expand_dims(pred, dim=3)\n\n return logits, pred", "def make_model(self):\n onnx_graph = onnx.helper.make_graph(\n self._nodes, self._name, self._inputs, self._outputs, self._initializers\n )\n kwargs = {}\n kwargs[\"opset_imports\"] = self._get_opsets()\n kwargs[\"producer_name\"] = \"TVM Relay\"\n kwargs[\"producer_version\"] = tvm.__version__\n\n return onnx.helper.make_model(onnx_graph, **kwargs)", "def resnet34(shortcut_type, num_classes, in_channels):\n model = ResNet(BasicBlock, [3, 4, 6, 3], shortcut_type, num_classes, in_channels)\n return model", "def resnet18(**kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model", "def get_model():\n # Load the pretrained model.\n model = torchvision.models.resnet34(pretrained=True)\n\n # Resize model for our task.\n model.conv1 = torch.nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1,\n bias=False)\n 
model.avgpool = torch.nn.AvgPool2d(2)\n model.fc = torch.nn.Linear(in_features=512, out_features=10, bias=True)\n\n return model", "def fresnet34_v3(keep, input_size, **kwargs):\n model = ResNet_34(keep, BasicBlock_v3, [3, 4, 6, 3], input_size, **kwargs, embedding_size=128)\n # if pretrained:\n # model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model", "def model_setup(self):\n self.DNN = DgganMLP(self.settings.hidden_size)\n self.D = DgganMLP(self.settings.hidden_size)\n self.G = Generator(self.settings.hidden_size)", "def resnet46(pretrained=False):\n model = ResNet(BasicBlock, [3, 6, 10, 3])\n if pretrained:\n pass\n #model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def init_model(\n sample_length: int, base_model: str, num_classes: int = None\n ) -> torchvision.models.video.resnet.VideoResNet:\n if base_model not in (\"ig65m\", \"kinetics\"):\n raise ValueError(\n f\"Not supported model {base_model}. Should be 'ig65m' or 'kinetics'\"\n )\n\n # Decide if to use pre-trained weights for DNN trained using 8 or for 32 frames\n model_name = f\"r2plus1d_34_{sample_length}_{base_model}\"\n\n print(f\"Loading {model_name} model\")\n\n model = torch.hub.load(\n TORCH_R2PLUS1D,\n model_name,\n num_classes=MODELS[model_name],\n pretrained=True,\n )\n\n # Replace head\n if num_classes is not None:\n model.fc = nn.Linear(model.fc.in_features, num_classes)\n\n return model, model_name", "def ResNetLN34(num_classes=10):\n return ResNet(BasicLNBlock,[3,4,6,3], \n num_classes=num_classes,\n norm_type=\"LN\")", "def __init__(self):\n # Initializing the Model with the class\n super(Model, self).__init__()\n # torch.nn.Linear applies a Linear transformation. The first parameter is the size of each input sample. 
The second is the size of the output sample\n self.linear = torch.nn.Linear(1, 1)", "def build_model(self):\n import tensorflow as tf\n \n y = tf.nn.relu(tf.matmul(self.variables[\"input_observation\"], self.variables[\"W1\"]) + \n self.variables[\"b1\"], name=\"y1\")\n \n for i in range(self.n_hidden-1):\n y = tf.nn.relu(tf.matmul(y, self.variables[\"W\"+str(i+2)]) + \n self.variables[\"b\"+str(i+2)], name=\"y\"+str(i+2))\n \n self.variables[\"y\"] = [tf.matmul(y, self.variables[\"Wo_0\"]) + self.variables[\"bo_0\"]]\n for i in range(1, len(self.output_size)):\n self.variables[\"y\"] += [tf.matmul(y, self.variables[\"Wo_%s\"%i]) + self.variables[\"bo_%s\"%i]]", "def build_model_mobilenet(num_classes):", "def make_model():\n m = model_class(*argv[2:-1])\n modelobj[\"model\"] = m", "def build_cut_model(self):\n model = None\n if self.model_name == 'vgg16':\n model = vgg16(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'resnet':\n model = ResNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'densenet':\n model = DenseNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'mobilenet':\n model = MobileNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'efficientnet':\n model = efficientnet(input_shape=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'vit':\n model = VisionTransformer(image_size=self.ex_input_size,\n patch_size=vit_settings['patch_size'],\n num_layers=vit_settings['num_layers'],\n num_classes=num_classes,\n d_model=vit_settings['d_model'],\n num_heads=vit_settings['num_heads'],\n mlp_dim=vit_settings['mlp_dim'],\n channels=self.ex_channels,\n dropout=vit_settings['dropout']).build_VisionTransformer()\n model.load_weights(self.path_to_weights)\n model = Model(model.input, model.get_layer(self.ex_last_conv_layer_name2).output)\n model.summary()\n return model", "def resnet18(pretrained=False):\n model = ResNet(BasicBlock, [2, 2, 2, 2])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def __init__(self, out_size=2, freeze=False, pretrained=True, arch='resnet50'):\n\n super().__init__()\n\n if arch == 'resnet50':\n model = torchvision.models.resnet50(pretrained=pretrained)\n self.model_name = 'resnet50'\n elif arch == 'resnet18':\n model = torchvision.models.resnet18(pretrained=pretrained)\n self.model_name = 'resnet18'\n elif arch == 'resnet34':\n model = torchvision.models.resnet34(pretrained=pretrained)\n self.model_name = 'resnet34'\n elif arch == 'resnet101':\n model = torchvision.models.resnet101(pretrained=pretrained)\n self.model_name = 'resnet101'\n elif arch == 'resnet152':\n model = torchvision.models.resnet152(pretrained=pretrained)\n self.model_name = 'resnet152'\n elif arch == 'wide_resnet50_2':\n model = torchvision.models.wide_resnet50_2(pretrained=pretrained)\n self.model_name = 'wide_resnet50_2'\n elif arch == 'wide_resnet101_2':\n model = torchvision.models.wide_resnet101_2(pretrained=pretrained)\n self.model_name = 'wide_resnet101_2'\n else:\n model = torchvision.models.resnet18(pretrained=pretrained)\n self.model_name = 'resnet18'\n\n if pretrained and freeze:\n for param in model.parameters():\n param.requires_grad = False\n\n num_ftrs = model.fc.in_features\n model.fc = 
nn.Linear(num_ftrs, out_size)\n\n self.model = model", "def _create_base_model(self, modality):\n\n if modality == \"RGB\":\n in_channels = 3\n elif modality == \"Flow\":\n in_channels = 10\n elif modality == \"Audio\":\n in_channels = 1\n\n model_dir = os.path.dirname(\n os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n )\n model_dir = os.path.join(model_dir, \"weights\")\n\n is_audio = True if modality == \"Audio\" else False\n\n if \"vgg\" in self.base_model_name:\n base_model = VGG(self.cfg.model.vgg.type, modality, in_channels)\n elif \"resnet\" in self.base_model_name:\n base_model = Resnet(self.cfg.model.resnet.depth, modality, in_channels)\n elif self.base_model_name == \"bninception\":\n pretrained = \"kinetics\" if modality == \"Flow\" else \"imagenet\"\n base_model = bninception(\n in_channels,\n modality,\n model_dir=model_dir,\n pretrained=pretrained,\n is_audio=is_audio,\n attend=self.use_attention,\n )\n\n return base_model", "def build_model(self):\n self.g12 = G12(conv_dim=self.g_conv_dim)\n init_weights(self.g12, init_type='normal')\n self.g21 = G21(conv_dim=self.g_conv_dim)\n init_weights(self.g21, init_type='normal')\n self.d1 = D1(conv_dim=self.d_conv_dim, use_labels=self.use_labels)\n init_weights(self.d1, init_type='normal')\n self.d2 = D2(conv_dim=self.d_conv_dim, use_labels=self.use_labels)\n init_weights(self.d2, init_type='normal')\n self.dreid = DSiamese(class_count=self.num_classes_market)\n\n g_params = list(self.g12.parameters()) + list(self.g21.parameters())\n d_params = list(self.d1.parameters()) + list(self.d2.parameters())\n dr_params = list(self.dreid.parameters())\n\n self.g_optimizer = optim.Adam(g_params, self.lr, [self.beta1, self.beta2])\n self.d_optimizer = optim.Adam(d_params, self.lr, [self.beta1, self.beta2])\n self.dr_optimizer = optim.Adam(dr_params, self.lr, [self.beta1, self.beta2])\n\n if torch.cuda.is_available():\n self.g12.cuda()\n self.g21.cuda()\n self.d1.cuda()\n self.d2.cuda()\n self.dreid.cuda()", "def __init__(self):\n \n self.model = Net()\n\n if torch.cuda.is_available():\n map_location=torch.device('cuda')\n else:\n map_location=torch.device('cpu')\n\n # load parameters\n self.model.load_state_dict(torch.load('model.pt',\n map_location=map_location)) \n \n if torch.cuda.is_available():\n self.model.cuda()\n else:\n self.model.cpu()\n \n self.model.eval()", "def build_model():\n # noise for soise sampling in NCE\n noise = build_unigram_noise(\n torch.FloatTensor(corpus.vocab.idx2count)\n )\n\n norm_term = 'auto' if args.norm_term == -1 else args.norm_term\n # setting up NCELoss modules\n if args.index_module == 'linear':\n criterion = IndexLinear(\n args.emsize,\n ntoken,\n noise=noise,\n noise_ratio=args.noise_ratio,\n norm_term=norm_term,\n loss_type=args.loss,\n reduction='none',\n )\n model = RNNModel(\n ntoken, args.emsize, args.nhid, args.nlayers,\n criterion=criterion, dropout=args.dropout,\n )\n elif args.index_module == 'gru':\n if args.nlayers != 1:\n logger.warning('Falling into one layer GRU due to Index_GRU supporting')\n nce_criterion = IndexGRU(\n ntoken, args.emsize, args.nhid,\n args.dropout,\n noise=noise,\n noise_ratio=args.noise_ratio,\n norm_term=norm_term,\n )\n model = GenModel(\n criterion=nce_criterion,\n )\n else:\n logger.error('The index module [%s] is not supported yet' % args.index_module)\n raise(NotImplementedError('index module not supported'))\n\n if args.cuda:\n model.cuda()\n\n logger.info('model definition:\\n %s', model)\n return model", "def raw_model():\n model = 
cobra.Model(id_or_model=\"raw_model\", name=\"raw_model\")\n rxn_1 = cobra.Reaction(\"BIOMASS_TEST\")\n rxn_2 = cobra.Reaction(\"RXN2\")\n rxn_3 = cobra.Reaction(\"RXN3\")\n rxn_4 = cobra.Reaction(\"RXN4\")\n model.add_reactions([rxn_1, rxn_2, rxn_3, rxn_4])\n model.objective = rxn_3\n return model", "def resnext18( **kwargs):\n model = ResNeXt(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def build_model(self, constructor, args):\n dims = {'en': 300, 'es': 50}\n dists = {'en': 'Normal',\n 'es': 'Normal',}\n z_dim = args.model_args.get('z_dim', 64)\n h_dim = args.model_args.get('h_dim', 64)\n n_layers = args.model_args.get('n_layers', 3)\n gauss_out = (args.model != 'MultiDKS') \n encoders = {'en': models.common.DeepGaussianMLP(dims['en'], z_dim, h_dim, n_layers),\n 'es': models.common.DeepGaussianMLP(dims['es'], z_dim, h_dim, n_layers)}\n decoders = {'en': models.common.DeepGaussianMLP(z_dim, dims['en'], h_dim, n_layers),\n 'es': models.common.DeepGaussianMLP(z_dim, dims['es'], h_dim, n_layers)}\n custom_mods = [m for m in ['en', 'es'] if m in args.modalities]\n model = constructor(args.modalities,\n dims=(dims[m] for m in args.modalities),\n dists=[dists[m] for m in args.modalities],\n encoders={m: encoders[m] for m in custom_mods},\n decoders={m: decoders[m] for m in custom_mods},\n z_dim=z_dim, h_dim=h_dim,\n device=args.device, **args.model_args)\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def __init__(self, backbone_name, config):\n\n backbone_config = Schema(\n {\n Required(\"input_shape\"): Schema((int, int, int)),\n Required(\"include_top\"): bool,\n Required(\"weights\"): str,\n Optional(\"alpha\"): float,\n }\n )\n\n config = backbone_config(config)\n\n if backbone_name == \"MobileNetV2\":\n self.model = tf.keras.applications.MobileNetV2(**config)\n elif backbone_name == \"ResNet50\":\n self.model = tf.keras.applications.ResNet50(**config)\n elif backbone_name == \"InceptionV3\":\n self.model = tf.keras.applications.InceptionV3(**config)\n\n # Remove Layers until Conv4\n for i, layer in enumerate(reversed(self.model.layers)):\n if backbone_name == \"ResNet50\" and layer._name == \"conv4_block6_out\":\n break\n elif (\n backbone_name == \"MobileNetV2\" and layer._name == \"block_13_expand_relu\"\n ):\n break\n else:\n self.model._layers.pop()\n\n self.model.layers[-1]._name = \"feature_map\"\n\n self.model = Model(\n 
self.model.input, self.model.layers[-1].output, name=\"Backbone\"\n )", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)\n return model", "def construct_private_model(input_size, model):\n # get rank of current process\n rank = comm.get().get_rank()\n dummy_input = torch.empty(input_size)\n\n # party 0 always gets the actual model; remaining parties get dummy model\n if rank == 0:\n model_upd = model\n else:\n model_upd = LeNet()\n private_model = crypten.nn.from_pytorch(model_upd, dummy_input).encrypt(src=0)\n return private_model", "def resnet10(**kwargs):\n model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)\n return model", "def __init__(self):\n self.device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n \n self.model = models.resnet101() \n self.model.load_state_dict(torch.load(WEIGHTS_DIR))\n \n self.model.to(self.device)\n \n self.model.eval()\n \n # labels\n with open(LABELS, 'r') as f:\n self.labels = ast.literal_eval(f.read())\n \n # define data transform\n self.transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])", "def model_initializer():\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Flatten())\n # model.add(tf.keras.layers.Dense(128, activation=tf.nn.elu))\n model.add(tf.keras.layers.Dense(64, activation=tf.nn.elu))\n model.add(tf.keras.layers.Dense(32, activation=tf.nn.elu))\n model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))\n\n\n model.compile(optimizer='rmsprop',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n return model", "def build_model(self):\n if self.args.network_type == 'unet':\n self.shared = models.Unet(self.args)\n else:\n raise NotImplementedError(f'Network type '\n f'`{self.args.network_type}` is not '\n f'defined')\n self.controller = models.Controller(self.args)\n\n if self.args.num_gpu == 1:\n self.shared.cuda()\n self.controller.cuda()\n elif self.args.num_gpu > 1:\n raise NotImplementedError('`num_gpu > 1` is in progress')", "def make_model():\n # create the base pre-trained model\n base_model = efn.EfficientNetB0(input_shape=(img_width, img_height, 3), include_top=False)\n # add a global spatial average pooling layer\n x = base_model.output\n x = GlobalAveragePooling2D()(x)\n predictions = Dense(num_classes, activation=\"softmax\")(x)\n model = Model(inputs=base_model.input, outputs=predictions)\n\n model.compile(optimizer=\"adam\", loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\n\n return base_model, model", "def create_model(self, input_shape, num_actions, mode, args, model_name='q_network'):\n assert (mode in (\"linear\", \"duel\", \"dqn\"))\n with tf.variable_scope(model_name):\n input_data = Input(shape=input_shape, name=\"input\")\n if mode == \"linear\":\n # #version 4 elu:\n # flatten_hidden = Flatten(name=\"flatten\")(input_data)\n # FC_1 = Dense(512, activation='elu', name='FC1-elu')(flatten_hidden)\n # FC_2 = Dense(512, activation='elu', name='FC2-elu')(FC_1)\n # FC_3 = Dense(512, activation='elu', name='FC3-elu')(FC_2)\n # FC_4 = Dense(512, activation='elu', name='FC4-elu')(FC_3)\n # output = Dense(num_actions, activation='elu', name=\"output\")(FC_4)\n\n #version 4 elu:\n flatten_hidden = Flatten(name=\"flatten\")(input_data)\n FC_1 = Dense(1024, 
activation='elu', name='FC1-elu')(flatten_hidden)\n FC_2 = Dense(1024, activation='elu', name='FC2-elu')(FC_1)\n FC_3 = Dense(1024, activation='elu', name='FC3-elu')(FC_2)\n FC_4 = Dense(512, activation='elu', name='FC4-elu')(FC_3)\n output = Dense(num_actions, activation='elu', name=\"output\")(FC_4)\n\n else:\n if not (args.recurrent):\n # # # version 1:\n # h1 = Convolution2D(32, (8, 8), strides=4, activation=\"relu\", name=\"conv1\")(input_data)\n # h2 = Convolution2D(64, (4, 4), strides=2, activation=\"relu\", name=\"conv2\")(h1)\n # h3 = Convolution2D(64, (3, 3), strides=1, activation=\"relu\", name=\"conv3\")(h2)\n # context = Flatten(name=\"flatten\")(h3)\n\n # # version 2:\n # conv1 = Convolution2D(1, (5, 5), strides=1, activation=\"elu\", name=\"conv1\")(input_data)\n # flatten = Flatten(name=\"flatten\")(conv1)\n # FC_2 = Dense(512, activation='elu', name='FC2-elu')(flatten)\n # context = Dense(512, activation='elu', name='FC4-elu')(FC_2)\n\n # version 3:\n conv1 = Convolution2D(32, (2, 2), strides=1, activation=\"relu\", name=\"conv1\")(input_data)\n flatten = Flatten(name=\"flatten\")(conv1)\n FC_2 = Dense(128, activation='relu', name='FC2-relu')(flatten)\n FC_3 = Dense(128, activation='relu', name='FC3-relu')(FC_2)\n context = Dense(128, activation='elu', name='FC4-elu')(FC_3)\n\n\n\n # else:\n # print('>>>> Defining Recurrent Modules...')\n # input_data_expanded = Reshape((input_shape[0], input_shape[1], input_shape[2], 1),\n # input_shape=input_shape)(input_data)\n # input_data_TimeDistributed = Permute((3, 1, 2, 4), input_shape=input_shape)(input_data_expanded)\n # h1 = TimeDistributed(Convolution2D(32, (8, 8), strides=4, activation=\"relu\", name=\"conv1\"), \\\n # input_shape=(args.num_frames, input_shape[0], input_shape[1], 1))(\n # input_data_TimeDistributed)\n # h2 = TimeDistributed(Convolution2D(64, (4, 4), strides=2, activation=\"relu\", name=\"conv2\"))(h1)\n # h3 = TimeDistributed(Convolution2D(64, (2, 2), strides=1, activation=\"relu\", name=\"conv3\"))(h2)\n # flatten_hidden = TimeDistributed(Flatten())(h3)\n # hidden_input = TimeDistributed(Dense(512, activation='relu', name='flat_to_512'))(flatten_hidden)\n # if not (args.a_t):\n # context = LSTM(512, return_sequences=False, stateful=False, input_shape=(args.num_frames, 512))(\n # hidden_input)\n # else:\n # if args.bidir:\n # hidden_input = Bidirectional(\n # LSTM(512, return_sequences=True, stateful=False, input_shape=(args.num_frames, 512)),\n # merge_mode='sum')(hidden_input)\n # all_outs = Bidirectional(\n # LSTM(512, return_sequences=True, stateful=False, input_shape=(args.num_frames, 512)),\n # merge_mode='sum')(hidden_input)\n # else:\n # all_outs = LSTM(512, return_sequences=True, stateful=False,\n # input_shape=(args.num_frames, 512))(hidden_input)\n # # attention\n # attention = TimeDistributed(Dense(1, activation='tanh'))(all_outs)\n # # print(attention.shape)\n # attention = Flatten()(attention)\n # attention = Activation('softmax')(attention)\n # attention = RepeatVector(512)(attention)\n # attention = Permute([2, 1])(attention)\n # sent_representation = merge([all_outs, attention], mode='mul')\n # context = Lambda(lambda xin: K.sum(xin, axis=-2), output_shape=(512,))(sent_representation)\n # # print(context.shape)\n\n if mode == \"dqn\":\n h4 = Dense(512, activation='elu', name=\"fc\")(context)\n output = Dense(num_actions, name=\"output\")(h4)\n # elif mode == \"duel\":\n # value_hidden = Dense(512, activation='relu', name='value_fc')(context)\n # value = Dense(1, 
name=\"value\")(value_hidden)\n # action_hidden = Dense(512, activation='relu', name='action_fc')(context)\n # action = Dense(num_actions, name=\"action\")(action_hidden)\n # action_mean = Lambda(lambda x: tf.reduce_mean(x, axis=1, keep_dims=True), name='action_mean')(\n # action)\n # output = Lambda(lambda x: x[0] + x[1] - x[2], name='output')([action, value, action_mean])\n model = Model(inputs=input_data, outputs=output)\n print(model.summary())\n return model", "def resnet18(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18'], model_dir='.'), strict=False)\n return model", "def resnet18(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18'], model_dir='.'), strict=False)\n return model", "def resnet18(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18'], model_dir='.'), strict=False)\n return model", "def build_model(self):\n model = None\n if self.model_name == 'vgg16':\n model = vgg16(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'resnet':\n model = ResNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'densenet':\n model = DenseNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'mobilenet':\n model = MobileNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'efficientnet':\n model = efficientnet(input_shape=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'vit':\n model = VisionTransformer(image_size=self.ex_input_size,\n patch_size=vit_settings['patch_size'],\n num_layers=vit_settings['num_layers'],\n num_classes=num_classes,\n d_model=vit_settings['d_model'],\n num_heads=vit_settings['num_heads'],\n mlp_dim=vit_settings['mlp_dim'],\n channels=self.ex_channels,\n dropout=vit_settings['dropout']).build_VisionTransformer()\n model.load_weights(self.path_to_weights)\n model.summary()\n return model", "def create_model(X_train, lyrs=[16], act=\"relu\", opt='Adam', dr=0.2):\n\n # set random seed for reproducibility\n seed(42)\n tf.random.set_seed(42)\n\n model = Sequential()\n\n # create first hidden layer\n model.add(Dense(lyrs[0], input_dim=X_train.shape[1], activation=act))\n\n # create additional hidden layers\n for i in range(1, len(lyrs)):\n model.add(Dense(lyrs[i], activation=act))\n\n # dropout\n model.add(Dropout(dr))\n\n # create output layer\n model.add(Dense(1, activation=\"sigmoid\")) # output layer\n model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])\n\n return model", "def build_mobilenetv2_backbone(cfg, input_shape):\n stem = MobileStem(\n input_shape.channels,\n cfg.MODEL.MOBILENET.STEM_OUT_CHANNELS,\n cfg.MODEL.MOBILENET.NORM,\n cfg.MODEL.MOBILENET.ACTIVATION\n )\n\n model = MobileNetV2(\n stem,\n cfg.MODEL.MOBILENET.INVERTED_RESIDUAL_SETTING,\n cfg.MODEL.MOBILENET.NORM,\n cfg.MODEL.MOBILENET.ACTIVATION,\n cfg.MODEL.MOBILENET.NUM_CLASSES,\n cfg.MODEL.MOBILENET.OUT_FEATURES,\n )\n\n 
model.freeze(cfg.MODEL.BACKBONE.FREEZE_AT)\n return model", "def __init__(self, slug, num_filters=256, pretrained=True, bifpn=False):\n self.slug = slug\n\n super().__init__()\n if not pretrained:\n print(\"Caution, not loading pretrained weights.\")\n\n if slug == \"eff5\":\n basemodel = timm.create_model('tf_efficientnet_b5_ns', pretrained=pretrained)\n num_bottleneck_filters = 512\n else:\n assert False, \"Bad slug: %s\" % slug\n \n self.bifpn = bifpn\n if bifpn:\n self.BiFPN = BiFPN(num_filters)\n # Access resnet directly in forward pass; do not store refs here due to\n # https://github.com/pytorch/pytorch/issues/8392\n\n self.lateral4 = Conv1x1(num_bottleneck_filters, num_filters)\n self.lateral3 = Conv1x1(176, num_filters)\n self.lateral2 = Conv1x1(64, num_filters)\n self.lateral1 = Conv1x1(40, num_filters)\n\n self.smooth4 = Conv3x3(num_filters, num_filters)\n self.smooth3 = Conv3x3(num_filters, num_filters)\n self.smooth2 = Conv3x3(num_filters, num_filters)\n self.smooth1 = Conv3x3(num_filters, num_filters)\n \n self.enc1 = nn.Sequential(basemodel.blocks[0:2])\n self.enc2 = nn.Sequential(basemodel.blocks[2:3])\n self.enc3 = nn.Sequential(basemodel.blocks[3:5])\n self.enc4 = nn.Sequential(basemodel.blocks[5:7])\n \n self.enc0 = nn.Sequential(basemodel.conv_stem, basemodel.bn1, basemodel.act1)", "def create_model(max_seq_len, adapter_size=64):\n\n # adapter_size = 64 # see - arXiv:1902.00751\n\n # create the bert layer\n with tf.io.gfile.GFile(bert_config_file, \"r\") as reader:\n bc = StockBertConfig.from_json_string(reader.read())\n bert_params = map_stock_config_to_params(bc)\n bert_params.adapter_size = adapter_size\n bert = BertModelLayer.from_params(bert_params, name=\"bert\")\n\n input_ids = keras.layers.Input(shape=(max_seq_len,), dtype='int32', name=\"input_ids\")\n # token_type_ids = keras.layers.Input(shape=(max_seq_len,), dtype='int32', name=\"token_type_ids\")\n # output = bert([input_ids, token_type_ids])\n output = bert(input_ids)\n\n print(\"bert shape\", output.shape)\n cls_out = keras.layers.Lambda(lambda seq: seq[:, 0, :])(output)\n cls_out = keras.layers.Dropout(0.5)(cls_out)\n logits = keras.layers.Dense(units=1024, activation=\"tanh\")(cls_out)\n logits = keras.layers.Dropout(0.5)(logits)\n logits = keras.layers.Dense(units=2, activation=\"softmax\")(logits)\n\n # model = keras.Model(inputs=[input_ids, token_type_ids], outputs=logits)\n # model.build(input_shape=[(None, max_seq_len), (None, max_seq_len)])\n model = keras.Model(inputs=input_ids, outputs=logits)\n model.build(input_shape=(None, max_seq_len))\n\n # load the pre-trained model weights\n load_stock_weights(bert, bert_ckpt_file)\n\n # freeze weights if adapter-BERT is used\n if adapter_size is not None:\n freeze_bert_layers(bert)\n\n model.compile(optimizer=keras.optimizers.Adam(),\n loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=[keras.metrics.SparseCategoricalAccuracy(name=\"acc\")])\n\n model.summary()\n\n return model", "def createModel(self):\n outputs, inputs = baseUNet(self.input_shape,\n self.conv_depth,\n self.n_classes,\n self.init_w,\n self.dropout)\n \n if self.regression == True:\n outputs = Lambda(getPropOfGround)(outputs)\n \n model = Model(inputs = inputs,outputs = outputs)\n \n model.compile(optimizer = self.optimizer,\n loss=self.loss_function,\n metrics=self.metrics)\n\n if self.old_weights != None:\n model.set_weights(self.old_weights)\n self.model = model", "def build_model(self):\n self.model = models.Sequential()\n self.model.add(layers.Conv2D(32, 
(3, 3), activation='relu', input_shape=(28, 28, 1)))\n self.model.add(layers.MaxPooling2D((2, 2)))\n self.model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n self.model.add(layers.MaxPooling2D((2, 2)))\n self.model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n self.model.compile(optimizer=optimizers.RMSprop(lr=0.001), loss='mse', metrics=['mae'])\n self.model.add(layers.Flatten())\n self.model.add(layers.Dense(64, activation='relu'))\n self.model.add(layers.Dense(10, activation='softmax'))\n self.model.compile(optimizer='rmsprop',\n loss='categorical_crossentropy',\n metrics=['accuracy'])", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n\n return model" ]
[ "0.6753302", "0.6753302", "0.63931054", "0.63634723", "0.6265441", "0.6239585", "0.6229357", "0.6220196", "0.6198277", "0.6196694", "0.6196694", "0.6196694", "0.61152387", "0.6085224", "0.60785514", "0.6060138", "0.60452986", "0.6025176", "0.6025176", "0.6025176", "0.60139596", "0.600001", "0.5968062", "0.5965487", "0.5953345", "0.59395945", "0.59389675", "0.5927736", "0.59229404", "0.59194046", "0.5915818", "0.5915292", "0.59146357", "0.59046906", "0.58983445", "0.58981705", "0.5864004", "0.58599454", "0.58588815", "0.5857882", "0.58475775", "0.58407396", "0.58353186", "0.5833254", "0.58220065", "0.5812821", "0.58067495", "0.5797431", "0.57967716", "0.57953316", "0.5786206", "0.57854587", "0.5785175", "0.5784602", "0.57735467", "0.5753248", "0.57435685", "0.57399035", "0.5721629", "0.5709318", "0.5706614", "0.57050925", "0.5694125", "0.5690244", "0.5681912", "0.56730247", "0.56697106", "0.5668159", "0.5659783", "0.56571966", "0.5656449", "0.56557864", "0.56547743", "0.56407374", "0.5636694", "0.5628983", "0.5628983", "0.5628983", "0.5628983", "0.5628983", "0.5623108", "0.56228566", "0.5622672", "0.561639", "0.56023705", "0.560157", "0.5598056", "0.55913633", "0.55892164", "0.5587879", "0.5587879", "0.5587879", "0.55795133", "0.55769396", "0.556453", "0.5553944", "0.55522263", "0.55520064", "0.5543035", "0.55420125" ]
0.73481995
0
Kernel Density Estimation with Scipy
def kde_scipy(x, x_grid, bandwidth=0.2, **kwargs):
    # Note that scipy weights its bandwidth by the covariance of the
    # input data. To make the results comparable to the other methods,
    # we divide the bandwidth by the sample standard deviation here.
    #kde = gaussian_kde(x, bw_method=bandwidth / x.std(ddof=1), **kwargs)
    kde = gaussian_kde(x)
    return kde.evaluate(x_grid)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def kde_sklearn(x, x_grid, bandwidth=0.8, **kwargs):\n \n kde_skl = KernelDensity(bandwidth=bandwidth, **kwargs)\n #kde_skl = KernelDensity()\n kde_skl.fit(x[:, np.newaxis])\n # score_samples() returns the log-likelihood of the samples\n log_pdf = kde_skl.score_samples(x_grid[:, np.newaxis])\n \n pdf = np.exp(log_pdf)\n\n fig = plt.figure(figsize=(10,10))\n ax = fig.add_subplot(111)\n #ax.imshow((1,1), cmap=plt.cm.gist_earth_r,extent=[xmin, xmax])\n ax.plot(x_grid, pdf, '.', label=\"kernel = kde_sklearn gaussian\", markersize=2)\n ax.text(700, 0.0035, \"N={0} points\".format(x.shape[0]))\n ax.legend(loc='upper left')\n ax.set_xlim([min(x), max(x)])\n ax.set_ylim(-0.001, 0.006)\n plt.show()", "def kernel_density_estimation(x, sigma, n_bins, eps=1e-2):\n N, _, W, H = x.shape\n device = x.device\n ind = torch.linspace(0, n_bins, n_bins+1).unsqueeze(0).unsqueeze(-1).unsqueeze(-1).expand(N, -1, W, H).to(device)\n y = torch.exp((-1./sigma**2)*(x - ind)**2)\n y = threshold_and_normalize_pixels(y, eps=eps)\n return y", "def makeGaussianKernel(sigma: float) -> np.ndarray:\n\n # Your code here.\n kernel_size = 8*sigma+1\n kernel = np.zeros([kernel_size,kernel_size], dtype=float)\n center = kernel_size//2\n \n \n s = 2*(sigma**2)\n sum_val = 0\n for i in range(0,kernel_size):\n for j in range(0,kernel_size):\n x = i-center\n y = j-center\n kernel[i,j] = np.exp(-(x**2+y**2) / s)\n sum_val += kernel[i,j]\n #/(np.pi * s)\n sum_val = 1/sum_val\n print(\"here is the kernel\", kernel*sum_val)\n return kernel*sum_val", "def kde_scipy(x, x_grid, bandwidth=0.2, **kwargs):\n \n #kde = gaussian_kde(x, bw_method=bandwidth / x.std(ddof=1), **kwargs)\n kde = gaussian_kde(x)\n pdf = kde.evaluate(x_grid)\n \n fig = plt.figure(figsize=(10,10))\n ax = fig.add_subplot(111)\n #ax.imshow((1,1), cmap=plt.cm.gist_earth_r,extent=[xmin, xmax])\n ax.plot(x_grid, pdf, 'k.', label=\"kernel = kde_scipy gaussian\", markersize=2)\n ax.text(700, 0.0035, \"N={0} points\".format(x.shape[0]))\n ax.legend(loc='upper left')\n ax.set_xlim([min(x), max(x)])\n ax.set_ylim(-0.001, 0.006)\n plt.show()", "def find_kernel_value(xt, xi, h, d):\n first_term = 1 / ((2 * np.pi) ** (d / 2))\n second_term_1 = np.linalg.norm(xt - xi) / (2 * h ** 2)\n kernel2 = first_term * np.exp(-second_term_1)\n return kernel2", "def find_density(attr, D, h):\n d = D.shape[1]\n n = D.shape[0]\n total = 0\n for xi in D:\n kernel = find_kernel_value(attr, xi, h, d)\n total += kernel\n return total / (n * h ** d)", "def _kernel(self, bw, X, x):\n return (1.0 / np.sqrt(2 * np.pi) / bw) * np.exp(\n -((X - x) ** 2) / (bw ** 2 * 2.0)\n )", "def Kernel(x, y):\n\n Result = (np.dot(x_train[x, :], x_train[y, :])+1)**5 # Polynomial\n #Result = (np.dot(x_train[x, :], x_train[y, :])+1) # Linear\n #Gaussian\n \"\"\"\n sigma = 1\n if np.ndim(x_train[x, :]) == 1 and np.ndim(x_train[y, :]) == 1:\n Result = np.exp(- (np.linalg.norm(x_train[x, :] - x_train[y, :], 2)) ** 2 / (2 * sigma ** 2))\n elif (np.ndim(x_train[x, :]) > 1 and np.ndim(x_train[y, :]) == 1) or (np.ndim(x_train[x, :]) == 1 and np.ndim(x_train[y, :]) > 1):\n Result = np.exp(- (np.linalg.norm(x_train[x, :] - x_train[y, :], 2, axis=1) ** 2) / (2 * sigma ** 2))\n elif np.ndim(x_train[x, :]) > 1 and np.ndim(x_train[y, :]) > 1:\n Result = np.exp(- (np.linalg.norm(x[:, np.newaxis] - y[np.newaxis, :], 2, axis=2) ** 2) / (2 * sigma ** 2))\n \"\"\"\n return Result", "def gaussian_kernel(values, \n bandwidth):\n # Compute the kernel value for the given values\n temp_1 = np.multiply(np.pi, np.square(bandwidth))\n temp_2 = 
np.divide(1, np.sqrt(temp_1))\n temp_3 = np.divide(values, np.square(bandwidth))\n kernel_value = np.exp(np.multiply(np.negative(0.5), temp_3))\n # Return the computed kernel value\n return kernel_value", "def test_gauss_kernel():\n\n gauss = gauss_kernel(2, 5)\n\n assert gauss.shape == (5, 5)\n assert gauss[2, 2] == 0.039788735772973836", "def _kernel(self, point, observation, bandwidth):\n denom = bandwidth * ((2*math.pi)**.5) \n num = math.exp(-0.5 * ((point-observation)/bandwidth)**2)\n return num/denom", "def kernel_filter(self):\n self.config.logger.info(\"Creating and processing kernel density...\")\n\n # reset start time\n t0 = time.time()\n\n # instantiate kernel density class\n self.kd = KernelDensity(self.config.spatial_resolution, self.spat_coords, self.final_landclasses,\n self.config.kernel_distance, self.ngrids, self.order_rules)\n\n # preprocess year-independent kernel density data\n self.lat, self.lon, self.cellindexresin, self.pft_maps, self.kernel_maps, self.kernel_vector, self.weights = self.kd.preprocess_kernel_density()\n\n # log processing time\n self.config.logger.info('PERFORMANCE: Kernel density filter prepared in {0} seconds'.format(time.time() - t0))", "def kde_statsmodels_u(x, x_grid, bandwidth=0.2, **kwargs):\n \n kde = KDEUnivariate(x)\n #kde.fit(bw=bandwidth, **kwargs)\n kde.fit()\n pdf = kde.evaluate(x_grid)\n\n fig = plt.figure(figsize=(10,10))\n ax = fig.add_subplot(111)\n #ax.imshow((1,1), cmap=plt.cm.gist_earth_r,extent=[xmin, xmax])\n ax.plot(x_grid, pdf, 'k.', label=\"kernel = kde_statsmodels_u gaussian\", markersize=2)\n ax.text(700, 0.0035, \"N={0} points\".format(x.shape[0]))\n ax.legend(loc='upper left')\n ax.set_xlim([min(x), max(x)])\n ax.set_ylim(-0.001, 0.006)\n plt.show()", "def test_uv_degrid_gaussian_kernel():\n\n layout = read_layout(layout_path=f\"{test_data}/test_mwa.txt\")\n xyz = enh_xyz(layout=layout, latitude=mwa_geo.latitude.radians)\n uvw = xyz_uvw(xyz=xyz, freq=freq, dec0=mwa_geo.latitude.radians, ha0=0)\n uv = uv_degrid(\n max_lambda=1400, nside=20, uvw=uvw, sigma=3, kersize=21, kernel=\"gaussian\"\n )\n\n assert uv.shape == (20, 20)\n assert uv[0, 0] == 1.295932713086053e-05", "def _densityctr(self, rangex, rangey, dim = misc.DEF_VIS_DIM):\n gr = N.meshgrid(rangex, rangey)\n x = gr[0].flatten()\n y = gr[1].flatten()\n xdata = N.concatenate((x[:, N.newaxis], y[:, N.newaxis]), axis = 1)\n dmu = self.mu[:, dim]\n dva = self._get_va(dim)\n den = GM.fromvalues(self.w, dmu, dva).pdf(xdata, log = True)\n den = den.reshape(len(rangey), len(rangex))\n\n return gr[0], gr[1], den", "def _kernel(self, x, y, t):\n return (self.C / (2 * np.pi * self.sigma_x * self.sigma_y * t)) * \\\n tf.exp(- self.beta * t - (tf.square(x)/tf.square(self.sigma_x) + tf.square(y)/tf.square(self.sigma_y)) / (2*t))", "def sbil_kernel(delta, obs_stats, t, ar, s, kernel='Gaussian'):\n #np.random.shuffle(delta)\n print(delta)\n sbil_kernel_estimate = []\n obs_stats = obs_stats[delta > 0]\n\n sim_theta = [select.generate_theta_sv(ar) for i in range(s)]\n sim_theta = np.matrix(sim_theta).T\n\n # Generate out sample of time series.\n sim_y = [sim.sim_sv(t, sim_theta[0, i], sim_theta[1, i], sim_theta[2, i],\n sim_theta[3, i], 1) for i in range(s)]\n \n # Generate out sample statistics.\n sim_stats = [sum_stat.sv_stats(delta, sim_y[i]) for i\n in range(s)]\n\n sim_theta_mean = sum(sim_theta.T)/s\n\n # Compute sample variance.\n u = sum([np.square(sim_theta[:, i] - sim_theta_mean.T)\n for i in range(s)])/s\n\n # Standardize parameter vectors.\n sim_theta = 
np.hstack([(sim_theta[:, i] - sim_theta_mean.T)/np.sqrt(u)\n for i in range(s)])\n\n global theta_sigma\n global theta_mean\n theta_sigma = np.sqrt(u)\n theta_mean = sim_theta_mean\n\n # Standardize observed statistics.\n obs_stats = (obs_stats - np.mean(sim_stats, 0))/np.std(sim_stats, 0)\n\n # Compute sample mean.\n sim_stats_mean = sum(sim_stats)/s\n\n # Compute sample variance.\n u = sum([np.square(sim_stats[i]-sim_stats_mean) for i in range(s)])/s\n\n # Standardize simulated statistics.\n sim_stats = [(sim_stats[i] - sim_stats_mean)/np.sqrt(u) for i in range(s)]\n\n # Identify k nearest neighbors.\n norms = [np.linalg.norm(obs_stats-sim_stats[i]) for i in range(s)]\n closest_index = np.argsort(norms)\n closest_thetas = [sim_theta[:, i] for i in closest_index[0:round(s*0.03)]]\n\n # Compute k-nn estimate.\n estimate_standard = (sum(closest_thetas)/len(closest_thetas))\n\n estimate = np.array(estimate_standard.T)*np.array(\n theta_sigma.T) + np.array(theta_mean)\n\n return estimate", "def kernel(self, cosmo, z, ell):\n z = np.atleast_1d(z)\n # Extract parameters\n pzs, bias = self.params\n # Retrieve density kernel\n kernel = density_kernel(cosmo, pzs, bias, z, ell)\n return kernel", "def gauss_kernel(n_fwhm,sigma):\n\n x_length = int(n_fwhm * sigma + 0.5) #Add 0.5 to approximate to nearest integer\n y_length = x_length\n \n \n x, y = mgrid[-x_length:x_length+1, -y_length:y_length+1]\n g = numpy.exp(-(x**2/(2*(float(sigma)**2))+y**2/(2*(float(sigma)**2))))\n return g / g.sum()", "def my_kernel(X, Y):\n S = 0.84 # parameter from rhos\n\n if dset == 1:\n gamma = 0.0005\n else:\n gamma = 0.00087 # maximise variance of kernel matrix\n if np.array_equal(X, Y):\n N = X.shape[0]\n M = (1 - S) * np.ones((N, N)) + S * np.eye(N)\n else:\n M = 1\n\n pairwise_sq_dists = cdist(X, Y, 'sqeuclidean')\n K = exp(-gamma * pairwise_sq_dists) * M\n return K", "def ksdensity(data, width=0.3):\r\n def ksd(x_axis):\r\n def n_pdf(x, mu=5., sigma=3.): # normal pdf\r\n u = (x - mu) / abs(sigma)\r\n y = (1 / (np.sqrt(2 * np.pi) * abs(sigma)))\r\n y *= np.exp(-u * u / 2)\r\n return y\r\n prob = [n_pdf(x_i, data, width) for x_i in x_axis]\r\n pdf = [np.average(pr) for pr in prob] # each row is one x value\r\n return np.array(pdf)\r\n return ksd", "def gauss_kernel(X, test_locs, X_org, test_locs_org, sigma, sigma0, epsilon):\r\n DXT = Pdist2(X, test_locs)\r\n DXT_org = Pdist2(X_org, test_locs_org)\r\n # Kx = torch.exp(-(DXT / sigma0))\r\n Kx = (1 - epsilon) * torch.exp(-(DXT / sigma0) - DXT_org / sigma) + epsilon * torch.exp(-DXT_org / sigma)\r\n return Kx", "def density(self, arg):\n return self.gb2_density(np.exp(arg)) * np.exp(arg)", "def test_kde_scipy(limits):\n data = np.random.normal(0, 1, 10000)\n grid, density_own = _kde(data, custom_lims=limits)\n density_sp = gaussian_kde(data).evaluate(grid)\n np.testing.assert_almost_equal(density_own.sum(), density_sp.sum(), 1)", "def probability_density(self, X):\n raise NotImplementedError", "def GaussianKernel(radius, std):\n size = 2 * radius + 1\n weight = torch.ones(size, size)\n weight.requires_grad = False\n for i in range(-radius, radius+1):\n for j in range(-radius, radius+1):\n dis = (i * i) + (j * j)\n weight[i+radius][j+radius] = np.exp(-dis / (2 * std * std))\n weight = weight / weight.sum()\n return weight", "def density_kernel(cosmo, pzs, bias, z, ell):\n if any(isinstance(pz, rds.delta_nz) for pz in pzs):\n raise NotImplementedError(\n \"Density kernel not properly implemented for delta redshift distributions\"\n )\n # stack the dndz of all redshift 
bins\n dndz = np.stack([pz(z) for pz in pzs], axis=0)\n # Compute radial NLA kernel: same as clustering\n if isinstance(bias, list):\n # This is to handle the case where we get a bin-dependent bias\n b = np.stack([b(cosmo, z) for b in bias], axis=0)\n else:\n b = bias(cosmo, z)\n radial_kernel = dndz * b * bkgrd.H(cosmo, z2a(z))\n # Normalization,\n constant_factor = 1.0\n # Ell dependent factor\n ell_factor = 1.0\n return constant_factor * ell_factor * radial_kernel", "def kernel_gaussiano(image: np.ndarray, sigma: float, kind: str = 'low') -> np.ndarray:\n U, V = fourier_meshgrid(image)\n D = fourier_distance(U, V)\n H = np.exp( (-1.0 * D) / (2.0 * sigma**2) )\n \n if kind == 'high' or kind == 'highpass':\n H = 1.0 - H\n \n return H", "def get_density_kernel(cosmo, *, dndz):\n z_n, n = _check_array_params(dndz, 'dndz')\n _check_background_spline_compatibility(cosmo, dndz[0])\n # this call inits the distance splines neded by the kernel functions\n chi = cosmo.comoving_radial_distance(1./(1.+z_n))\n status = 0\n wchi, status = lib.get_number_counts_kernel_wrapper(cosmo.cosmo,\n z_n, n,\n len(z_n),\n status)\n check(status, cosmo=cosmo)\n return chi, wchi", "def even_kernel_der(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n #Derivatives of Legendre polynomials\n DlegPolys = legp_der(mu, N)\n \n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs[0::2]*DlegPolys[0::2] \n\n return ker.sum() / (4.0*np.pi)", "def projection_kernel(self, dataset, testset, C):\n N = dataset.shape[0]\n D = testset.shape[0]\n K = np.zeros((D, N), dtype=float)\n for i in range(D):\n for j in range(N):\n K[i, j] = self.Gaussian_Kernel(testset[i], dataset[j], C)\n\n return K", "def gauss_kern(sigma,h):\n h1 = h\n h2 = h\n x, y = np.mgrid[0:h2, 0:h1]\n x = x-h2/2\n y = y-h1/2\n # sigma = 10.0\n g = np.exp( -( x**2 + y**2 ) / (2*sigma**2) )\n return g / g.sum()", "def density(self, arg):\n out = 0\n for weight, mean, std in zip(self.weights, self.means, self.stds):\n scale = std * self.data['maturity']**.5\n loc = ((mean - self.data['riskfree']) *\n self.data['maturity'] - scale**2)\n out += weight * scs.norm(loc, scale).pdf(arg)\n return out", "def gaussian_kernel(size, sigma): \n \n kernel = np.zeros((size, size))\n\n #####################################\n # START YOUR CODE HERE #\n #####################################\n k = (size - 1) / 2\n sigma_sq = sigma ** 2\n pi_sigma = 1/(2 * np.pi * sigma_sq)\n for i in range(size):\n for j in range(size):\n kernel[i, j] = pi_sigma * np.exp(-0.5 * ((i-k)**2 + (j-k)**2) / (sigma_sq))\n ######################################\n # END OF YOUR CODE #\n ######################################\n\n return kernel", "def __guassian_kernel(x, sigma=200):\n return (1 / (sqrt(2.*pi) * sigma)) * exp(-x ** 2 / (2.*sigma**2))", "def rate_density(x, a):\n return a * x", "def gaussian_1xDerivative_kernel(windowX, windowY, sigma):\n # See [http://homepages.inf.ed.ac.uk/rbf/CVonline/LOCAL_COPIES/MARBLE/low/edges/canny.htm]\n X, Y = createKernalWindowRanges(windowX, windowY, increment)\n \n g_dx_kernel = gaussianFirstDerivative(X, 0, sigma) * gaussianNormalised(Y, 0, sigma)\n gSum = np.sum(np.abs(g_dx_kernel))\n \n if gSum == 0:\n print \"Warning dx_g_kernel:: Not normalising by sum of values, as sum = \" + str(gSum)\n return (g_dx_kernel)\n else:\n return (g_dx_kernel / gSum)", "def density(x, m, h):\n \n n = x.size\n rho = np.zeros((n,1))\n \n for i in range(0, n):\n # calculate vector between two particles\n uij = x[i] - x\n # calculate contribution due to neighbors\n rho_ij 
= m*kernel( uij, h, '0' )\n # accumulate contributions to the density\n rho[i] = rho[i] + np.sum(rho_ij)\n \n return rho", "def aGMKernel(Ni,Nj,alpha,gamma):\n \n #Dimension of data\n d = Ni.mu.size\n I = sp.eye(d)\n\n ##Normalisation\n deltaMean = (Ni.mu-Nj.mu).reshape(d,)\n SigmaSum = alpha * (Ni.Sigma+Nj.Sigma) + I/gamma\n Kij = (linalg.det(2*gamma*alpha * Ni.Sigma + I) * linalg.det(2*gamma*alpha * Nj.Sigma + I))**0.25\n Kij *= sp.exp(-0.5*sp.dot(deltaMean.T,linalg.solve(SigmaSum,deltaMean)))\n Kij /= sp.sqrt(linalg.det(SigmaSum*gamma)) \n \n return Kij", "def gaussian_kernel(size, sigma):\n\n kernel = np.zeros((size, size))\n\n ### YOUR CODE HERE\n k = (size-1)/2\n factor = 1/(2*np.pi*sigma**2)\n for i in range(size):\n for j in range(size):\n exponent = -((i-k)**2 +(j-k)**2)/(2*sigma**2)\n kernel[i,j] = factor*np.exp(exponent)\n ### END YOUR CODE\n\n return kernel", "def gkern2d(kernlen=21, nsig=3):\n x = np.linspace(-nsig, nsig, kernlen+1)\n kern1d = np.diff(st.norm.cdf(x))\n kern2d = np.outer(kern1d, kern1d)\n return kern2d/kern2d.max()", "def compute_kernel_matrix(x,y,sigma):\n m = len(x)\n\n s = np.zeros((m,m))\n for i in range(len(x)):\n for j in range(i+1):\n s[i,j] = np.exp(-((x[i]-y[j])**2)/(2*sigma**2))\n for i in range(2,m):\n for j in range(0,i):\n s[i,j] = s[j,i]\n return s", "def fitKDE(obs, bWidth=0.25, kernel=\"gaussian\", x=None):\n obs = fix_shape(obs)\n kde = KernelDensity(kernel=kernel, bandwidth=bWidth).fit(obs)\n if x is None:\n x = np.unique(obs).reshape(-1, 1)\n x = fix_shape(x)\n logProb = kde.score_samples(x) # log(density)\n pdf = pd.Series(np.exp(logProb), index=x.flatten())\n return pdf", "def get_gauss_kernel(sigma, samples):\n p = ny.ceil (2*ny.sqrt(2*ny.log(2))*sigma)\n r = ny.linspace(-p, p, samples)\n x,y = ny.meshgrid(r, r)\n b=bivariate_normal(x,y,sigma,sigma)\n A=(1/ny.sum(b))\n B=A*b\n return x,y,B", "def kernel_rbf(x, y,gamma):\r\n return np.exp(- gamma * np.linalg.norm(x- y)**2)", "def kde(x, mu, sigma, DIMENSION=2):\n dist_sq = np.sum((x - mu)**2, axis=1)\n kde_val = (1/((sigma**2)*2*np.pi))**(0.5*DIMENSION)*np.exp(-dist_sq/(2*(sigma**2)))\n return np.mean(kde_val)", "def estimateDensity(self, sampledPDF, h=1, kernel='box'):\n\n if kernel=='box':\n kernel = np.ones((h,h))\n else:\n kernel = np.ones((h,h))\n PDF = image.correlate(input=sampledPDF, weights=kernel, mode='mirror')\n PDF = PDF/np.sum(PDF) #normalize pdf\n\n return PDF", "def method_ERVKDE(data, xs, ys, DIMENSION = 2):\n mu, sigma = rvkde_sigmas(data, int(len(data)/10), DIMENSION)\n sig_avg = np.mean(np.std(data))\n diff = ((4*sig_avg**5/(len(data)*(DIMENSION+2)))**(1/(DIMENSION+4))) - np.median(sigma)\n elevated_sigma = np.array([s + diff for s in sigma])\n return get_density(xs, ys, mu, elevated_sigma, DIMENSION)", "def gaussian_1yDerivative_kernel(windowX, windowY, sigma):\n # See [http://homepages.inf.ed.ac.uk/rbf/CVonline/LOCAL_COPIES/MARBLE/low/edges/canny.htm]\n X, Y = createKernalWindowRanges(windowX, windowY, increment)\n \n g_dy_kernel = gaussianFirstDerivative(Y, 0, sigma) * gaussianNormalised(X, 0, sigma)\n gSum = np.sum(np.abs(g_dy_kernel))\n \n if gSum == 0:\n print \"Warning dy_g_kernel:: Not normalising by sum of values, as sum = \" + str(gSum)\n return (g_dy_kernel)\n else:\n return (g_dy_kernel / gSum)", "def cal_gaussian_process(b, sigma2, X_train, y_train, X_test):\n n = X_train.shape[0]\n p = X_test.shape[0]\n\n K_n = np.array([[kernel(X_train[i], X_train[j], b) for i in range(n)] for j in range(n)])\n inv = np.linalg.inv(np.diag([sigma2] * n) + K_n)\n miu = 
np.zeros(p)\n Sigma = np.zeros(p)\n \n for j in range(p): # for every new point x0 in testing data.\n x0 = X_test[j]\n K_Dn = np.zeros(n) # initialize K_Dn \n for i in range(n):\n K_Dn[i] = kernel(X_train[i], x0, b) # calculate every item in K_Dn\n \n miu[j] = K_Dn.dot(inv).dot(y_train)[0] # calculate new distribution parameters\n Sigma[j] = sigma2 + kernel(x0, x0, b) - K_Dn.dot(inv).dot(K_Dn.T)\n \n return miu, Sigma", "def KDE_smoothing(self, model, groupname):\n\t\t#Load in necessary info for either data or noise depending on the passed model\n\t\tif model == 'Signal':\n\t\t\tdic = self.signal\n\t\telif model == 'Noise':\n\t\t\tdic = self.noise\n\t\telse:\n\t\t\t#Raise error here\n\t\t\tpass\n\t\t\n\t\tn_params = dic[groupname]['dimension']\n\t\tKDE_ranges = dic[groupname]['KDE ranges']\n\t\tbandwidths = dic[groupname]['KDE bandwidths']\n\t\tdata = dic[groupname]['data']\n\t\tgrid_points = dic[groupname]['KDE points']\n\t\t\n\t\t#Initialize grid over which to do KDE binning\n\t\tgrid_values = [None]*n_params\n\t\tfor i in xrange(n_params):\n\t\t\tgrid_values[i] = np.linspace(start=KDE_ranges[i,0], stop=KDE_ranges[i,1], num=grid_points[i])\n\t\t\n\t\tif n_params == 1:\n\t\t\tlocation_kde = np.transpose(np.array(grid_values))\n\t\telse:\n\t\t\tlocation_kde = np.array(np.meshgrid(*grid_values,indexing='ij'))\n\t\t\tlocation_kde = np.rollaxis(location_kde, 0, (n_params + 1))\n\t\t\n\t\tgrid_shape = ()\n\t\tfor i in xrange(n_params):\n\t\t\tgrid_shape += (grid_points[i],)\n\t\theight_kde = np.zeros(grid_shape)\n\t\t\n\t\t#Calculate height contribution of each data point over the grid\n\t\tfor i in xrange(len(data)):\n\t\t\theight_kde += np.exp( -0.5 * np.sum( ((data[i,:] - location_kde)/bandwidths)**2., axis=-1) ) / (np.product(bandwidths) * (2.*np.pi)**(n_params/2.))\t\n\n\t\theight_kde /= float(len(data))\n\t\t\n\t\treturn location_kde, height_kde", "def kde2D(x, y, bandwidth, xbins=100j, ybins=100j, **kwargs):\n\n # create grid of sample locations (default: 100x100)\n xx, yy = np.mgrid[x.min():x.max():xbins, \n y.min():y.max():ybins]\n\n xy_sample = np.vstack([yy.ravel(), xx.ravel()]).T\n xy_train = np.vstack([y, x]).T\n\n kde_skl = KernelDensity(bandwidth=bandwidth, **kwargs)\n kde_skl.fit(xy_train)\n\n # score_samples() returns the log-likelihood of the samples\n z = np.exp(kde_skl.score_samples(xy_sample))\n return xx, yy, np.reshape(z, xx.shape)", "def gaussian_kernel(windowX, windowY, sigma):\n X,Y = createKernalWindowRanges(windowX, windowY, increment)\n \n gKernel = gaussianNormalised(X, 0, sigma) * gaussianNormalised(Y, 0, sigma)\n gSum = np.sum(np.abs(gKernel))\n \n if gSum == 0:\n print \"Warning gaussian_kernel:: Not normalising by sum of values, as sum = \" + str(gSum)\n return (gKernel)\n else:\n return (gKernel / gSum)", "def gkern(kernlen=21, nsig=3):\n import scipy.stats as st\n\n x = np.linspace(-nsig, nsig, kernlen)\n kern1d = st.norm.pdf(x)\n kernel_raw = np.outer(kern1d, kern1d)\n kernel = kernel_raw / kernel_raw.sum()\n return kernel", "def gkern(kernlen=21, nsig=3):\n import scipy.stats as st\n\n x = np.linspace(-nsig, nsig, kernlen)\n kern1d = st.norm.pdf(x)\n kernel_raw = np.outer(kern1d, kern1d)\n kernel = kernel_raw / kernel_raw.sum()\n return kernel", "def cost_function(x, N, w, dt):\n yh = np.abs(fftkernel(x, w / dt)) # density\n # formula for density\n C = np.sum(yh ** 2) * dt - 2 * np.sum(yh * x) * \\\n dt + 2 / np.sqrt(2 * np.pi) / w / N\n C = C * N * N\n # formula for rate\n # C = dt*sum( yh.^2 - 2*yh.*y_hist + 2/sqrt(2*pi)/w*y_hist )\n return C, yh", "def 
make_conditional_density(bgm_fit, threshold, sigma, width):\n pass", "def gkern1(kernlen=21, nsig=3):\n interval = (2*nsig+1.)/(kernlen)\n x = np.linspace(-nsig-interval/2., nsig+interval/2., kernlen+1) \n kern1d = np.diff(scipy.stats.norm.cdf(x))\n kernel_raw = np.sqrt(np.outer(kern1d, kern1d))\n kernel = kernel_raw/kernel_raw.sum()\n \n return kernel", "def createFromData(data, r):\n m = DensityEstimator()\n #print \"Creating denisty estimator from data...\"\n\n m.mixture_dimensions = data.shape[1]\n m.dataset_size = data.shape[0]\n m.threshold = r\n \n # Normalisation of the data set\n m.normalisation_vector_min = np.array([0.0]*data.shape[1])\n m.normalisation_vector_max = np.array([0.0]*data.shape[1])\n for i in range(0, data.shape[1]):\n #data[:, i] -= min(data[:, i])\n #print i, \": \", max(data[:, i])\n m.normalisation_vector_min[i] = min(data[:, i]) \n m.normalisation_vector_max[i] = max(data[:, i])\n if m.normalisation_vector_min[i] == m.normalisation_vector_max[i]:\n m.normalisation_vector_min[i] = 0.0\n \n #data[:, i] -= m.normalisation_vector_min[i]\n #data[:, i] /= (m.normalisation_vector_max[i] - m.normalisation_vector_min[i])\n #data[:, i] /= (m.normalisation_vector_max[i] )\n #print m.normalisation_vector[i]\n #return\n \n # Split into clusters / kernels\n t=set()\n d=set(range(0,data.shape[0]))\n def get_random():\n sample = random.randint( 0, data.shape[0]-1 )\n while sample in t:\n sample = random.randint( 0, data.shape[0] - 1 )\n t.add(sample)\n return sample\n \n sets=[]\n while len(t)<data.shape[0]:\n i = get_random()\n sets.append(set([i]))\n #print \"Cluster at\", i, \n for j in d-t:\n if np.sqrt(sum((data[i,:]-data[j,:])**2)) < r:\n sets[-1].add(j)\n t.add(j)\n #print \"members = \", len (sets[-1])\n new_set = list()\n for i in sets[-1]:\n new_set.append(data[i, :])\n sets[-1] = np.array(new_set)\n \n #print \"Cluster count=\",len(sets)\n #for i in sets:\n #print 'sets: ',i\n \n # Turn each cluster into a Gaussian kernel\n\n # Any set that has less than 3 members merge with nearests\n minimum_members = 2 #data.shape[1] # mixture dimensions to avoid singular covariance.\n #print sets\n #for i in sets:\n #if len(i) < minimum_members:\n #print \"Small set \"\n #for pt in i:\n #close = [None, 1e100000]\n #for j in sets:\n ##print i\n #print j\n #if (i == j).all():\n #print \"same\"\n #continue\n #print \"diff\"\n #if len(j) < minimum_members:\n #continue # err, bad, should allow possibile join smalls\n #mu = np.sum(i, 0) / (1.0 * i.shape[0])\n #dist = sum([(m - p)**2 for m, p in zip(mu, pt)])\n #print dist\n #if dist < close[1]:\n #close[1] = dist\n #close[0] = j\n #close[0].add(pt)\n \n \n m.kernels = []\n for i in sets:\n if len(i) >= minimum_members:\n m.kernels.append(GaussKernel(i, i.shape[0]/(1. 
* data.shape[0])))\n else:\n print \"LOST SOME\"\n #else:\n #print \"->Warning: dropping data point as it lies alone, singular\"\n #print \"ok.\"\n return m", "def gaussian2d(filter_size=5, sig=1.0):\n ax = np.arange(-filter_size // 2 + 1., filter_size // 2 + 1.)\n xx, yy = np.meshgrid(ax, ax)\n kernel = np.exp(-0.5 * (np.square(xx) + np.square(yy)) / np.square(sig))\n return kernel / np.sum(kernel)", "def _kernel(self, x1, x2, beta=1):\n d = (x1 - x2)**2\n return np.exp(-beta * d)", "def _calc_density(x: np.ndarray, y: np.ndarray):\n from scipy.stats import gaussian_kde\n\n # Calculate the point density\n xy = np.vstack([x, y])\n z = gaussian_kde(xy)(xy)\n\n min_z = np.min(z)\n max_z = np.max(z)\n\n # Scale between 0 and 1\n scaled_z = (z - min_z) / (max_z - min_z)\n\n return scaled_z", "def _CCDkernel(CCDx=10, CCDy=10, width_x=0.35, width_y=0.4, size=21):\n x = np.arange(0, size)\n y = np.arange(0, size)\n xx, yy = np.meshgrid(x, y)\n xx = xx.flatten()\n yy = yy.flatten()\n CCD = models.Gaussian2D(1., CCDx, CCDy, width_x, width_y, 0.)\n CCDdata = CCD.eval(xx, yy, 1., CCDx, CCDy, width_x, width_y, 0.).reshape((size, size))\n fileIO.writeFITS(CCDdata, 'CCDPSF.fits', int=False)\n return CCDdata", "def Pkernel(x):\n\n m = (x < 0.) & (x >= 1.)\n x[x < 0.] = np.zeros(np.sum(x < 0.))\n x[x >= 1.] = np.zeros(np.sum(x >= 1.))\n x = np.sqrt(x)\n\n result = np.log(2.) * np.log(2.) - np.pi *np.pi / 6. \\\n + 2. * spence(0.5 + 0.5 * x) - (x + x*x*x) / (1. - x*x) \\\n + (np.log(1. + x) - 2. * np.log(2.)) * np.log(1. - x) \\\n + 0.5 * (np.log(1. - x) * np.log(1. - x) - np.log(1. + x) * np.log(1. + x)) \\\n + 0.5 * (1. + x*x*x*x) / (1. - x*x) * (np.log(1. + x) - np.log(1. - x))\n result[x <= 0.] = np.zeros(np.sum(x <= 0.))\n result[x >= 1.] = np.zeros(np.sum(x >= 1.))\n return result", "def estimate_density(\n # pylint: disable=too-many-arguments,too-many-locals\n x,\n bw=\"silverman\",\n grid_len=256,\n extend=True,\n bound_correction=False,\n adaptive=False,\n extend_fct=0.5,\n bw_fct=1,\n bw_return=False,\n custom_lims=None,\n):\n\n # Check `x` is from appropiate type\n x = check_type(x)\n \n # Assert `bw_fct` is numeric and positive\n # Note: a `bool` will not trigger the first AssertionError, \n # but it is not a problem since True will be 1\n # and False will be 0, which triggers the second AssertionError.\n assert isinstance(bw_fct, (int, float))\n assert bw_fct > 0\n\n # Preliminary calculations\n x_len = len(x)\n x_min = x.min()\n x_max = x.max()\n x_std = (((x ** 2).sum() / x_len) - (x.sum() / x_len) ** 2) ** 0.5\n x_range = x_max - x_min\n\n # Length warning. 
Not completely sure if it is necessary\n len_warning(x_len)\n \n # Determine grid\n grid_min, grid_max, grid_len = get_grid(\n x_min, x_max, x_std, extend_fct, grid_len, custom_lims, extend, bound_correction\n )\n \n grid_counts = histogram1d(x, bins=grid_len, range=(grid_min, grid_max))\n grid_edges = np.linspace(grid_min, grid_max, num=grid_len + 1) \n\n # Bandwidth estimation\n bw = bw_fct * get_bw(x, bw, grid_counts=grid_counts, x_std=x_std, x_range=x_range)\n\n # Density estimation\n if adaptive:\n grid, pdf = kde_adaptive(x, bw, grid_edges, grid_counts, grid_len, bound_correction)\n else:\n grid, pdf = kde_convolution(x, bw, grid_edges, grid_counts, grid_len, bound_correction)\n \n if bw_return:\n return grid, pdf, bw\n else:\n return grid, pdf", "def KernelTest(x, y):\n\n Result = (np.dot(x_test[x, :], x_train[y, :])+1)**5 # Polynomial\n # Result = (np.dot(x_train[x, :], x_train[y, :])+1) # Linear\n # Sum = DotProduct(x, y)\n #Sum = 0.0\n #for i in range(2):\n # Sum = Sum + x_train[x, i]*x_train[y, i]\n # Result = (Sum+1)**5\n \"\"\"\n #Gaussian\n sigma = 1\n if np.ndim(x_test[x, :]) == 1 and np.ndim(x_train[y, :]) == 1:\n Result = np.exp(- (np.linalg.norm(x_test[x, :] - x_train[y, :], 2)) ** 2 / (2 * sigma ** 2))\n elif (np.ndim(x_test[x, :]) > 1 and np.ndim(x_train[y, :]) == 1) or (np.ndim(x_test[x, :]) == 1 and np.ndim(x_train[y, :]) > 1):\n Result = np.exp(- (np.linalg.norm(x_test[x, :] - x_train[y, :], 2, axis=1) ** 2) / (2 * sigma ** 2))\n elif np.ndim(x_test[x, :]) > 1 and np.ndim(x_train[y, :]) > 1:\n Result = np.exp(- (np.linalg.norm(x[:, np.newaxis] - y[np.newaxis, :], 2, axis=2) ** 2) / (2 * sigma ** 2))\n \"\"\"\n return Result", "def gaussian_kernel(training_ex, landmark, sigma=0.1):\n return np.exp(-(np.linalg.norm(training_ex - landmark) ** 2 / (2 * (sigma ** 2))))", "def KSStat(xs,ys,reweight=False,cdf_x=None,cdf_y=None,data_range=None):\n if cdf_x is None and cdf_y is None and data_range is None:\n data_range = list(set(xs)) + list(set(ys))\n if cdf_x is None:\n cdf_x = cum_density_func(xs,norm=True,rank=False,data_range=data_range)\n if cdf_y is None:\n cdf_y = cum_density_func(ys,norm=True,rank=False,data_range=data_range)\n keys = set(cdf_x.keys()+cdf_y.keys())\n SP = []\n for k in keys:\n if k in cdf_x and k in cdf_y:\n SP.append((cdf_x[k],cdf_y[k]))\n if reweight:\n return np.max([np.abs(s-p)/np.sqrt(p*(1.0-p)) for (s,p) in SP])\n else:\n return np.max([np.abs(s-p) for (s,p) in SP])", "def density(self):\n return self.nnz/self.dim", "def kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n \n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs*legPolys \n\n return ker.sum() / (4.0*np.pi)", "def _estimate_density(self, x):\n\n self.density_, self.bins_ = np.histogram(x, bins=10, density=True)", "def N2_f(d1,d2,rho):\n import statsmodels.sandbox.distributions.extras as extras\n muStandardNormal=0.0 # mean of a standard normal distribution \n varStandardNormal=1.0 # variance of standard normal distribution \n upper=([d1,d2]) # upper bound for two values\n v=varStandardNormal # simplify our notations\n mu=muStandardNormal # simplify our notations\n covM=([v,rho],[rho,v])\n return extras.mvnormcdf(upper,mu,covM)", "def kde_convolution(x, bw, grid_edges, grid_counts, grid_len, bound_correction):\n # pylint: disable=too-many-arguments\n # Calculate relative frequencies per bin\n bin_width = grid_edges[1] - grid_edges[0]\n f = grid_counts / bin_width / len(x) \n\n # Bandwidth must consider the 
bin/grid width\n bw /= bin_width\n\n # Instantiate kernel signal. \n # `kernel_n` works perfectly, didn't know why until:\n # Read something that said ~3 times standard deviation on each tail,\n # which is roughly similar to 2 * pi = 6.28 for two tails.\n # See: https://stackoverflow.com/questions/2773606/gaussian-filter-in-matlab\n # Makes sense since almost all density is between \\pm 3 SDs\n kernel_n = int(bw * 2 * np.pi)\n kernel = gaussian(kernel_n, bw)\n\n if bound_correction:\n npad = int(grid_len / 5)\n f = np.concatenate([f[npad - 1:: -1], f, f[grid_len : grid_len - npad - 1: -1]])\n pdf = convolve(f, kernel, mode=\"same\", method=\"direct\")[npad : npad + grid_len]\n pdf /= bw * (2 * np.pi) ** 0.5\n else:\n pdf = convolve(f, kernel, mode=\"same\", method=\"direct\") / (bw * (2 * np.pi) ** 0.5) \n \n grid = (grid_edges[1:] + grid_edges[:-1]) / 2 \n return grid , pdf", "def KDE(x, (ll, ul)=('',''),res=1024.):\n #r.assign(\"x\", x)\n \n if ll :\n rn=arange(ll,ul,(ul-ll)/res)\n #print x.shape,rn.shape\n est = kde.gaussian_kde(x.ravel()).evaluate(rn)\n #r.assign(\"ll\", ll)\n #r.assign(\"ul\", ul)\n #est = r('density(x,from=ll, to=ul)') #trims the density borders\n else:\n ll = min(x)\n ul = max(x)\n rn=arange(ll,ul,(ul-ll)/res)\n est = kde.gaussian_kde(x).evaluate(rn)\n #est = r('density(x)')\n print 'No - KDE'\n return {'y':est,'x':rn}", "def gc_prob_density(r):\n return np.exp(_interp_ln_dens(r))", "def density_2d(self, x, y, Rs, rho0, gamma_inner, gamma_outer, center_x=0, center_y=0):\n x_ = x - center_x\n y_ = y - center_y\n R = np.sqrt(x_ ** 2 + y_ ** 2)\n x = R / Rs\n Fx = self._f(x, gamma_inner, gamma_outer)\n return 2 * rho0 * Rs * Fx", "def gauss_kernels(size, sigma=1.0):\n if size < 3:\n size = 3\n\n m = size / 2\n x, y = np.mgrid[-m:m + 1, -m:m + 1]\n kernel = np.exp(-(x * x + y * y) / (2 * sigma * sigma))\n kernel_sum = kernel.sum()\n\n if not sum == 0:\n kernel = kernel / kernel_sum\n\n return kernel", "def get_density(matrix):\n return matrix.getnnz() / (matrix.shape[0] * matrix.shape[1])", "def get_gaussian(nsig=1.5, kernlen=13):\n\n interval = (2*nsig+1.)/(kernlen)\n x = np.linspace(-nsig-interval/2., nsig+interval/2., kernlen+1)\n kern1d = np.diff(st.norm.cdf(x))\n kernel_raw = np.sqrt(np.outer(kern1d, kern1d))\n kernel = kernel_raw/kernel_raw.sum()\n return theano.shared(kernel.astype(\"float32\"), borrow=True)", "def _gaussian_kernel(self, xdata, l1, sigma_f, sigma_noise=2e-2):\n num_total_points = tf.shape(xdata)[1]\n\n # Expand and take the difference\n xdata1 = tf.expand_dims(xdata, axis=1) # [B, 1, num_total_points, x_size]\n xdata2 = tf.expand_dims(xdata, axis=2) # [B, num_total_points, 1, x_size]\n diff = xdata1 - xdata2 # [B, num_total_points, num_total_points, x_size]\n\n # [B, y_size, num_total_points, num_total_points, x_size]\n if self._kernel == 'PER':\n norm = 2*tf.square(tf.math.sin(3.14*diff[:, None, :, :, :])) / l1[:, :, None, None, :]\n norm = tf.reduce_sum(norm, -1) # [B, data_size, num_total_points, num_total_points]\n # [B, y_size, num_total_points, num_total_points]\n kernel = tf.square(sigma_f)[:, :, None, None] * tf.exp(-norm)\n\n else: # if kernel is normal gaussian\n norm = tf.square(diff[:, None, :, :, :] / l1[:, :, None, None, :])\n norm = tf.reduce_sum(norm, -1) # [B, data_size, num_total_points, num_total_points]\n # [B, y_size, num_total_points, num_total_points]\n kernel = tf.square(sigma_f)[:, :, None, None] * tf.exp(-0.5*norm)\n\n # Add some noise to the diagonal to make the cholesky work.\n kernel += (sigma_noise**2) * 
tf.eye(num_total_points)\n\n return kernel", "def gaussian_kernel(dim, sigma):\n kernel = np.zeros(dim)\n\n if dim%2 == 0:\n begin = dim//2-1\n else:\n begin = dim//2\n\n for i in range(dim):\n kernel[i] = gaussian(i-begin, sigma)\n\n return kernel", "def even_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n \n\n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs[0::2]*legPolys[0::2] \n\n return ker.sum() / (4.0*np.pi)", "def test_MKDADensity_kernel_instance(testdata_cbma):\n kern = MKDAKernel(r=5)\n meta = MKDADensity(kern, null_method=\"montecarlo\", n_iters=10)\n results = meta.fit(testdata_cbma)\n assert isinstance(results, nimare.results.MetaResult)", "def __call__(self, X, Y=None, *, dist_kwargs=None, **kernel_kwargs):", "def apply_gaussian_resolution(self,params,data,fwhm=1,dE=0.01,E_max=100):\n print('\\n################### CONVOLUTION #####################\\n')\n print(f'\\n\\tConvolution with Gaussian function, FWHM = {fwhm} meV\\n')\n\n data.fwhm = fwhm\n c = fwhm/2.35482\n\n data.dE = dE\n data.E_max = E_max\n data.spectra_E = np.arange(0,data.E_max+data.dE,data.dE)\n data.spectra_num_E = len(data.spectra_E)\n data.spectra = np.zeros((data.spectra_num_E,params.num_Qpoints))\n data.smooth_spectra = np.zeros((data.spectra_num_E,params.num_Qpoints))\n structure_factors = []\n energies = []\n\n ### sum intensity of degenerate bands\n if params.sum_degenerate_bands == True:\n print('\\n\\tSumming degenerate bands before convolution (using convolution dE as tolerance)\\n')\n for q in range(params.num_Qpoints):\n sfac = data.structure_factors[:,q]\n energy = data.frequencies[f'{q}']\n reduced_energies = []\n summed_sfac = []\n while True:\n if len(energy) == 0:\n break\n test_energy = energy[0]\n reduced_energies.append(test_energy)\n indicies = np.intersect1d(np.argwhere(energy <= (test_energy+data.dE)),\n np.argwhere(energy > (test_energy-data.dE)))\n summed_sfac.append(sfac[indicies].sum())\n sfac = np.delete(sfac,indicies)\n energy = np.delete(energy,indicies)\n energies.append(reduced_energies)\n structure_factors.append(summed_sfac)\n else:\n print('\\n\\tWARNING: You should definitely sum degenerate bands!!!\\n')\n for q in range(params.num_Qpoints):\n energies.append(data.frequencies[f'{q}'])\n structure_factors.append(data.structure_factors[:,q])\n\n ### populate array for heatmap\n ### try statement takes care of negative energies\n for q in range(params.num_Qpoints):\n for b in range(len(structure_factors[q][:])):\n try: # if there are negative modes, argwhere returns an empty vector and the slice crashes\n data.spectra[np.argwhere(data.spectra_E <= \n energies[q][b]).max(),q] = structure_factors[q][b]\n except:\n continue\n\n if params.bose_factor == True:\n print('\\n\\tWARNING: Bose factor isnt verified. 
Need to compare to SNAXS.\\n')\n if params.temperature < 5:\n temperature = 5\n else:\n temperature = params.temperature\n inds = np.argwhere(data.spectra_E <= 0.5)\n tmp_e = np.copy(data.spectra_E)\n tmp_e[inds] = 0.5\n bose = 1+1/(np.exp(tmp_e/(constants.kb*1000*temperature))-1)\n bose = np.tile(bose.reshape((data.spectra_num_E,1)),reps=(1,params.num_Qpoints))\n data.spectra = np.multiply(data.spectra,bose)\n data.spectra = data.spectra/np.max(data.spectra)\n\n ### gaussian convolution using for loops, slow but very little memory utilization\n g_energy = np.append(data.spectra_E-data.spectra_E.max(),data.spectra_E[1:])\n gaussian = np.exp(-0.5*g_energy**2/c**2)/c/np.sqrt(2*np.pi)\n gaussian = np.tile(gaussian.reshape((gaussian.shape[0],1)),(1,data.num_Qpoints))\n tmp = np.append(data.spectra,data.spectra,axis=0)[1:,:]\n for e in range(data.spectra_num_E):\n if e%50 == 0:\n print(f'\\t------ {e}/{data.spectra_num_E} -------')\n data.smooth_spectra[e,:] = np.trapz(tmp*np.roll(gaussian,shift=e,axis=0),g_energy,axis=0)\n print('\\n\\tDone convolving!\\n')\n data.smooth_spectra = data.smooth_spectra/np.max(data.smooth_spectra)\n\n# if params.random_background == True:\n# data.smooth_spectra = data.smooth_spectra+(np.random.normal(0,1,\n# (data.smooth_spectra.shape[0],data.smooth_spectra.shape[1])))*0.001\n \n plt.imshow(data.smooth_spectra,origin='lower',aspect='auto',cmap='hot')\n plt.show()", "def test_density(self):\n sol = Mader(p_cj=3.0e11, d_cj=8.0e5, gamma=3.0, u_piston=0.0)\n r = np.array([0.7, 0.8])\n t = 6.25e-6\n solrt = sol(r, t)\n np.testing.assert_allclose(solrt.density[0], 2.26666666666663)", "def rmse_cdf(xs,ys):\n\n data_range = list(set(xs+ys))\n cdf_x = cum_density_func(xs, norm=True, data_range=data_range)\n cdf_y = cum_density_func(ys, norm=True, data_range=data_range)\n return rmse(cdf_x, cdf_y, include_absent=False)", "def density(self, x):\n\t\tN = len(self.train_data)\n\t\tpoints = list(self.train_data)\n\t\tdists = [np.linalg.norm(x-point)**2 for point in points]\n\t\texps = [np.exp(-dist / (2 * (self.bandwidth ** 2))) for dist in dists]\n\t\tunnormalized_sum = sum(exps)\n\t\tprobability = (1 / N) * self.normalizing_constant() * unnormalized_sum\n\t\treturn probability", "def get_density(xs, ys, mu, sigma, DIMENSION=2):\n return np.array([[kde(np.array([x,y]), mu, sigma, DIMENSION) for x in xs] for y in ys])", "def gaussian_kernel(shape: Tuple[int, int]=(3, 3), sigma: float=0.5):\n m, n = [int((ss - 1.) / 2.) for ss in shape]\n y, x = np.ogrid[-m:m + 1, -n:n + 1]\n kernel = np.exp(-(x * x + y * y) / (2. * sigma * sigma))\n kernel[kernel < np.finfo(kernel.dtype).eps * kernel.max()] = 0\n sumh = kernel.sum()\n if sumh != 0:\n kernel /= sumh\n return kernel", "def GaussianKernel(shape=(3, 3), sigma=0.5):\r\n radius_x, radius_y = [(radius-1.)/2. 
for radius in shape]\r\n y_range, x_range = np.ogrid[-radius_y:radius_y+1, -radius_x:radius_x+1]\r\n h = np.exp(- (x_range*x_range + y_range*y_range) / (2.*sigma*sigma))\r\n h[h < np.finfo(h.dtype).eps*h.max()] = 0\r\n sumofh = h.sum()\r\n if sumofh != 0:\r\n h /= sumofh\r\n return h", "def gauss_smooth(data, sigma):\n\t\t\t# make the kernel 5 sigmas wide in each direction\n\t\t\tkernel = stats.norm.pdf(np.arange(-5*sigma, (5*sigma)+1), scale=sigma)\n\t\t\t\n\t\t\treturn sp.ndimage.convolve1d(data, kernel, axis=2)", "def gaussian_kernel(sigma, truncate=4.0):\n\n sigma = float(sigma)\n radius = int(truncate * sigma + 0.5)\n\n x, y = np.mgrid[-radius:radius + 1, -radius:radius + 1]\n sigma = sigma**2\n\n k = 2 * np.exp(-0.5 * (x**2 + y**2) / sigma)\n k = k / np.sum(k)\n\n return k", "def moffat_kernel(n_fwhm,beta,r_s):\n\n x_length = int(n_rs * r_s + 0.5) #Add 0.5 to approximate to nearest integer\n y_length = x_length\n \n\n x, y = mgrid[-x_length:x_length+1, -y_length:y_length+1]\n\t\n m = 1. /((1+(x**2+y**2)/r_s**2)**beta)\n\t\t\n\n return m / m.sum()", "def get_fiber_density():\n return Global_Module.global_fiber_density", "def _multivariate_gaussian(self, x, mu_k, sigma_k):\n return multivariate_normal.pdf(x, mu_k, sigma_k)", "def gaussian_kernel(size, sigma):\n\n m, n = [(s - 1.) / 2. for s in size]\n y, x = np.ogrid[-m:m+1, -n:n+1]\n h = np.exp(-(x*x + y*y) / (2. * sigma * sigma))\n h[h < np.finfo(h.dtype).eps*h.max()] = 0\n sumh = h.sum()\n if sumh != 0: h /= sumh\n return h", "def density(self):\r\n return self.count_ones() / float(self.xspan * self.yspan)", "def _normal_distribution_cdf(x, stddev):\n return 0.5 * (1.0 + tf.erf(x / (math.sqrt(2) * stddev + 1e-20)))", "def test_density_2d(self):\n # almost spherical case\n x = 1.\n y = 1.\n e1, e2 = 5e-5, 0.\n sigma = 1.\n amp = 2.\n f_ = self.gaussian_kappa_ellipse.density_2d(x, y, amp, sigma, e1, e2)\n f_sphere = amp / (2.*np.pi*sigma**2) * np.exp(-(x*x+y*y)/2./sigma**2)\n npt.assert_almost_equal(f_, f_sphere, decimal=4)" ]
[ "0.6899102", "0.68290377", "0.6813299", "0.68107426", "0.66186744", "0.66110486", "0.6379504", "0.6353802", "0.6331665", "0.62971747", "0.62869173", "0.6239997", "0.623953", "0.6189176", "0.6178441", "0.6158369", "0.6149099", "0.61140794", "0.6109274", "0.6099322", "0.6058081", "0.6015082", "0.6010351", "0.60073364", "0.6004894", "0.6000507", "0.5989932", "0.59830326", "0.5978582", "0.59382045", "0.5923472", "0.5916415", "0.5907154", "0.5900273", "0.5896372", "0.5879446", "0.587772", "0.58763325", "0.58686256", "0.5865615", "0.5841993", "0.582814", "0.5827112", "0.5824393", "0.58242565", "0.5823167", "0.58040714", "0.5802314", "0.57969093", "0.579548", "0.5795294", "0.57930595", "0.57927895", "0.57883054", "0.57883054", "0.57815", "0.57760626", "0.5772277", "0.5769526", "0.57680464", "0.57640696", "0.57615805", "0.57366914", "0.5731549", "0.5731075", "0.57263637", "0.5724637", "0.5718049", "0.57080984", "0.5705914", "0.5703738", "0.56910014", "0.56866336", "0.56866306", "0.5679128", "0.5670464", "0.56654364", "0.56649643", "0.56575716", "0.56520766", "0.5649844", "0.56475335", "0.56457525", "0.56395143", "0.5627203", "0.56235754", "0.56014746", "0.5599398", "0.5598865", "0.5596671", "0.55931526", "0.55896145", "0.55896014", "0.55881095", "0.5583752", "0.5582239", "0.5580435", "0.5580295", "0.5579479", "0.5576446" ]
0.6502326
6
Adds a record that a certain peer has a block.
def peer_has_block( self, header_hash: bytes32, peer_id: bytes32, weight: uint128, height: uint32, new_peak: bool ) -> None: if self.target_peak is not None and header_hash == self.target_peak.header_hash: self.peers_changed.set() if header_hash in self.peak_to_peer: self.peak_to_peer[header_hash].add(peer_id) else: self.peak_to_peer[header_hash] = {peer_id} if len(self.peak_to_peer) > 256: # nice power of two item = self.peak_to_peer.popitem(last=False) # Remove the oldest entry # sync target hash is used throughout the sync process and should not be deleted. if self.target_peak is not None and item[0] == self.target_peak.header_hash: self.peak_to_peer[item[0]] = item[1] # Put it back in if it was the sync target self.peak_to_peer.popitem(last=False) # Remove the oldest entry again if new_peak: self.peer_to_peak[peer_id] = Peak(header_hash, height, weight)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def announce_new_block(block):\n for peer in peers:\n url = \"{}add_block\".format(peer)\n requests.post(url, data=json.dumps(block.__dict__, sort_keys=True))", "def add_block(self, block, proof):\n previous_hash = self.last_block.hash\n\n if previous_hash != block.previous_hash:\n print('Previous hash is not matched.')\n return False\n\n if not Blockchain.is_valid_proof(block, proof):\n print('Proof is not valid.')\n return False\n\n block.hash = proof\n self.chain.append(block)\n return True", "def announce_new_block(block):\n for peer in peers:\n url = \"{}add_block\".format(peer)\n headers = {'Content-Type': \"application/json\"}\n requests.post(url,\n data=json.dumps(block.__dict__, sort_keys=True),\n headers=headers)", "def add_block(self, block, proof):\n previous_hash = self.last_block.hash\n \n if previous_hash != block.previous: \n return False\n \n if not Blockchain.is_valid_proof(block, proof):\n return False\n \n block.hash = proof \n self.chain.append(block)\n return True", "def announce_new_block(block):\n for peer in peers:\n url = \"{}/add_block\".format(peer)\n headers = {'Content-Type': \"application/json\"}\n requests.post(url,\n data=json.dumps(block.__dict__, sort_keys=True),\n headers=headers)", "def add_block(self, block, proof):\n # print(block.__dict__)\n try:\n previous_hash = self.last_block.hash\n except AttributeError:\n previous_hash = block.previous_hash\n\n if previous_hash != block.previous_hash:\n print(\"Hashes don't match\\n{}\\n{}\".format(previous_hash, block.previous_hash))\n return False\n\n if not self.is_valid_proof(block, proof):\n print(\"block is not valid\")\n return False\n\n block.hash = proof\n self.chain.append(block)\n return True", "def add_block(self, block, proof):\r\n previous_hash = self.last_block.hash\r\n if previous_hash != block.previous_hash:\r\n return False\r\n if not self.is_valid_proof(block, proof):\r\n return False\r\n block.hash = proof\r\n self.chain.append(block)\r\n return True", "def add_block(self, block, proof):\n previous_hash = self.last_block.hash\n\n if previous_hash != block.previous_hash:\n return False\n\n if not Blockchain.is_valid_proof(block, proof):\n return False\n\n block.hash = proof\n self.chain.append(block)\n return True", "def add_to_chain(self, block: Block):\n if self.proof_of_work(block):\n self.blocks.append(block)", "def addBlock(self, newBlock):\n newBlock.index = len(self.chain)\n newBlock.previousHash = self.chain[-1].hash\n newBlock.mineBlock(self.difficulty)\n self.chain.append(newBlock)\n self.writeBlocks()", "def add_block(self, block):\n if block.index >= len(self.blockchain):\n self.blockchain.append(block)\n else:\n self.blockchain[block.index] = block\n self.write_to_disk()", "def add(self, block: Block):\n self._buffer.append(block)", "def addBlock(self, block):\n if self.validateBlock(block, self.getLastBlock()):\n self.__chain.append(block)\n self.__currentTransactionsList = [] # Remove transactions from the list\n return True\n return False", "def _add_block(self, block: Block, write_to_ledger, mined_ourselves):\n\n with self.lock:\n self.mining_flag = GIVEN_BLOCK\n\n with self.lock:\n if block.parent_hash in self.blocks:\n parent_node = self.blocks[block.parent_hash]\n block_node = BlockNode(block, parent_node)\n parent_node.add_child(block_node)\n\n self._update_latest_pointers(block_node) # Check if the new block makes a longer chain and switch to it\n\n self.log.debug(GREEN + \"%s:[%s] added block to fork %d at depth %d\" + NC, block.miner_key_hash[:6], 
time.ctime(block.create_time), block_node.fork_num, block_node.depth)\n # self.log.debug(\"Added block to blockchain\")\n elif block.is_root:\n block_node = BlockNode(block, None)\n self.root = block_node\n # self.log.debug(\"Added block as root\")\n self.log.debug(GREEN + \"%s:[%s] added block as root %d\" + NC, block.miner_key_hash[:6], time.ctime(block.create_time), block_node.tree_num)\n self._update_latest_pointers(block_node)\n # self.messages.clear()\n Blockchain.num_trees += 1\n\n self._add_block_msgs(block) # Add all new posts to message table\n if self.message_num % 10000 == 0:\n self._write_new_messages(self.message_num-10000) # Save new messages to file\n self.blocks[block.block_hash] = block_node\n self.total_blocks += 1\n\n # Update ledger.txt with newly added block\n if write_to_ledger:\n with open(self.ledger_file, 'a') as ledger:\n ledger.write(repr(block) + \"\\n\")\n\n if self.total_blocks % STATS_UPDATE_INTERVAL == 0: # Every few blocks update stats.txt\n self._write_stats_file()\n\n self.mining_flag = CONTINUE_MINING\n\n if not mined_ourselves:\n self._update_msg_queue(block)\n\n if time.time() - self.last_msg_update > MSG_UPDATE_DELAY:\n self._reinit_message_table()\n self.last_msg_update = time.time()\n\n return True", "async def add_block(\n self,\n block: FullBlock,\n peer: Optional[WSChiaConnection] = None,\n raise_on_disconnected: bool = False,\n ) -> Optional[Message]:\n if self.sync_store.get_sync_mode():\n return None\n\n # Adds the block to seen, and check if it's seen before (which means header is in memory)\n header_hash = block.header_hash\n if self.blockchain.contains_block(header_hash):\n return None\n\n pre_validation_result: Optional[PreValidationResult] = None\n if (\n block.is_transaction_block()\n and block.transactions_info is not None\n and block.transactions_info.generator_root != bytes([0] * 32)\n and block.transactions_generator is None\n ):\n # This is the case where we already had the unfinished block, and asked for this block without\n # the transactions (since we already had them). Therefore, here we add the transactions.\n unfinished_rh: bytes32 = block.reward_chain_block.get_unfinished().get_hash()\n unf_block: Optional[UnfinishedBlock] = self.full_node_store.get_unfinished_block(unfinished_rh)\n if (\n unf_block is not None\n and unf_block.transactions_generator is not None\n and unf_block.foliage_transaction_block == block.foliage_transaction_block\n ):\n # We checked that the transaction block is the same, therefore all transactions and the signature\n # must be identical in the unfinished and finished blocks. 
We can therefore use the cache.\n pre_validation_result = self.full_node_store.get_unfinished_block_result(unfinished_rh)\n assert pre_validation_result is not None\n block = dataclasses.replace(\n block,\n transactions_generator=unf_block.transactions_generator,\n transactions_generator_ref_list=unf_block.transactions_generator_ref_list,\n )\n else:\n # We still do not have the correct information for this block, perhaps there is a duplicate block\n # with the same unfinished block hash in the cache, so we need to fetch the correct one\n if peer is None:\n return None\n\n block_response: Optional[Any] = await peer.call_api(\n FullNodeAPI.request_block, full_node_protocol.RequestBlock(block.height, True)\n )\n if block_response is None or not isinstance(block_response, full_node_protocol.RespondBlock):\n self.log.warning(\n f\"Was not able to fetch the correct block for height {block.height} {block_response}\"\n )\n return None\n new_block: FullBlock = block_response.block\n if new_block.foliage_transaction_block != block.foliage_transaction_block:\n self.log.warning(f\"Received the wrong block for height {block.height} {new_block.header_hash}\")\n return None\n assert new_block.transactions_generator is not None\n\n self.log.debug(\n f\"Wrong info in the cache for bh {new_block.header_hash}, there might be multiple blocks from the \"\n f\"same farmer with the same pospace.\"\n )\n # This recursion ends here, we cannot recurse again because transactions_generator is not None\n return await self.add_block(new_block, peer)\n state_change_summary: Optional[StateChangeSummary] = None\n ppp_result: Optional[PeakPostProcessingResult] = None\n async with self.blockchain.priority_mutex.acquire(priority=BlockchainMutexPriority.high):\n # After acquiring the lock, check again, because another asyncio thread might have added it\n if self.blockchain.contains_block(header_hash):\n return None\n validation_start = time.time()\n # Tries to add the block to the blockchain, if we already validated transactions, don't do it again\n npc_results = {}\n if pre_validation_result is not None and pre_validation_result.npc_result is not None:\n npc_results[block.height] = pre_validation_result.npc_result\n\n # Don't validate signatures because we want to validate them in the main thread later, since we have a\n # cache available\n pre_validation_results = await self.blockchain.pre_validate_blocks_multiprocessing(\n [block], npc_results, validate_signatures=False\n )\n added: Optional[AddBlockResult] = None\n pre_validation_time = time.time() - validation_start\n try:\n if len(pre_validation_results) < 1:\n raise ValueError(f\"Failed to validate block {header_hash} height {block.height}\")\n if pre_validation_results[0].error is not None:\n if Err(pre_validation_results[0].error) == Err.INVALID_PREV_BLOCK_HASH:\n added = AddBlockResult.DISCONNECTED_BLOCK\n error_code: Optional[Err] = Err.INVALID_PREV_BLOCK_HASH\n elif Err(pre_validation_results[0].error) == Err.TIMESTAMP_TOO_FAR_IN_FUTURE:\n raise TimestampError()\n else:\n raise ValueError(\n f\"Failed to validate block {header_hash} height \"\n f\"{block.height}: {Err(pre_validation_results[0].error).name}\"\n )\n else:\n result_to_validate = (\n pre_validation_results[0] if pre_validation_result is None else pre_validation_result\n )\n assert result_to_validate.required_iters == pre_validation_results[0].required_iters\n (added, error_code, state_change_summary) = await self.blockchain.add_block(\n block, result_to_validate, None\n )\n if added == 
AddBlockResult.ALREADY_HAVE_BLOCK:\n return None\n elif added == AddBlockResult.INVALID_BLOCK:\n assert error_code is not None\n self.log.error(f\"Block {header_hash} at height {block.height} is invalid with code {error_code}.\")\n raise ConsensusError(error_code, [header_hash])\n elif added == AddBlockResult.DISCONNECTED_BLOCK:\n self.log.info(f\"Disconnected block {header_hash} at height {block.height}\")\n if raise_on_disconnected:\n raise RuntimeError(\"Expected block to be added, received disconnected block.\")\n return None\n elif added == AddBlockResult.NEW_PEAK:\n # Only propagate blocks which extend the blockchain (becomes one of the heads)\n assert state_change_summary is not None\n ppp_result = await self.peak_post_processing(block, state_change_summary, peer)\n\n elif added == AddBlockResult.ADDED_AS_ORPHAN:\n self.log.info(\n f\"Received orphan block of height {block.height} rh {block.reward_chain_block.get_hash()}\"\n )\n else:\n # Should never reach here, all the cases are covered\n raise RuntimeError(f\"Invalid result from add_block {added}\")\n except asyncio.CancelledError:\n # We need to make sure to always call this method even when we get a cancel exception, to make sure\n # the node stays in sync\n if added == AddBlockResult.NEW_PEAK:\n assert state_change_summary is not None\n await self.peak_post_processing(block, state_change_summary, peer)\n raise\n\n validation_time = time.time() - validation_start\n\n if ppp_result is not None:\n assert state_change_summary is not None\n await self.peak_post_processing_2(block, peer, state_change_summary, ppp_result)\n\n percent_full_str = (\n (\n \", percent full: \"\n + str(round(100.0 * float(block.transactions_info.cost) / self.constants.MAX_BLOCK_COST_CLVM, 3))\n + \"%\"\n )\n if block.transactions_info is not None\n else \"\"\n )\n self.log.log(\n logging.WARNING if validation_time > 2 else logging.DEBUG,\n f\"Block validation time: {validation_time:0.2f} seconds, \"\n f\"pre_validation time: {pre_validation_time:0.2f} seconds, \"\n f\"cost: {block.transactions_info.cost if block.transactions_info is not None else 'None'}\"\n f\"{percent_full_str} header_hash: {header_hash} height: {block.height}\",\n )\n\n # This code path is reached if added == ADDED_AS_ORPHAN or NEW_TIP\n peak = self.blockchain.get_peak()\n assert peak is not None\n\n # Removes all temporary data for old blocks\n clear_height = uint32(max(0, peak.height - 50))\n self.full_node_store.clear_candidate_blocks_below(clear_height)\n self.full_node_store.clear_unfinished_blocks_below(clear_height)\n if peak.height % 1000 == 0 and not self.sync_store.get_sync_mode():\n await self.sync_store.clear_sync_info() # Occasionally clear sync peer info\n\n state_changed_data: Dict[str, Any] = {\n \"transaction_block\": False,\n \"k_size\": block.reward_chain_block.proof_of_space.size,\n \"header_hash\": block.header_hash,\n \"height\": block.height,\n \"validation_time\": validation_time,\n \"pre_validation_time\": pre_validation_time,\n }\n\n if block.transactions_info is not None:\n state_changed_data[\"transaction_block\"] = True\n state_changed_data[\"block_cost\"] = block.transactions_info.cost\n state_changed_data[\"block_fees\"] = block.transactions_info.fees\n\n if block.foliage_transaction_block is not None:\n state_changed_data[\"timestamp\"] = block.foliage_transaction_block.timestamp\n\n if block.transactions_generator is not None:\n state_changed_data[\"transaction_generator_size_bytes\"] = len(bytes(block.transactions_generator))\n\n 
state_changed_data[\"transaction_generator_ref_list\"] = block.transactions_generator_ref_list\n if added is not None:\n state_changed_data[\"receive_block_result\"] = added.value\n\n self._state_changed(\"block\", state_changed_data)\n\n record = self.blockchain.block_record(block.header_hash)\n if self.weight_proof_handler is not None and record.sub_epoch_summary_included is not None:\n if self._segment_task is None or self._segment_task.done():\n self._segment_task = asyncio.create_task(self.weight_proof_handler.create_prev_sub_epoch_segments())\n return None", "def validate_and_add_block(self, block):\n\n # 1. Validate transaction(s) in block\n tx = block.transactions\n if not self.validate_transaction(tx):\n print(\"Block contains invalid transactions.\")\n return\n\n # 2. Hash transaction(s)\n tx_hash = HashAssist.hash_value(value=tx.to_string_for_hashing())\n\n # 3. Validate header\n header_string = block.prev_hash + tx_hash + block.nonce\n header_hash = HashAssist.hash_value(header_string)\n if not block.header_hash == header_hash:\n print(\"Block header invalid!\")\n return\n\n self.blocks.append(block)", "def append_block_dict(self, record):\n for block in record._blocks:\n self._block_dict[block].append(record)", "def add(self, record):\n if record.name != 'consensus':\n self.members.append(record)", "async def add_unfinished_block(\n self,\n block: UnfinishedBlock,\n peer: Optional[WSChiaConnection],\n farmed_block: bool = False,\n block_bytes: Optional[bytes] = None,\n ) -> None:\n receive_time = time.time()\n\n if block.prev_header_hash != self.constants.GENESIS_CHALLENGE and not self.blockchain.contains_block(\n block.prev_header_hash\n ):\n # No need to request the parent, since the peer will send it to us anyway, via NewPeak\n self.log.debug(\"Received a disconnected unfinished block\")\n return None\n\n # Adds the unfinished block to seen, and check if it's seen before, to prevent\n # processing it twice. This searches for the exact version of the unfinished block (there can be many different\n # foliages for the same trunk). This is intentional, to prevent DOS attacks.\n # Note that it does not require that this block was successfully processed\n if self.full_node_store.seen_unfinished_block(block.get_hash()):\n return None\n\n block_hash = block.reward_chain_block.get_hash()\n\n # This searched for the trunk hash (unfinished reward hash). 
If we have already added a block with the same\n # hash, return\n if self.full_node_store.get_unfinished_block(block_hash) is not None:\n return None\n\n peak: Optional[BlockRecord] = self.blockchain.get_peak()\n if peak is not None:\n if block.total_iters < peak.sp_total_iters(self.constants):\n # This means this unfinished block is pretty far behind, it will not add weight to our chain\n return None\n\n if block.prev_header_hash == self.constants.GENESIS_CHALLENGE:\n prev_b = None\n else:\n prev_b = self.blockchain.block_record(block.prev_header_hash)\n\n # Count the blocks in sub slot, and check if it's a new epoch\n if len(block.finished_sub_slots) > 0:\n num_blocks_in_ss = 1 # Curr\n else:\n curr = self.blockchain.try_block_record(block.prev_header_hash)\n num_blocks_in_ss = 2 # Curr and prev\n while (curr is not None) and not curr.first_in_sub_slot:\n curr = self.blockchain.try_block_record(curr.prev_hash)\n num_blocks_in_ss += 1\n\n if num_blocks_in_ss > self.constants.MAX_SUB_SLOT_BLOCKS:\n # TODO: potentially allow overflow blocks here, which count for the next slot\n self.log.warning(\"Too many blocks added, not adding block\")\n return None\n\n # The clvm generator and aggregate signature are validated outside of the lock, to allow other blocks and\n # transactions to get validated\n npc_result: Optional[NPCResult] = None\n pre_validation_time = None\n\n async with self.blockchain.priority_mutex.acquire(priority=BlockchainMutexPriority.high):\n start_header_time = time.time()\n _, header_error = await self.blockchain.validate_unfinished_block_header(block)\n if header_error is not None:\n if header_error == Err.TIMESTAMP_TOO_FAR_IN_FUTURE:\n raise TimestampError()\n else:\n raise ConsensusError(header_error)\n validate_time = time.time() - start_header_time\n self.log.log(\n logging.WARNING if validate_time > 2 else logging.DEBUG,\n f\"Time for header validate: {validate_time:0.3f}s\",\n )\n\n if block.transactions_generator is not None:\n pre_validation_start = time.time()\n assert block.transactions_info is not None\n try:\n block_generator: Optional[BlockGenerator] = await self.blockchain.get_block_generator(block)\n except ValueError:\n raise ConsensusError(Err.GENERATOR_REF_HAS_NO_GENERATOR)\n if block_generator is None:\n raise ConsensusError(Err.GENERATOR_REF_HAS_NO_GENERATOR)\n if block_bytes is None:\n block_bytes = bytes(block)\n\n height = uint32(0) if prev_b is None else uint32(prev_b.height + 1)\n npc_result = await self.blockchain.run_generator(block_bytes, block_generator, height)\n pre_validation_time = time.time() - pre_validation_start\n\n # blockchain.run_generator throws on errors, so npc_result is\n # guaranteed to represent a successful run\n assert npc_result.conds is not None\n pairs_pks, pairs_msgs = pkm_pairs(npc_result.conds, self.constants.AGG_SIG_ME_ADDITIONAL_DATA)\n if not cached_bls.aggregate_verify(\n pairs_pks, pairs_msgs, block.transactions_info.aggregated_signature, True\n ):\n raise ConsensusError(Err.BAD_AGGREGATE_SIGNATURE)\n\n async with self.blockchain.priority_mutex.acquire(priority=BlockchainMutexPriority.high):\n # TODO: pre-validate VDFs outside of lock\n validation_start = time.time()\n validate_result = await self.blockchain.validate_unfinished_block(block, npc_result)\n if validate_result.error is not None:\n if validate_result.error == Err.COIN_AMOUNT_NEGATIVE.value:\n # TODO: remove in the future, hotfix for 1.1.5 peers to not disconnect older peers\n self.log.info(f\"Consensus error {validate_result.error}, not 
disconnecting\")\n return\n raise ConsensusError(Err(validate_result.error))\n validation_time = time.time() - validation_start\n\n # respond_block will later use the cache (validated_signature=True)\n validate_result = dataclasses.replace(validate_result, validated_signature=True)\n\n assert validate_result.required_iters is not None\n\n # Perform another check, in case we have already concurrently added the same unfinished block\n if self.full_node_store.get_unfinished_block(block_hash) is not None:\n return None\n\n if block.prev_header_hash == self.constants.GENESIS_CHALLENGE:\n height = uint32(0)\n else:\n height = uint32(self.blockchain.block_record(block.prev_header_hash).height + 1)\n\n ses: Optional[SubEpochSummary] = next_sub_epoch_summary(\n self.constants,\n self.blockchain,\n validate_result.required_iters,\n block,\n True,\n )\n\n self.full_node_store.add_unfinished_block(height, block, validate_result)\n pre_validation_log = (\n f\"pre_validation time {pre_validation_time:0.4f}, \" if pre_validation_time is not None else \"\"\n )\n if farmed_block is True:\n self.log.info(\n f\"🍀 ️Farmed unfinished_block {block_hash}, SP: {block.reward_chain_block.signage_point_index}, \"\n f\"validation time: {validation_time:0.4f} seconds, {pre_validation_log}\"\n f\"cost: {block.transactions_info.cost if block.transactions_info else 'None'} \"\n )\n else:\n percent_full_str = (\n (\n \", percent full: \"\n + str(round(100.0 * float(block.transactions_info.cost) / self.constants.MAX_BLOCK_COST_CLVM, 3))\n + \"%\"\n )\n if block.transactions_info is not None\n else \"\"\n )\n self.log.info(\n f\"Added unfinished_block {block_hash}, not farmed by us,\"\n f\" SP: {block.reward_chain_block.signage_point_index} farmer response time: \"\n f\"{receive_time - self.signage_point_times[block.reward_chain_block.signage_point_index]:0.4f}, \"\n f\"Pool pk {encode_puzzle_hash(block.foliage.foliage_block_data.pool_target.puzzle_hash, 'xch')}, \"\n f\"validation time: {validation_time:0.4f} seconds, {pre_validation_log}\"\n f\"cost: {block.transactions_info.cost if block.transactions_info else 'None'}\"\n f\"{percent_full_str}\"\n )\n\n sub_slot_iters, difficulty = get_next_sub_slot_iters_and_difficulty(\n self.constants,\n len(block.finished_sub_slots) > 0,\n prev_b,\n self.blockchain,\n )\n\n if block.reward_chain_block.signage_point_index == 0:\n res = self.full_node_store.get_sub_slot(block.reward_chain_block.pos_ss_cc_challenge_hash)\n if res is None:\n if block.reward_chain_block.pos_ss_cc_challenge_hash == self.constants.GENESIS_CHALLENGE:\n rc_prev = self.constants.GENESIS_CHALLENGE\n else:\n self.log.warning(f\"Do not have sub slot {block.reward_chain_block.pos_ss_cc_challenge_hash}\")\n return None\n else:\n rc_prev = res[0].reward_chain.get_hash()\n else:\n assert block.reward_chain_block.reward_chain_sp_vdf is not None\n rc_prev = block.reward_chain_block.reward_chain_sp_vdf.challenge\n\n timelord_request = timelord_protocol.NewUnfinishedBlockTimelord(\n block.reward_chain_block,\n difficulty,\n sub_slot_iters,\n block.foliage,\n ses,\n rc_prev,\n )\n\n timelord_msg = make_msg(ProtocolMessageTypes.new_unfinished_block_timelord, timelord_request)\n await self.server.send_to_all([timelord_msg], NodeType.TIMELORD)\n\n full_node_request = full_node_protocol.NewUnfinishedBlock(block.reward_chain_block.get_hash())\n msg = make_msg(ProtocolMessageTypes.new_unfinished_block, full_node_request)\n if peer is not None:\n await self.server.send_to_all([msg], NodeType.FULL_NODE, peer.peer_node_id)\n 
else:\n await self.server.send_to_all([msg], NodeType.FULL_NODE)\n\n self._state_changed(\"unfinished_block\")", "def add_peer(self, peer_id, peer_ip):\n self.peers.update({peer_id: peer.Peer(peer_ip)})", "def add_peer_node(self, node): \n self.__peer_nodes.add(node) \n self.save_data()", "def addBlock(self, block, unSpentTransactions):\n if self.isValidBlock(block, unSpentTransactions):\n self.chain[block.currHash] = block\n self.tailBlockHash = block.currHash\n return True\n return False", "def add_peer_node(self, node):\n self.__peer_nodes.add(node)\n self.save_data()", "def add_peer_node(self, node):\n self.__peer_nodes.add(node)\n self.save_data()", "def add_block(self, block):\n # Create a list of transaction objects\n transactions = [Transaction(\n tx['sender'], tx['recipient'], tx['signature'], tx['amount']) for tx in block['transactions']] \n chipsactions = [Chipsaction(\n tx['sender'], tx['recipient'], tx['follow'], tx['message'], tx['signature'], tx['amount']) for tx in block['chipsactions']] \n messsactions = [Messsaction(\n tx['sender'], tx['follower'], tx['message'], tx['signature']) for tx in block['messsactions']] \n # Validate the proof of work of the block and store the result (True or False) in a variable\n proof_is_valid = Verification.valid_proof(\n transactions[:-1], chipsactions, messsactions, block['previous_hash'], block['proof'])\n # Check if previous_hash stored in the block is equal to the local blockchain's last block's hash and store the result in a block\n hashes_match = hash_block(self.chain[-1]) == block['previous_hash']\n if not proof_is_valid or not hashes_match:\n return False\n # Create a Block object\n converted_block = Block(\n block['index'], block['previous_hash'], transactions, chipsactions, messsactions, block['proof'], block['timestamp'])\n self.__chain.append(converted_block)\n stored_transactions = self.__open_transactions[:]\n stored_chipsactions = self.__open_chipsactions[:]\n stored_messsactions = self.__open_messsactions[:]\n # Check which open transactions were included in the received block and remove them\n # This could be improved by giving each transaction an ID that would uniquely identify it\n for itx in block['transactions']:\n for opentx in stored_transactions:\n if opentx.sender == itx['sender'] and opentx.recipient == itx['recipient'] and opentx.amount == itx['amount'] and opentx.signature == itx['signature']:\n try:\n self.__open_transactions.remove(opentx)\n except ValueError:\n print('Item was already removed')\n\n for itx in block['chipsactions']:\n for opentx in stored_chipsactions:\n if opentx.sender == itx['sender'] and opentx.recipient == itx['recipient'] and opentx.sender == itx['follow'] and opentx.recipient == itx['message'] and opentx.amount == itx['amount'] and opentx.signature == itx['signature']:\n try:\n self.__open_chipsactions.remove(opentx)\n except ValueError:\n print('Item was already removed')\n for itx in block['messsactions']:\n for opentx in stored_messsactions:\n if opentx.sender == itx['sender'] and opentx.sender == itx['follower'] and opentx.recipient == itx['message'] and opentx.signature == itx['signature']:\n try:\n self.__open_messsactions.remove(opentx)\n except ValueError:\n print('Item was already removed')\n self.save_data()\n return True", "def add_block(self, block, save=True):\n if block.hash in self.blocks:\n return False\n if not block.is_valid()[0]:\n return False\n if not block.height in self.chain:\n self.chain[block.height] = []\n if not block.hash in self.chain[block.height]:\n # add 
newer blocks to front so they show up first in UI\n self.chain[block.height] = [block.hash] + self.chain[block.height]\n if not block.hash in self.blocks:\n self.blocks[block.hash] = block\n for tx in block.transactions:\n self.all_transactions[tx.hash] = tx\n if not tx.hash in self.blocks_containing_tx:\n self.blocks_containing_tx[tx.hash] = []\n self.blocks_containing_tx[tx.hash].append(block.hash)\n for input_ref in tx.input_refs:\n if not input_ref in self.blocks_spending_input:\n self.blocks_spending_input[input_ref] = []\n self.blocks_spending_input[input_ref].append(block.hash)\n self._p_changed = True # Marked object as changed so changes get saved to ZODB.\n if save:\n transaction.commit() # If we're going to save the block, commit the transaction.\n return True", "def add_block(block_id):\n if not g.user or g.user.id == block_id:\n flash(\"Access unauthorized.\", \"danger\")\n return redirect(\"/\")\n\n blocked_user = User.query.get_or_404(block_id)\n g.user.blocked_users.append(blocked_user)\n\n if g.user.is_following(blocked_user):\n g.user.following.remove(blocked_user)\n \n if blocked_user.is_following(g.user):\n blocked_user.following.remove(g.user)\n\n\n db.session.commit()\n\n return redirect(f\"/users/{g.user.id}/blocked-users\")", "def test_duplicate_peer(self):\n\n\t\tself.db = {'test_hash': [('test', '100.100.100.100', 1000)]}\n\t\ttracker.add_peer(self.db, \\\n\t\t\t\"test_hash\", \"test\", \"100.100.100.100\", 1000)\n\t\tself.assertEqual(self.db, \\\n\t\t\t{'test_hash': [('test', '100.100.100.100', 1000)]})", "def add_new_block(self):\n old_block = self.curr_block\n self.curr_block = self.gen_new_block()\n add_edge(old_block, self.curr_block)", "def addPeer(self, peerType, peerId):\r\n raise NotImplementedError()", "def route_mine_block():\n transaction_data = transaction_pool.transaction_data()\n transaction_data.append(Transaction.reward(wallet).to_json())\n blockchain.add_block(transaction_data)\n block = blockchain.chain[-1]\n pubsub.broadcast_block(block)\n transaction_pool.clear_transactions_added_to_blockchain(blockchain)\n return jsonify(block.to_json())", "def add_blocker(self, face):\n new_blocker = blocker_string(face, self.relative_level)\n if new_blocker not in self.blockers:\n self.blockers.append(new_blocker)", "def add_block(self, block_pf):\n\n # test si il s'agit du bloc genesis\n if len(self.blocks) != 0:\n # check si previous H est coherent avant ajout a chaine\n if self.check_previousBlockH(block_pf.header['prevBlockH']):\n self.blocks.append(block_pf)\n else:\n print \"== Probleme de parent\"\n print \"= %s\" % block_pf.header['prevBlockH']\n print \"= %s\" % getHashBlock(self.get_topBlock())\n else:\n self.blocks.append(block_pf)", "def add_new_peer(self, peer_id, peer_host, port):\n if peer_id not in self.chain_instance.peer_connect_dict:\n self.chain_instance.peer_connect_dict[peer_id] = {'host': peer_host, 'port': port}", "def addBlock(self, data):\n #get the hashVal of last block in blockchain\n lastHash = self.chain[len(self.chain) - 1].hashVal\n timestamp = time()\n hashVal = Block.hashSHA(timestamp, lastHash, data, NONCE, DIFFICULTY)\n adding_block = Block(timestamp, lastHash, hashVal, data, NONCE, DIFFICULTY)\n \n self.chain.append(adding_block)\n return adding_block", "def add_chat_block(self, vapor_id_or_ip, block_type, duration, unit_time, reason):\n identity = vapor_id_or_ip if len(vapor_id_or_ip) == 36 else vapor_id_or_ip.split(\":\")[0] \\\n if ':' in vapor_id_or_ip else vapor_id_or_ip\n cmd = '{}addChatBlock {} {} {} {} 
\"{}\"'.format(self.console, identity, block_type, duration,\n unit_time, Commands.aquote(reason))\n self.write_command(cmd)", "def mine():\n last_block = blockchain.get_last_block\n print(last_block)\n last_proof = last_block['proof']\n proof = blockchain.proof_of_work(last_proof)\n\n blockchain.add_transaction(sender=0, recipient=node_identifier, amount=1)\n block = blockchain.add_block(proof)\n block['message'] = 'New block added'\n\n return jsonify(block), 200", "def add(self, block):\n\n try:\n self.blocks[block.height]\n except:\n\n self.blocks[block.height] = [block]\n if self.current_height < block.height:\n self.current_height = block.height\n return\n\n if not block.hash() in [b.hash() for b in self.blocks[block.height]]:\n self.blocks[block.height].append(block)\n loggerutil.debug(\"fork detected for height:\" + str(block.height) +\n \"block candidats:\" + str(self.blocks[block.height]))\n if self.current_height < block.height:\n self.current_height = block.height", "def add_block(self, blk_json):\n block = Block.from_json(blk_json)\n with self.blockchain_lock:\n self._blockchain.add(block)", "def add_record(self, record: EventRecord) -> None:\n with self.session.begin() as session:\n session.add(record)", "def add_peer(self, writer):\r\n address = self.get_address_string(writer)\r\n self.connection_pool[address] = writer\r\n logger.info(\"Added new peer to pool\", address=address)", "def add_block(self, block):\n # checks for errors in new block's position\n if block.index != 0 and len(self.chain) == 0:\n raise Exception('Chain was not initiated. Insert Genesis block first')\n elif len(self.chain) < block.index:\n raise Exception(\"New block's position doesn't fit chain's order\")\n elif block.index == 0 and len(self.chain) == 1:\n raise Exception(\"Genesis block is already exist\")\n elif block.index == 0:\n self.chain.append(block)\n return True\n # validates the block and verifies the chain with the new block\n elif block.is_valid() and self.verify_chain(block):\n self.chain.append(block)\n return True\n else:\n return False", "def mine(self, block):\r\n for n in range(self.maxNonce):\r\n if int(block.generate_hash(), 16) <= self.chain.targetHash:\r\n self.chain.add(block)\r\n break\r\n else:\r\n block.nonce += 1", "def test_unique_peer(self):\n\n\t\tself.db = {}\n\t\ttracker.add_peer(self.db, \\\n\t\t\t\"test_hash\", \"test\", \"100.100.100.100\", 1000)\n\t\tself.assertEqual(self.db, \\\n\t\t\t{'test_hash': [('test', '100.100.100.100', 1000)]})", "def mine_block(): \n # Fetch the current last block of blockchain\n last_block = blockchain[-1]\n # Hash th elast block (=> to be able to compare it to stored hash value)\n hashed_block = hash_block(last_block)\n proof = proof_of_work()\n # Miners should be rewarded, so here is reward\n # reward_transaction = {\n # 'sender': 'MINING',\n # 'recipient': owner,\n # 'amount': MINING_REWARD\n # }\n reward_transaction = OrderedDict([('sender', 'MINING'), ('recipient', owner), ('amount', MINING_REWARD)])\n copied_transactions = open_transactions[:]\n copied_transactions.append(reward_transaction)\n\n block = {\n 'previous_hash': hashed_block,\n 'index': len(blockchain),\n 'transactions': copied_transactions,\n 'proof': proof\n }\n blockchain.append(block)\n return True", "def mine_block(self):\n if self.public_key == None:\n return None\n last_block = self.__chain[-1]\n hashed_block = hash_block(last_block)\n proof = self.proof_of_work()\n reward_transaction = Transaction(\n 'MINING', self.public_key, '', MINING_REWARD)\n\n 
copied_transactions = self.__open_transactions[:]\n for tx in copied_transactions:\n if not Wallet.verify_transaction(tx):\n return None\n copied_transactions.append(reward_transaction)\n\n copied_chipsactions = self.__open_chipsactions[:]\n for tx in copied_chipsactions:\n if not Wallet.verify_chipsaction(tx):\n return None\n\n copied_messsactions = self.__open_messsactions[:]\n for tx in copied_messsactions:\n if not Wallet.verify_messsaction(tx):\n return None\n\n block = Block(len(self.__chain), hashed_block,\n copied_transactions, copied_chipsactions, copied_messsactions, proof)\n self.__chain.append(block)\n self.__open_transactions = []\n self.__open_chipsactions = []\n self.__open_messsactions = []\n self.save_data()\n for node in self.__peer_nodes:\n url = 'http://{}/broadcast-block'.format(node)\n converted_block = block.__dict__.copy()\n converted_block['transactions'] = [\n tx.__dict__ for tx in converted_block['transactions']]\n converted_block['chipsactions'] = [\n tx.__dict__ for tx in converted_block['chipsactions']]\n converted_block['messsactions'] = [\n tx.__dict__ for tx in converted_block['messsactions']] \n try:\n response = requests.post(url, json={'block': converted_block})\n if response.status_code == 400 or response.status_code == 500:\n print('Block declined, needs resolving')\n if response.status_code == 409:\n self.resolve_conflicts = True\n except requests.exceptions.ConnectionError:\n continue\n return block", "def add_transaction(self, block, transaction):\n cmd = \"\"\"INSERT INTO %s(%s, %s, %s, %s, %s, %s)\n VALUES(?,?,?,?,?,?);\"\"\" %(TABLE_TRANSACTIONS,\n COL_TRANSACTION_BLOCK,\n COL_TRANSACTION_SENDER,\n COL_TRANSACTION_RECEIVER,\n COL_TRANSACTION_AMOUNT,\n COL_TRANSACTION_SUB_TIME,\n COL_TRANSACTION_VER_TIME)\n self.__dbcursor.execute(cmd, (block, transaction.sender,\n transaction.receiver,\n transaction.amount,\n transaction.submitted_time,\n transaction.verified_time))", "async def coin_added(self, coin: Coin, _: uint32, peer: WSChiaConnection):\n\n parent = self.get_parent_for_coin(coin)\n if parent is None:\n # this is the first time we received it, check it's a DID coin\n parent_state: CoinState = (\n await self.wallet_state_manager.wallet_node.get_coin_state([coin.parent_coin_info], peer=peer)\n )[0]\n response = await fetch_coin_spend_for_coin_state(parent_state, peer)\n parent_innerpuz = get_inner_puzzle_from_singleton(response.puzzle_reveal.to_program())\n if parent_innerpuz:\n parent_info = LineageProof(\n parent_state.coin.parent_coin_info,\n parent_innerpuz.get_tree_hash(),\n uint64(parent_state.coin.amount),\n )\n\n await self.add_parent(coin.parent_coin_info, parent_info)\n else:\n self.log.warning(\"Parent coin is not a DID, skipping: %s -> %s\", coin.name(), coin)\n return\n self.log.info(f\"DID wallet has been notified that coin was added: {coin.name()}:{coin}\")\n inner_puzzle = await self.inner_puzzle_for_did_puzzle(coin.puzzle_hash)\n # Check inner puzzle consistency\n assert self.did_info.origin_coin is not None\n\n # TODO: if not the first singleton, and solution mode == recovery\n if not self._coin_is_first_singleton(coin):\n full_puzzle = create_singleton_puzzle(inner_puzzle, self.did_info.origin_coin.name())\n assert full_puzzle.get_tree_hash() == coin.puzzle_hash\n if self.did_info.temp_coin is not None:\n self.wallet_state_manager.state_changed(\"did_coin_added\", self.wallet_info.id)\n\n new_info = DIDInfo(\n self.did_info.origin_coin,\n self.did_info.backup_ids,\n self.did_info.num_of_backup_ids_needed,\n 
self.did_info.parent_info,\n inner_puzzle,\n None,\n None,\n None,\n False,\n self.did_info.metadata,\n )\n await self.save_info(new_info)\n\n future_parent = LineageProof(\n coin.parent_coin_info,\n inner_puzzle.get_tree_hash(),\n uint64(coin.amount),\n )\n\n await self.add_parent(coin.name(), future_parent)", "def add_partial_decryption_block(self, block):\n\t\tself._blocks.append(block)", "def new_block(self, proof, previous_hash = None):\n #create a new Block & adds it to the chain.\n \n block = {\n 'index' : len(self.chain) + 1,\n 'timestamp' : time(),\n 'transactions' : self.pending_transactions,\n 'proof' : proof,\n 'previous_hash' : previous_hash or self.hash(self.chain[-1])\n }\n\n # Reset the current list of transactions\n self.pending_transactions = []\n\n self.chain.append(block)\n return block\n #pass", "def add_record(self, record):\n pass", "def mine_block(self):\n\t\tlast_block = self.blockchain.last_block\n\t\tlast_proof = last_block['proof']\n\t\tproof = self.blockchain.proof_of_work(last_proof)\n\n\t\t# Forge the new Block by adding it to the chain\n\t\tprevious_hash = self.blockchain.hash(last_block)\n\t\tblock = self.blockchain.new_block(proof, previous_hash)\n\n\t\t# broadcast request for all neighbor to resolve conflict\n\t\tself.broadcast_new_block()\n\n\t\t# now add a special transaction that signifies the reward mechanism\n\t\tnew_transaction = {\n\t\t'node':self.node_identifier,\n\t\t'block_index':block['index'],\n\t\t'reward':self.MINE_REWARD\n\t\t}\n\t\tself.blockchain.new_transaction(new_transaction)\n\t\treturn proof", "def add_block(self, block_name, transactions, timestamp, hash_value):\n\n transacted_amount = 0\n for transaction in transactions:\n transacted_amount += transaction.amount\n self.add_transaction(block_name, transaction)\n\n cmd = \"\"\"INSERT INTO %s(%s, %s, %s, %s, %s)\n VALUES(?,?,?,?,?);\"\"\" %(TABLE_BLOCKCHAIN,\n COL_BLOCKCHAIN_BLOCK,\n COL_BLOCKCHAIN_TRANS_COUNT,\n COL_BLOCKCHAIN_AMOUNT,\n COL_BLOCKCHAIN_TIME,\n COL_BLOCKCHAIN_BLOCK_HASH)\n self.__dbcursor.execute(cmd, (block_name, len(transactions),\n transacted_amount, timestamp,\n hash_value))", "def mine(self):\n new_block = Block(self.block['timestamp'], self.block['car'],\n self.block['id'])\n # link the block to the previous block\n new_block.previous_hash = self._get_previous_hash()\n while True:\n # get a hash\n new_hash = new_block.get_hash()\n # check hash rules, in our case check if the hash starts with\n # self.difficulty number of zeroes\n if new_hash[0] != self.difficulty * \"0\":\n if self.new_block[\"block\"] is None:\n # the hash hasn't been found yet by any other process,\n # therefore increase the nonce and continue\n # miners will use a different mining mechanism in order\n # to increase the probability of finding a hash by\n # a different miner\n new_block.increment_nonce(self.id + 1)\n continue\n break\n break\n\n # NOTE: May happen that two processes find the hash at the same time,\n # because there is not a big difficulty, however, it's not a problem,\n # for sake of the demo it's fine\n\n if self.new_block[\"block\"] is None:\n # this process has found the hash first\n print(self.id, \" - the winner hash\", new_hash)\n new_block.hash = new_hash\n self.new_block[\"block\"] = new_block\n print(self.id, \" - mined the block\")\n else:\n # validate the block found by other process (miner)\n if self.new_block[\"validated\"] is not False:\n print(self.id, \" - validating\")\n # check block's validity\n valid = False\n if self.new_block[\"block\"].is_block_valid():\n 
# check blockchain's validity when we apply the newly\n # mined block\n if self.is_blockchain_valid(self.new_block[\"block\"]):\n valid = True\n self.new_block[\"validated\"] = valid\n else:\n # NOTE: this demo doesn't take into account the number of\n # miners who approved the block, the block will be rejected\n # if any of them rejected it\n # but usually just more than 50% of miners must approve\n print(self.id, \" - the block has been rejected by other miner\")", "def AddConnectedPeer(self, peer):\n # if present\n self.RemoveFromQueue(peer.address)\n self.AddKnownAddress(peer.address)\n\n if len(self.Peers) > settings.CONNECTED_PEER_MAX:\n peer.Disconnect(\"Max connected peers reached\", isDead=False)\n\n if peer not in self.Peers:\n self.Peers.append(peer)\n else:\n # either peer is already in the list and it has reconnected before it timed out on our side\n # or it's trying to connect multiple times\n # or we hit the max connected peer count\n self.RemoveKnownAddress(peer.address)\n peer.Disconnect()", "def add_piece(self, peer_id, piece):\n self.peers[peer_id].add_piece(piece)", "def new_recv_block(recv_block: Block, sender_id: Optional[int] = None, mute: bool = False) -> bool:\n logging.debug(\"Received block %s\", util.bintos(recv_block.current_hash))\n if not recv_block.verify():\n logging.debug(\"Block %s rejected (failed verification)\",\n util.bintos(recv_block.current_hash))\n return False\n\n r = util.get_db()\n with r.lock(\"blockchain:blocks:lock\"), \\\n r.lock(\"blockchain:last_block:lock\"), \\\n r.lock(\"blockchain:main_branch:lock\"), \\\n r.lock(\"blockchain:orphan_blocks:lock\"), \\\n r.lock(\"blockchain:tx_pool:lock\"), \\\n r.lock(\"blockchain:utxo-block:lock\"), \\\n r.lock(\"blockchain:utxo-tx:lock\"):\n\n # NOTE: Comments like the one below are references to the bitcoin\n # protocol rules\n # OK 2 Reject if duplicate of block we have in any of the three categories\n if r.hexists(\"blockchain:blocks\", recv_block.current_hash) or \\\n r.sismember(\"blockchain:orphan_blocks:\".encode() + recv_block.previous_hash,\n recv_block.dumpb()):\n logging.debug(\"Block %s rejected (already exists)\",\n util.bintos(recv_block.current_hash))\n return False\n\n # Handle the genesis block\n if recv_block.is_genesis():\n r.hset(\"blockchain:blocks\", recv_block.current_hash, recv_block.dumpb())\n t = recv_block.transactions[0]\n o = t.outputs[0]\n ib = TransactionInput(t.id, o.index).dumpb()\n ob = o.dumpb()\n r.hset(\"blockchain:utxo-block:\".encode() + recv_block.current_hash, ib, ob)\n r.hset(\"blockchain:utxo-tx\", ib, ob)\n r.sadd(\"blockchain:main_branch\", recv_block.current_hash)\n _set_last_block_unlocked(r, recv_block)\n logging.debug(\"Genesis block accepted\")\n return True\n\n # OK 11 Check if prev block (matching prev hash) is in main branch or side branches. 
If not,\n # add this to orphan blocks, then query peer we got this from for 1st missing orphan\n # block in prev chain; done with block\n prev_blockb = r.hget(\"blockchain:blocks\", recv_block.previous_hash)\n if prev_blockb is None:\n logging.debug(\"Block %s is orphan\", util.bintos(recv_block.current_hash))\n r.sadd(\"blockchain:orphan_blocks:\".encode() + recv_block.previous_hash,\n recv_block.dumpb())\n # TODO OPT: Unlock before requesting the block (it could take some time, although\n # the response is asynchronous of course\n if not mute:\n logging.debug(\"Requesting block %s\", util.bintos(recv_block.previous_hash))\n # TODO OPT: Only ask the node we got this from, not everyone, to\n # avoid the flood of incoming blocks later\n chatter.get_blockid(recv_block.previous_hash,\n [sender_id] if sender_id is not None else util.get_peer_ids())\n return False\n\n prev_block = Block.loadb(prev_blockb)\n logging.debug(\"Previous block %s\", util.bintos(prev_block.current_hash))\n if recv_block.index != prev_block.index + 1:\n logging.debug(\"Block %s rejected (wrong index)\", util.bintos(recv_block.current_hash))\n return False\n\n # OK 15 Add block into the tree. There are three cases: 1. block further extends the main\n # branch; 2. block extends a side branch but does not add enough difficulty to make\n # it become the new main branch; 3. block extends a side branch and makes it the new\n # main branch.\n last_block = get_block()\n if recv_block.previous_hash == last_block.current_hash:\n # OK Case 1 (b.previous_hash == last_block):\n logging.debug(\"Block %s extends the main branch\", util.bintos(recv_block.current_hash))\n txos = _validate_block_unlocked(r, recv_block)\n if txos is None:\n return False\n referenced_txos, new_utxos = txos\n \"\"\"\n # NOTE: This is the body of _validate_block_unlocked, annotated, for reference\n referenced_txos: Set[bytes] = set() # the utxos from UTXO-block spent in recv_block\n new_utxos: Dict[bytes, bytes] = {}\n # OK 1 For all but the coinbase transaction, apply the following:\n for t in recv_block.transactions:\n # OK 1 For each input, look in the main branch to find the referenced output\n # transaction. 
Reject if the output transaction is missing for any input.\n input_amount = 0.0\n for i in t.inputs:\n # Search for i in UTXO-block\n ib = i.dumpb()\n ob = r.hget(\"blockchain:utxo-block:\".encode() + recv_block.previous_hash, ib)\n if ob is None:\n # Not found in UTXO-block, search in new_utxos\n ob = new_utxos.get(ib)\n if ob is None:\n return False\n del new_utxos[ib]\n else:\n # Avoid double-spending of a utxo from UTXO-block in the block\n if ib in referenced_txos:\n return False\n referenced_txos.add(ib)\n o = TransactionOutput.loadb(ob)\n # OK 2 For each input, if we are using the nth output of the earlier transaction,\n # but it has fewer than n+1 outputs, reject.\n # OK 4 Verify crypto signatures for each input; reject if any are bad\n if o.recipient != t.sender:\n return False\n # OK 5 For each input, if the referenced output has already been spent by a\n # transaction in the main branch, reject\n # OK 7 Reject if the sum of input values < sum of output values\n input_amount += o.amount\n if input_amount != sum(o.amount for o in t.outputs):\n return False\n\n new_utxos.update({TransactionInput(t.id, o.index).dumpb(): o.dumpb() \\\n for o in t.outputs})\n \"\"\"\n\n # OK 4 For each transaction, \"Add to wallet if mine\"\n # NOTE: referenced_txos and new_utxos are not empty since we got here\n _create_utxo_block_unlocked(r, recv_block, referenced_txos, new_utxos)\n\n # OK 5 For each transaction in the block, delete any matching transaction from the pool\n # : of the transactions in the pool, keep only the ones that are valid using the\n # new utxo-block to check for validity\n tx_pool = {t.id: t for t in \\\n [Transaction.loadb(tb) for tb in r.hvals(\"blockchain:tx_pool\")]}\n # NOTE: There can't be double spending in the tx pool as it is now\n tx_pool = _rebuild_tx_pool_unlocked(r, tx_pool, recv_block)\n\n _rebuild_utxo_tx_unlocked(r, recv_block, tx_pool)\n\n # Add block to main branch\n r.hset(\"blockchain:blocks\", recv_block.current_hash, recv_block.dumpb())\n r.sadd(\"blockchain:main_branch\", recv_block.current_hash)\n\n _set_last_block_unlocked(r, recv_block)\n logging.debug(\"Block %s accepted\", util.bintos(recv_block.current_hash))\n elif recv_block.index <= last_block.index:\n # OK Case 2 (b.previous_hash != last_block && b.index <= last_block.index)\n # : Add it without doing any validation because validating this now would require a lot\n # of work (actually simulating adding this to its prev as if extending the main branch).\n logging.debug(\"Block %s extends a side branch (not changing main)\",\n util.bintos(recv_block.current_hash))\n r.hset(\"blockchain:blocks\", recv_block.current_hash, recv_block.dumpb())\n else:\n # OK Case 3 (b.previous_hash != last_block && b.index > last_block.index)\n # OK 1 Find the fork block on the main branch which this side branch forks off of\n # : Ascend the side branch, the fork block is the first to be in the main branch\n logging.debug(\"Block %s extends a side branch (changing main)\",\n util.bintos(recv_block.current_hash))\n old_side_branch = [recv_block] # the Blocks in the old side branch\n fork_block = Block.loadb(r.hget(\"blockchain:blocks\", recv_block.previous_hash))\n while not r.sismember(\"blockchain:main_branch\", fork_block.current_hash):\n old_side_branch.append(fork_block)\n fork_block = Block.loadb(r.hget(\"blockchain:blocks\", fork_block.previous_hash))\n old_side_branch.reverse() # starting from the child of the fork block\n # OK 2 Redefine the main branch to only go up to this fork block\n # : Ascend from 
last_block up to the fork block\n old_main_branch: List[Block] = [] # the Blocks in the old main branch\n b = Block.loadb(r.hget(\"blockchain:blocks\", last_block.current_hash))\n while b != fork_block:\n old_main_branch.append(b)\n b = Block.loadb(r.hget(\"blockchain:blocks\", b.previous_hash))\n old_main_branch.reverse() # starting from the child of the fork block\n logging.debug(\"Fork block %s\", util.bintos(fork_block.current_hash))\n # OK 3 For each block on the side branch, from the child of the fork block to the leaf,\n # add to the main branch:\n for osbi, b in enumerate(old_side_branch):\n # OK 1 Do \"branch\" checks 3-11\n # : Why? we did them when first receiving the block. What could have changed?\n # OK 2 For all the transactions:\n txos = _validate_block_unlocked(r, b)\n if txos is None:\n # Delete invalid blocks and abort\n invalid_ids = [invalid.current_hash for invalid in old_side_branch[osbi:]]\n r.hdel(\"blockchain:blocks\", *invalid_ids)\n return False\n referenced_txos, new_utxos = txos\n \"\"\"\n # NOTE: This is the body of _validate_block_unlocked, annotated, for reference\n referenced_txos: Set[bytes] = set() # the utxos from UTXO-block spent in b\n new_utxos: Dict[bytes, bytes] = {}\n for t in b.transactions:\n # WP 1 For each input, look in the main branch to find the referenced output\n # transaction. Reject if the output transaction is missing for any input.\n # : Search for the referenced outputs in UTXO-block[previous_hash]\n input_amount = 0.0\n for i in t.inputs:\n # Search for i in UTXO-block\n ib = i.dumpb()\n ob = r.hget(\"blockchain:utxo-block:\".encode() + b.previous_hash, ib)\n if ob is None:\n # Not found in UTXO-block, search in new_utxos\n ob = new_utxos.get(ib)\n if ob is None:\n # TODO: Undo any changes, delete invalid blocks and reject\n raise NotImplementedError\n del new_utxos[ib]\n else:\n # Avoid double-spending in the block\n if ib in referenced_txos:\n # TODO: Undo any changes, delete invalid blocks and reject\n raise NotImplementedError\n referenced_txos.add(ib)\n o = TransactionOutput.loadb(ob)\n # OK 2 For each input, if we are using the nth output of the earlier\n # transaction, but it has fewer than n+1 outputs, reject.\n # WP 4 Verify crypto signatures for each input; reject if any are bad\n # : Check that t.sender == o.recipient for each utxo referenced\n if o.recipient != t.sender:\n # TODO: Undo any changes, delete invalid blocks and reject\n raise NotImplementedError\n # OK 5 For each input, if the referenced output has already been spent by a\n # transaction in the main branch, reject\n # WP 7 Reject if the sum of input values < sum of output values\n # : Check that sum(inputs) == sum(outputs)\n input_amount += o.amount\n if input_amount != sum(o.amount for o in t.outputs):\n # TODO: Undo any changes, delete invalid blocks and reject\n raise NotImplementedError\n\n new_utxos.update({TransactionInput(t.id, o.index).dumpb(): o.dumpb() for o \\\n in t.outputs})\n \"\"\"\n\n # OK 5 For each transaction, \"Add to wallet if mine\"\n # NOTE: referenced_txos and new_utxos are not empty since we got here\n _create_utxo_block_unlocked(r, b, referenced_txos, new_utxos)\n\n # OK 5 For each block in the old main branch, from the leaf down to the child of the\n # fork block:\n tx_pool = {t.id: t for t in \\\n [Transaction.loadb(tb) for tb in r.hvals(\"blockchain:tx_pool\")]}\n for b in reversed(old_main_branch):\n # OK 1 For each non-coinbase transaction in the block:\n for t in b.transactions:\n # OK 1 Apply \"tx\" checks 2-9, except in step 
8, only look in the transaction\n # pool for duplicates, not the main branch\n # : Why? these have been checked already. There can't be double spending\n # transactions in the pool as it is at this point (current as of the old\n # main branch) + the old main branch, because they wouldn't have gotten\n # there in the first place.\n # OK 2 Add to transaction pool if accepted, else go on to next transaction\n tx_pool[t.id] = t\n\n # OK 6 For each block in the new main branch, from the child of the fork node to the\n # leaf:\n # OK 1 For each transaction in the block, delete any matching transaction from the\n # transaction pool\n # : Of the transactions in the pool, keep only the ones that are valid using the\n # new utxo-block to check for validity\n # NOTE: There can't be double spending in the tx pool as it is now,\n # because it consists of the tx in the previous tx pool and all the\n # tx in the old main branch, and all of these have already been\n # checked for double spending\n tx_pool = _rebuild_tx_pool_unlocked(r, tx_pool, recv_block)\n\n _rebuild_utxo_tx_unlocked(r, recv_block, tx_pool)\n\n # Update main_branch\n for b in old_main_branch:\n r.srem(\"blockchain:main_branch\", b.current_hash)\n for b in old_side_branch:\n r.sadd(\"blockchain:main_branch\", b.current_hash)\n\n r.hset(\"blockchain:blocks\", recv_block.current_hash, recv_block.dumpb())\n _set_last_block_unlocked(r, recv_block)\n logging.debug(\"Block %s accepted\", util.bintos(recv_block.current_hash))\n\n orphans = [Block.loadb(orphanb) for orphanb in \\\n r.smembers(\"blockchain:orphan_blocks:\".encode() + recv_block.current_hash)]\n r.delete(\"blockchain:orphan_blocks:\".encode() + recv_block.current_hash)\n\n logging.debug(\"Block time for %s %f\", util.bintos(recv_block.current_hash),\n time.time() - recv_block.timestamp)\n\n # OK 19 For each orphan block for which this block is its prev, run all these steps (including\n # this one) recursively on that orphan\n for orphan in orphans:\n new_recv_block(orphan, sender_id)\n\n _check_for_new_block()\n return True", "def new_block(self, proof, previous_hash=None):\n servers = [\n \"1.us.pool.ntp.org\",\n \"2.us.pool.ntp.org\",\n \"3.us.pool.ntp.org\"\n ]\n\n response = {}\n\n try:\n response = self.c.request('0.us.pool.ntp.org')\n except Exception:\n for server in servers:\n try:\n response = self.c.request(server)\n\n if response:\n break\n\n except Exception:\n print('\\n //// alternate ntp server didnt work')\n\n block = {\n 'message': 'New Block Forged',\n 'index': len(self.chain) + 1,\n 'timestamp': response.tx_time or time(),\n 'transactions': self.current_transactions,\n 'proof': proof,\n 'previous_hash': previous_hash or self.chain[-1]['hash'],\n }\n\n # Calculate the hash of this new Block\n block['hash'] = self.hash(block)\n\n # Reset the current list of transactions\n self.current_transactions = []\n\n self.chain.append(block)\n return block", "def claim_block(self, journal, block):\n block.create_wait_certificate()", "def mine(self):\n last_block = self.chain[-1]\n\n nonce = self.proof_of_work()\n previous_hash = self.hash(last_block)\n self.create_block(nonce, previous_hash)", "def addPeer(self,ip,port):\n self.lock.acquire()\n srv_name = 'http://%s:%s' % (ip, port)\n srv = xmlrpclib.Server(srv_name)\n self.peers.append(srv)\n self.log.Print(' added peer:',srv_name,'\\n')\n self.lock.release()", "def new_entry(self,hostname,ip,port): \n\t\tnew_transaction = {\n\t\t'hostname':hostname,\n\t\t'ip':ip,\n\t\t'port':port\n\t\t}\n\t\tbuffer_len = 
self.blockchain.new_transaction(new_transaction)\n\t\tif buffer_len >= self.BUFFER_MAX_LEN or buffer_len >= self.blockchain.quota-self.BUFFER_MAX_LEN:\n\t\t\tself.mine_block()", "def test_bgp_peer_add(self, m_client, m_BGPPeer):\n # Set up mock objects\n peer = Mock(spec=BGPPeer)\n m_BGPPeer.return_value = peer\n\n # Set up arguments\n address = '1.2.3.4'\n\n # Call method under test\n bgp_peer_add(address, 4, 1)\n\n # Assert\n m_BGPPeer.assert_called_once_with(IPAddress(address), 1)\n m_client.add_bgp_peer.assert_called_once_with(4, peer)", "def add_record(self, msg_id, rec):\n if msg_id in self._records:\n raise KeyError(\"Already have msg_id %r\" % (msg_id))\n self._check_dates(rec)\n self._records[msg_id] = rec\n self._add_bytes(rec)\n self._maybe_cull()", "def new_block(self, proof, previous_hash=None):\n block = {\n 'index': len(self.chain) + 1,\n 'timestamp': time(),\n 'transactions':self.current_transactions,\n 'proof': proof,\n 'previous_hash': previous_hash or self.hash(self.chain[-1]),\n }\n self.current_transactions = []\n self.chain.append(block)\n return block", "def valid_chain(self, block, prev_block):\n self.stop_mine()\n\n print('\\n //// MINING STOPPED\\n')\n\n print('\\n //// block entering valid_chain')\n pprint(block)\n\n if block is not None and block['message'] != 'mining stopped':\n if block['previous_hash'] == self.hash(prev_block):\n \n # Check that the Proof of Work is correct\n if self.valid_proof(prev_block['proof'], block['proof']):\n if block['index'] == self.last_block['index']:\n if self.last_block['timestamp'] > block['timestamp']:\n del self.chain[-1]\n self.chain.append(block)\n print('\\n //// true from equal index but older timestamp')\n return True\n\n elif self.last_block['timestamp'] == block['timestamp']:\n print('\\n //// true from timestamps are equal block isnt added')\n return True\n else:\n print('\\n //// true timestamp is newer not added but sending false')\n return False\n\n elif block['index'] > self.last_block['index']:\n print('\\n //// true from index is greater and block is added')\n self.chain.append(block)\n return True\n else:\n print('\\n //// false from adding block had index less than block already there')\n else:\n print('\\n //// false from not a valid proof')\n\n else:\n print('\\n //// false from hashes arent equal')\n if (block['timestamp'] < self.last_block['timestamp']):\n if (block['index'] == self.last_block['index']):\n print('\\n //// hashes arent equal but block is older, subtracting and adding')\n del self.chain[-1]\n self.chain.append(block)\n return True\n\n elif (block['timestamp'] > self.last_block['timestamp']):\n if(block['index'] > self.last_block['index']):\n self.chain.append(block)\n return True\n else:\n return True\n\n return False\n\n else:\n return 'reject'", "def _add_to_inv(self, block_):\n if block_ in self._inventory:\n self._inventory[block_] += 1\n else:\n self._inventory[block_] = 1", "def mark_block_seen_by_blockchain_node(\n self,\n block_hash: Sha256Hash,\n block_message: Optional[TBlockMessage],\n block_number: Optional[int] = None,\n ):\n if block_message is not None:\n self.store_block_data(block_hash, block_message)\n\n self._blocks_seen_by_blockchain_node.add(block_hash)\n self.connection.log_debug(\"Confirmed receipt of block {}\", block_hash)", "def new_block(self, proof, previous_hash=None):\n \n block = {\n 'index': len(self.chain) + 1,\n 'timestamp': time(),\n 'transactions': self.current_transactions,\n 'proof': proof,\n 'previous_hash': previous_hash or 
self.hash(self.chain[-1]),\n }\n\n # Reset the current list of transactions\n self.current_transactions = []\n # Add block to existing chain\n self.chain.append(block)\n return block", "def mine(self):\n if not self.unconfirmed_transactions: \n return False\n \n last_block = self.last_block\n \n new_block = Block(index= last_block.index + 1, \n transactions = self.unconfirmed_transactions,\n timestamp = time.time(),\n previous_hash = last_block.hash)\n\n proof = self.proof_of_work(new_block)\n self.add_block(new_block, proof)\n self.unconfirmed_transactions = []\n return new_block.index", "def test_add_block(self):\n txout = TxOut(tx = \"transaction_hash\",\n nout = 1,\n addr = \"bitcoin_address\",\n value = 133)\n\n block = Block(block_hash=\"block_hash\",\n height=100,\n vout=[txout,],)\n \n balance_processor = BalanceProcessor(storage=self.balance_storage)\n balance_processor.add_block(block)\n\n self.assertEqual(balance_processor.height, 100)\n self.assertEqual(balance_processor.get_balance(\"bitcoin_address\"), 133)\n \n # Commit only commits the data already flushed into storage\n balance_processor.commit()\n\n self.assertEqual(balance_processor.get_balance(\"bitcoin_address\"), 133)\n self.assertEqual(self.balance_storage.get(\"bitcoin_address\"), 0)\n\n # Add empty blocks until the first block is flushed into storage\n for x in range(200):\n block = Block(block_hash=\"block_hash_{}\".format(x),\n height=x+100)\n balance_processor.add_block(block)\n\n self.assertEqual(balance_processor.get_balance(\"bitcoin_address\"), 133)\n self.assertEqual(self.balance_storage.get(\"bitcoin_address\"), 133)\n balance_processor.commit()\n self.assertEqual(self.balance_storage.get(\"bitcoin_address\"), 133)\n storage_height = self.balance_storage.height\n\n # Create a new balance_processor and check balance hasn't changed\n new_processor = BalanceProcessor(storage=self.balance_storage)\n self.assertEqual(self.balance_storage.get(\"bitcoin_address\"), 133)\n self.assertEqual(new_processor.get_balance(\"bitcoin_address\"), 133)\n self.assertEqual(new_processor.height, storage_height)", "def newBlock(preBlock, remitter, number, payee):\r\n index = preBlock.index + 1\r\n timestamp = int(round(time.time() * 1000))\r\n data = (remitter, number, payee).__str__()\r\n previousHash = preBlock.hash\r\n nounce = 0\r\n return Blockchain(index, data, timestamp, nounce, previousHash)", "def save_block(self, drip_campaign_id, start_time, nodes_id):\n new_block = Block(\n drip_campaign_id=drip_campaign_id,\n start_time=start_time,\n nodes_id=nodes_id\n )\n new_block.save()", "def accept_chained_block(self, block):\n for transaction in block.transactions:\n\n if self.wallets.exists_by_address(transaction.recipient_id):\n recipient_wallet = self.wallets.find_by_address(\n transaction.recipient_id\n )\n transaction.apply_to_recipient_wallet(recipient_wallet)\n self.wallets.save_wallet(recipient_wallet)\n\n sender_wallet = None\n if self.wallets.exists_by_public_key(transaction.sender_public_key):\n sender_wallet = self.wallets.find_by_public_key(\n transaction.sender_public_key\n )\n\n if self.transaction_exists(transaction.id):\n self.remove_transaction_by_id(transaction.id)\n elif sender_wallet:\n if transaction.can_be_applied_to_wallet(\n sender_wallet, self.wallets, block.height\n ):\n transaction.apply_to_sender_wallet(sender_wallet)\n self.wallets.save_wallet(sender_wallet)\n else:\n self._purge_sender(transaction.sender_public_key)\n self.block_sender(transaction.sender_public_key)\n\n if (\n sender_wallet\n 
and sender_wallet.can_be_purged()\n and not self.sender_has_any_transactions(transaction.sender_public_key)\n ):\n self.wallets.delete_by_public_key(sender_wallet.public_key)\n\n # If delegate is in pool wallet manager apply rewards and fees\n if self.wallets.exists_by_public_key(block.generator_public_key):\n delegate_wallet = self.wallets.find_by_public_key(\n block.generator_public_key\n )\n total = block.reward + block.total_fee\n delegate_wallet.balance += total\n self.wallets.save_wallet(delegate_wallet)", "def mine(self, rewardAddress):\n lastBlock = self.getLastBlock()\n index = lastBlock.index + 1\n previousHash = lastBlock.hash\n\n nonce = self.generate(lastBlock)\n\n self.createTransaction( # Reward for the miner\n sender=\"0\", # The miner receive coins \"created\", so there is no sender\n recipient=rewardAddress,\n amount=1,\n )\n\n # Add the block to the new chain\n block = Block(index, self.__currentTransactionsList, nonce, previousHash)\n\n if self.addBlock(block):\n return block\n\n return None", "def add_record(self):\n if not self.record_exists(self.args.date):\n record = self.create_record()\n self.records.append(record)\n self.write_json_file(self.records_file, self.records)\n return True\n return False", "def addBlock(self, aBlock: gp.Block):\n\n if self.blocks[aBlock.y][aBlock.x] != None:\n raise MovementError('game board space not empty')\n self.blocks[aBlock.y][aBlock.x] = aBlock\n self.groundSprites.append(aBlock.sprite)", "def mine_block(self):\n\n last_block = self.__chain[-1]\n hashed_block = hash_util.hash_block(last_block)\n\n proof = self.proof_of_work()\n\n # we are using OrderedDict to get an ordered dictionary so that the hash doesn't change due to the order changing\n reward_transaction = Transaction('MINING', self.hosting_node, MINING_REWARD)\n copied_transactions = self.__open_transactions[:] # copies open_transactions by value (: signifies range, if nothing is\n # specified, then the whole list is copied\n copied_transactions.append(reward_transaction) # reward for miners\n\n block = Block(len(self.__chain), hashed_block, copied_transactions, proof)\n self.__chain.append(block)\n self.__open_transactions = []\n return True", "async def store_peers(self, peer: Peer):\n await self.peers.store(peer)", "def new_block(self, proof, previous_hash=None):\n\n # Create the block\n my_block = Block(proof=proof,\n previous_hash=previous_hash or self.hash(self.last_block))\n my_block.save()\n\n # Update current_transactions with this new block.\n my_block_trans = self.current_transactions_obj\n\n for trans in Transaction.objects.filter(block__isnull=True):\n trans.block = my_block\n trans.save()\n\n block = {\n 'index': my_block.id,\n 'timestamp': my_block.timestamp,\n 'transactions': list(Transaction.objects.filter(block=my_block).values()),\n 'proof': my_block.proof,\n 'previous_hash': my_block.previous_hash,\n }\n\n return block", "def test_publish_block(self):\n # construction and wire the journal to the\n # gossip layer.\n\n btm = BlockTreeManager()\n journal = None\n try:\n journal = Journal(\n consensus_module=mock_consensus,\n block_store=btm.block_store.store,\n block_cache=btm.block_cache,\n state_view_factory=StateViewFactory(DictDatabase()),\n block_sender=self.block_sender,\n transaction_executor=self.txn_executor,\n squash_handler=None\n )\n\n self.gossip.on_batch_received = journal.on_batch_received\n self.gossip.on_block_received = journal.on_block_received\n\n journal.start()\n\n # feed it a batch\n batch = Batch()\n 
journal.on_batch_received(batch)\n\n wait_until(lambda: self.block_sender.new_block is not None, 2)\n self.assertTrue(self.block_sender.new_block is not None)\n\n block = BlockWrapper(self.block_sender.new_block)\n journal.on_block_received(block)\n\n # wait for the chain_head to be updated.\n wait_until(lambda: btm.chain_head.identifier ==\n block.identifier, 2)\n self.assertTrue(btm.chain_head.identifier == block.identifier)\n finally:\n if journal is not None:\n journal.stop()", "def add_block_as_child_node(self, block, node):\n child = etree.SubElement(node, \"unknown\")\n block.add_xml_to_node(child)", "def _do_add_block(self, args):\r\n bus_type = args[1]\r\n slave_id = int(args[2])\r\n name = args[3]\r\n block_type = int(args[4])\r\n starting_address = int(args[5])\r\n length = int(args[6])\r\n if bus_type == 'rtu':\r\n slave = self.server._servers[0].get_slave(slave_id)\r\n elif bus_type == 'tcp':\r\n slave = self.server._servers[1].get_slave(slave_id)\r\n slave.add_block(name, block_type, starting_address, length)\r\n return name", "def add_fragment(self, fragment, delay_sort = False):\n assert isinstance(fragment, Fragment)\n assert fragment.model_id == self.model_id\n\n ## add new chain if necessary\n try:\n chain = self.chain_dict[fragment.chain_id]\n except KeyError:\n chain = Chain(\n model_id = fragment.model_id,\n chain_id = fragment.chain_id)\n self.add_chain(chain, delay_sort)\n\n chain.add_fragment(fragment, delay_sort)", "def mine(self):\n if self.unconfirmed_transactions == []:\n return False\n\n transactions = self.unconfirmed_transactions\n for transaction in transactions:\n author = transaction['author']\n public_key_path = author + '_public.pem'\n content = transaction['content']\n signature = transaction['signature']\n verify = rsa_verify(content, signature, public_key_path)\n if verify == False:\n print('Transaction not verified.')\n return \n previous_block = self.last_block\n last_index = previous_block.index\n\n index = last_index + 1\n timestamp = time.time()\n previous_hash = previous_block.hash\n\n newblock = Block(index=index, transactions=transactions, timestamp=timestamp, previous_hash=previous_hash)\n proof = Blockchain.proof_of_work(newblock)\n\n self.add_block(newblock, proof)\n self.unconfirmed_transactions = []\n return newblock.index", "def add_party(self, party_id, contact_info):\n print('Adding party:', party_id)\n client = self.application.__init_blockchain_client__()\n response = client.addParty(party_id, contact_info)\n client.close()\n\n return response", "def mine():\n block = app.miner(app.blockchain)\n\n response = {\n 'message': \"New block is mined!\",\n 'block': block.dump()\n }\n\n return jsonify(response), 200", "def new_block(self, proof, previous_hash=None):\n\n\t\tblock = {\n\t\t\t'index': len(self.chain) + 1,\n\t\t\t'timestamp': time(),\n\t\t\t'transactions': self.current_transactions,\n\t\t\t'proof': proof,\n\t\t\t'previous_hash': previous_hash or self.hash(self.chain[-1]),\t\t\n\t\t}\n\n\t\t#Reset current list of transactions\n\t\tself.current_transactions = []\n\n\t\tself.chain.append(block)\n\t\treturn block", "def mine_block(last_block, data):\n timestamp = time.time_ns()\n last_hash = last_block.hash\n difficulty = Block.adjust_difficulty(last_block, timestamp)\n nonce = 0\n hash = crypto_hash(timestamp, last_hash, data, difficulty, nonce)\n\n while hex_to_binary(hash)[0:difficulty] != '0'* difficulty:\n nonce += 1\n timestamp = time.time_ns()\n difficulty = Block.adjust_difficulty(last_block, timestamp)\n hash = 
crypto_hash(timestamp, last_hash, data, difficulty, nonce)\n\n return Block(timestamp, last_hash, hash, data, difficulty, nonce)", "def AddPeer(self, peer_id):\n host, port = self._LookupPeer(peer_id)\n logging.debug('Adding peer %r %s:%d.' % (peer_id, host, port))\n peer = xmlrpclib.ServerProxy('http://%s:%s' % (host, port))\n self.peers[peer_id] = host, port, peer", "def create_block(self, nonce, previous_hash) -> None:\n block = {\n 'block_number': len(self.chain) + 1,\n 'timestamp': time(),\n 'transactions': self.transactions,\n 'nonce': nonce,\n 'previous_hash': previous_hash\n }\n\n self.transactions = []\n self.chain.append(block)", "async def add_block(\n self,\n position: typing.Tuple[int, int, int],\n block_name: typing.Union[str, typing.Any],\n immediate=True,\n block_update=True,\n block_update_self=True,\n lazy_setup: typing.Callable[[typing.Any], None] = None,\n check_build_range=True,\n block_state=None,\n network_sync=True,\n ) -> typing.Optional[typing.Any]:\n raise NotImplementedError", "async def new_block(request: Request) -> dict:\n block: dict = await request.json()\n block = await chain.add_block(block)\n response_block = Block(**block).to_dict()\n\n miner_ip = f\"{request.client.host}:{request.client.port}\"\n for node in chain.peers:\n async with httpx.AsyncClient() as client:\n _ = await client.get(f\"{node}/\")\n temp_chain = {f\"Block-{height}\": data.to_dict()\n for height, data in enumerate(chain.serialized)}\n return {\"miner_address\": miner_ip,\n \"latest_block\": response_block.dict(),\n \"new_chain\": temp_chain, }", "def add_block(self, env):\n block_size = (0.04, 0.04, 0.04)\n block_pose = self.random_pose(env, block_size)\n block_urdf = 'assets/stacking/block.urdf'\n block_id = env.add_object(block_urdf, block_pose)\n self.object_points[block_id] = np.float32((0, 0, 0)).reshape(3, 1)\n self._IDs[block_id] = 'block'\n return block_id", "def block_eid(self, block_id):\n ...", "def add_peer(self, peer, ws_extra_headers=None, ws_heartbeat=None):\n logger.info(\"Connecting to peer {}\".format(peer))\n return self.connection_manager.get_peer(\n peer,\n reconnect=not self.receptor.config._is_ephemeral,\n ws_extra_headers=ws_extra_headers,\n ws_heartbeat=ws_heartbeat,\n )", "def mine_block(self):\n if self.hosting_node == None:\n return None\n # Fetch the currently last block of the blockchain\n last_block = self.__chain[-1]\n print(last_block)\n # Hash the last block (to be able to compare it to the stored hash value)\n hashed_block = hash_block(last_block)\n proof = self.proof_of_work()\n # Miners should be rewarded, so let's create a reward transaction\n reward_transaction = Transfer(self.hosting_node, \"MINING\", MINING_REWARD)\n # Copy transaction instead of manipulating the original open_transactions list\n # This ensures that if for some reason the mining should fail, we don't have the reward transaction stored in the open transactions\n copied_transactions = self.__open_transfers[:]\n for tx in copied_transactions:\n if not Wallet.verify_transfer(tx):\n return None\n copied_transactions.append(reward_transaction)\n block = Block(len(self.__chain), hashed_block, copied_transactions, proof)\n self.__chain.append(block)\n self.__open_transfers = []\n self.save_data()\n return block", "def add_block_str(self, block_str):\n return self._add_block_str(block_str, True, False)", "def new_block(self, proof, previous_hash=None):\n block = {\n 'index': len( self.chain ) + 1,\n 'timestamp': time(),\n 'transactions': self.current_transactions,\n 'merkle': 
self.hash(self.current_transactions),\n 'proof': proof,\n 'previous_hash': previous_hash or self.hash(self.chain[-1])\n }\n\n # Reset the current list of transactions\n self.current_transactions = []\n\n # Add the block to the chain\n self.chain.append( block )\n self._write_chain()\n\n return block", "def new_block(self, proof, previous_hash=None):\r\n block = {\r\n 'index': len(self.chain) + 1,\r\n 'timestamp': time(),\r\n 'transactions': self.current_transactions,\r\n 'proof': proof,\r\n 'previous_hash': previous_hash or self.hash(self.chain[-1]),\r\n }\r\n\r\n # reseta a atual lista de transacoes\r\n self.current_transactions = []\r\n\r\n self.chain.append(block)\r\n return block" ]
[ "0.6661918", "0.663433", "0.66281337", "0.6614235", "0.65480334", "0.65008473", "0.64870596", "0.6486342", "0.6483645", "0.63460624", "0.62318516", "0.61465347", "0.6141715", "0.61389035", "0.61175793", "0.61068624", "0.6093761", "0.6086257", "0.60828084", "0.606044", "0.6018873", "0.60002136", "0.5944185", "0.5944185", "0.5941485", "0.5933974", "0.5905736", "0.5856099", "0.57844776", "0.5756453", "0.5738075", "0.5735829", "0.5728981", "0.5695618", "0.5695454", "0.56797993", "0.56763554", "0.5671474", "0.56596935", "0.562722", "0.56008", "0.55847585", "0.5578026", "0.55341846", "0.5507247", "0.5497571", "0.54947764", "0.54833037", "0.54704016", "0.54533505", "0.5442059", "0.5403034", "0.5377858", "0.53539324", "0.5336706", "0.53246146", "0.53168756", "0.53103954", "0.5308073", "0.53000593", "0.5299906", "0.5299845", "0.5253434", "0.5227768", "0.52223897", "0.52204996", "0.5195024", "0.51806843", "0.51726305", "0.5162752", "0.51602644", "0.5158404", "0.51504785", "0.51182246", "0.51074535", "0.5105212", "0.5104944", "0.51014876", "0.5097545", "0.5089466", "0.5088561", "0.50881773", "0.50861055", "0.50819683", "0.5062151", "0.5036471", "0.5034017", "0.5009453", "0.50079334", "0.49996144", "0.4989944", "0.498662", "0.49847782", "0.49783543", "0.49774766", "0.49758887", "0.49707282", "0.49693912", "0.49511984", "0.4945958" ]
0.66007864
4
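Both records in this excerpt share one layout: a natural-language query, the matching code snippet as the positive document, a list of hard-negative snippets, one retrieval score per negative, and the positive document's own score and rank. A minimal sketch of unpacking such a record into (query, positive, negative) training triples is given below; the field names mirror the records shown here, but the loader itself and the score-based filter are illustrative assumptions, not part of the dataset.

def record_to_triplets(record: dict) -> list:
    """Expand one retrieval record into (query, positive, negative) triples.

    Field names follow the records above: 'query', 'document', 'negatives',
    'negative_scores', 'document_score'. Filtering negatives by score is an
    illustrative choice, not something the dataset prescribes.
    """
    query = record["query"]
    positive = record["document"]
    positive_score = float(record["document_score"])
    triples = []
    for negative, score in zip(record["negatives"], record["negative_scores"]):
        # Keep negatives that score below the positive document, so a margin
        # objective has room to separate the pair.
        if float(score) < positive_score:
            triples.append((query, positive, negative))
    return triples

# Toy usage with a record shaped like the ones above (values shortened; the
# document_score here is a placeholder, the real value lies outside this excerpt):
toy = {
    "query": "Clears the peak_to_peer info which can get quite large.",
    "document": "async def clear_sync_info(self) -> None: ...",
    "negatives": ["def clear(self):\n    self._fingerprint = 0"],
    "negative_scores": ["0.6206844"],
    "document_score": "0.7",
}
print(record_to_triplets(toy))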
Clears the peak_to_peer info which can get quite large.
async def clear_sync_info(self) -> None:
    self.peak_to_peer = orderedDict()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear(self):\n self._fingerprint = 0", "def clear(self):\n self.mismatch_error = None\n self.pt_outs = None\n self._onnx_graph = None\n self.upper_graph_info = None\n self.lower_graph_info = None", "def clear(self):\n self.molo_tcp_pack.clear()\n self.tranparency = False\n self.append_recv_buffer = bytes()\n self.append_send_buffer = bytes()\n self.append_connect = True", "def clear(self):\n self._members = []\n self._size = 0\n self._updated = True\n self._BFS_collect = None\n self._center = None", "def reset():\n\n Follower.clear()", "def clear(self):\n self.molo_tcp_pack.clear()\n self.append_recv_buffer = bytes()\n self.append_send_buffer = bytes()\n self.append_connect = True\n self.client_status = None", "def restore_peak_size(self):\n if self.left_peak_size > 0 and self.peak_size < self.size:\n # Account for the left_peak_size which might be less than peak_size\n diff = min(self.size - self.peak_size, self.left_peak_size)\n self.peak_size += diff\n self.left_peak_size -= diff", "def clear(self):\n self.__attendees = []\n self._track_changes()", "def clear(self):\n self.append_send_buffer = bytes()\n self.append_connect = True", "def clear(self):\n self._latencies = [0] * len(BUCKETS)", "def clear(self):\n \n self.node_set.clear()\n self.prefix.clear()\n self.suffix.clear()\n self.num_node = 0\n self.edges = 0", "def reset(self):\n self._clusters = {}\n self._clusters_val = {}\n self._centroids = {}\n self.store()", "def reset(self):\n self._monitor.notify_received()\n self._pinger.stop()\n self._mark_fresh()", "def clear_pins(self):\n self.pins = {}\n self.all_pins = set()\n self.pin_groups = {} \n # DO NOT clear the blockages as these don't change\n self.rg.reinit()", "def reset(self):\n if self.monotonic_energy is not None:\n self.monotonic_energy.reset()\n if self.chunk_energy is not None:\n self.chunk_energy.reset()\n self.bd_L_prev = 0\n self.key_tail = None", "def rm_calibration(self):\n\n self.bin_edges_kev = None", "def clear(self):\n wait(self.proto.vanish())", "def invalidate_min_max(self):\n self.max_amplitude = None\n self.min_amplitude = None\n self.max_wavenumber = None\n self.min_wavenumber = None", "def clearResonancePeakDimContribs(resonance,peaks=None):\n\n if not peaks:\n peaks = []\n\n peakDict = {}\n for peak in peaks:\n peakDict[peak] = True\n \n peakDims = {} \n for contrib in resonance.peakDimContribs:\n peakDim = contrib.peakDim\n \n if (not peakDict) or peakDict.get(peakDim.peak):\n peakDims[peakDim] = True\n peakContribs = contrib.peakContribs\n contrib.delete()\n \n for peakContrib in peakContribs:\n if not peakContrib.peakDimContribs:\n peakContrib.delete()", "def reset(self):\n self._topics.clear()", "def reset(self):\n self.sum = [0.] * len(self.topk)\n self.data_num = 0\n self.pfm = [0.] 
* len(self.topk)", "def clear(self):\n\n self.__fasteners.clear()\n self.__update()", "def clearPulse(self):\n self.pulses = dict() # old mode for compatibility reasons\n self._params[\"pulses\"] = dict() # old mode\n self.totalPulse[:] = 0 # old mode\n self.sendPulse() # old mode\n\n self.clearMarkersList() # new mode\n self.pulseList = []\n self.preparePulseSequence()\n self.prepareMarkerSequence()\n self.sendPulseSequence()", "def reset(self):\n self.enc_len = None\n self.precomputed_enc_h = None\n self.mask = None\n self.prev_attn = None", "def reset(self):\n for i in range(0, len(self.__counts)):\n self.__counts[i] = 0\n self.__overflow = 0\n self.__total_count = 0\n self.__total_values = 0\n self.__min = None\n self.__max = None", "def reset(self):\n self.visited = set()\n del self.targets[0]", "def clear_face(self):\n rospy.loginfo('clearing all learned faces')\n self._clear_srv()", "def reset(self):\n self.edges = None\n self.chi = None\n self.k = None\n self.n_bins = None\n self.classes = None\n self.n_params = None", "def clear_statistics(self, sniff_port_list):\n pass", "def reset(self):\n self.det_link_map = OrderedDict()\n self.id_link_map = OrderedDict()\n self.declarations_table = None\n self.annotations_table = None\n self.num_frames = 0\n self.num_frames_by_uid = {}\n self.num_frames_by_uid_pre_remove = {}", "def clear(self):\n self.chromosome_list = []", "def reset_recordings(self):\n for rec in self._recordings.values():\n rec.resize(0)", "def clearup(self):\n\t\tself.outChannel.clearup()\n\t\tself.inChannel.clearup()", "def clear():", "def reset_auctioneer(self):\n self.bidders.clear()\n self._highest_bid = 0\n self._highest_bidder = None", "def clear(self) -> None:\n self._used = set()\n self.search_pos = 1", "def _clear(self):\n self.events = []\n self.last_on = None\n self.last_off = None", "def clear(self):\n self._x_prev = None\n self._y_prev = None", "def peakmem_reference(self, *args):\n pass", "def clear(self):\n self.__indexclusters[:] = []\n self.__sample_size = 0\n self.__samples[:] = []\n self.__simifunc = None", "def clear_monitor(self):\n self._monitored_patients = PatientList()", "def clear(self):\n self.log(u\"Clearing sync map\")\n self.fragments_tree = Tree()", "def clear(self):\n self.root = _NGramMapNode()\n self.size_freqs = dict()\n self.ele_freqs = dict()", "def clear(self):\n self.initialize()\n self.device_disconnect()", "def clear_streams(self):\n self.stop_streams()\n self.streams.clear()", "def reset(self):\n self.values = None\n self.keys = None\n self.mask = None", "def reset(self):\n self.ref_value = 0.0\n self._average = 0.0\n self.num_samples = 0", "def reset(self):\n self.bbox = None\n self.true = None\n self.meta = None", "def reset(self):\n # type: () -> None\n self.digest.clear()\n self.offset.clear()\n self.buffer.clear()\n self.position = 0\n self.counter = 0\n self.finished = False", "def reset(self) -> None:\n self.val = None\n self.notes = []\n self.blocked = False\n self.forbidden = False", "def clear(self):\r\n self.nodes = collections.defaultdict(list)\r\n self.nodes_mapping = collections.defaultdict(list)\r\n self.edges = 0\r\n #self.children_length={}\r\n self.parents_length = collections.defaultdict(lambda : collections.defaultdict(int))", "def peak(self):\n pass", "def clear(self) -> None:\n self._last_err = 0", "def clear(self):\n self._clear(is_link=True)", "def clear(self):\n self._out = None", "def clear(self):\n self._baseline = 0\n self._sensitivity_im = 0\n self._is_update = False", "def reset_local_buffers(self) 
-> None:\n for buf in self.values():\n buf.reset_agent()", "def reset(self):\n self._accumulated_time.clear()\n self._hit_count.clear()", "def clear_recipients(self):\n self._to = []\n self._cc = []\n self._bcc = []", "def clear(self) -> None:\n self.data = {} # defaultdict fails (T282865)\n self.size = 0", "def reset(self):\n self.baseline = None\n self.cut = None\n self.manual_push = 0", "def reset(self):\n self.visited = False\n self.calculated = False\n self.past_value = self.value\n self.value = 0", "def clear_frag_desc(self):\n self.frag_cnt = 0\n self.frag_desc = []\n return", "def clear(self):\n LongObjectHashMap.self.clear()", "def clear(self):\n LongObjectHashMap.self.clear()", "def reset_visited(self):\n self.__visited = False", "def reset(self):\n for k,v in self.events.items():\n self.events[k] = None", "def removeRunningPeaks(self):\n if not self.no_fitting:\n self.mfitter.removeRunningPeaks()", "def clearPeakDim(peakDim,contrib=None):\n \n if contrib:\n if contrib.isDeleted:\n contribs = []\n else:\n contribs = [contrib,]\n else:\n contribs = peakDim.peakDimContribs \n \n for contrib in contribs:\n contrib.delete()\n \n for peakContrib in peakDim.peak.peakContribs:\n if not peakContrib.peakDimContribs:\n peakContrib.delete()", "def reset(self):\n self.temp_data.clear()", "def set_peak(self, p):\n self.peak = p", "def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._is_ddp = get_rank() > -1", "def resetDetector (self):\n self.mpr121._reset ()", "def clear_nastran(self):\n self.eid_map = {}\n self.nid_map = {}\n self.eid_to_nid_map = {}\n self.element_ids = None\n self.node_ids = None", "def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._ddp_backend = get_backend()", "def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._ddp_backend = get_backend()", "def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._ddp_backend = get_backend()", "def unique_peaks(self):\n return(None)", "def clear(self) -> None:", "def clear(self):\n self.feedback.clear()\n self.ignored_feedback.clear()\n self.suppressions.clear()\n self.suppressed_labels.clear()\n self.hiddens.clear()\n self._tool_data.clear()\n self.group = None\n self.groups.clear()\n self.group_names.clear()\n self.hooks.clear()\n self.submission = None\n self.result = None\n self.resolves.clear()\n self.format = Formatter()\n self.clear_overridden_feedback()", "def clear(self):\n self._plot_data_cache = {}\n self._outstanding_requests = {}", "def reset(self) -> None:\n self.statistics = defaultdict(int)", "def clear_neighbor(self):\n\t\tself.neighbors.clear()\n\t\tself.neighbors.append(self)", "def clear(self):\n self._data = []", "def clear(self):\n self._data = []", "def clear(self):\n self.counts = [{} for _ in range(len(self.counts))]", "def clear(self):\n pass", "def clear(self):\n pass", "def clear(self):\n pass", "def clear(self):\n\n\t\tfor chain in self.chain:\n\t\t\tchain.clear()\n\n\t\tself.chain = []\n\t\tself.remark = []", "def reset(self) -> None:\n self.statistics = defaultdict(float)", "def clear(self):\n self._nodes = { }\n self._arcs = set()", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def _bg_clean_up_peer(self, peer):\n with self._peers_lock:\n del self._peers[peer.peer_id]\n del self._sock_to_peer[peer.socket]\n peer.close()\n peer = None" ]
[ "0.6206844", "0.58822215", "0.5839706", "0.5757953", "0.57487977", "0.5747801", "0.5705526", "0.5700115", "0.5695211", "0.56707", "0.5589478", "0.5570707", "0.5559506", "0.55220956", "0.55071974", "0.55066085", "0.5479404", "0.5468562", "0.5449649", "0.5447672", "0.5441297", "0.54409546", "0.54408336", "0.5408519", "0.5401802", "0.5380283", "0.5370429", "0.53647", "0.5354091", "0.5350153", "0.5334347", "0.5326061", "0.5319398", "0.5307005", "0.5300086", "0.52985585", "0.52942944", "0.529275", "0.5292104", "0.5289391", "0.52864254", "0.52830094", "0.52538484", "0.5244979", "0.5241881", "0.5241783", "0.52365", "0.52343524", "0.5232473", "0.5230573", "0.5225253", "0.5217797", "0.521067", "0.52065986", "0.52059996", "0.51986355", "0.51986265", "0.5195223", "0.5194656", "0.5192358", "0.5191036", "0.51862663", "0.51843613", "0.51793855", "0.51793855", "0.5177663", "0.51739347", "0.51730424", "0.5168323", "0.516309", "0.51564825", "0.5155735", "0.5154422", "0.5140831", "0.5139425", "0.5139425", "0.5139425", "0.5137357", "0.51359135", "0.5120556", "0.5112414", "0.5094712", "0.50935596", "0.5093297", "0.5093297", "0.5091744", "0.50880736", "0.50880736", "0.50880736", "0.5087239", "0.5082916", "0.5080847", "0.5080325", "0.5080325", "0.5080325", "0.5080325", "0.5080325", "0.5080325", "0.5080325", "0.5080271" ]
0.7599027
0
Peak wavelength when the curve is expressed as power density.
def lambda_max(self):
    return const.b_wien / self.temperature
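The lambda_max document above is Wien's displacement law, peak wavelength = b_wien / T. As a hedged illustration only (not part of the dataset record), the same calculation using SciPy's published constant, assuming a plain float temperature in kelvin:

# Illustrative sketch, not part of the dataset record; assumes SciPy is installed.
from scipy.constants import Wien  # Wien displacement constant b, ~2.8978e-3 m*K

def peak_wavelength_m(temperature_k):
    # Wien's displacement law: lambda_max = b / T
    return Wien / temperature_k

# Example: a 5778 K blackbody (roughly the Sun) peaks near 5.0e-7 m (~500 nm).
print(peak_wavelength_m(5778.0))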
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wavelength(energy):\r\n return 2 * np.pi * PLANCK_CONSTANT * SPEED_OF_LIGHT / energy", "def wavelength(self):\n return wavelength(energy)", "def peak(self):\n pass", "def wavelength(energy):\n return 2 * PI * PLANCK_CONSTANT * SPEED_OF_LIGHT / energy", "def peak_PSF(self):\n return self.compute_PSF(np.zeros(self.N_zern))", "def getPeakMagneticField(self):\n return self.solenoid.getPeakMagneticField()", "def calculate_signal_power(self, sender, freq_range):\r\n distance = np.sqrt(\r\n np.power(self.x - sender.x, 2) + np.power(self.y - sender.y, 2))\r\n avg_frequency = np.average(freq_range) * 1e6\r\n wavelength = settings.speed_of_light / avg_frequency\r\n received_signal_power = (\r\n sender.tx_power * sender.gain * self.gain * np.power(\r\n wavelength, 2)) / np.power(4 * np.pi * distance, 2)\r\n return received_signal_power", "def getPeakValue( self ):\n nCurrentMax = max( self.data.max(), -self.data.min() )\n return float(nCurrentMax) / self.getSampleMaxValue()", "def spectrum(self, wl: Union[float, ndarray]) -> Union[float, ndarray]:\n wlm = wl * 1e-9 # Wavelength to meters\n return 3.74183e-16 * wlm ** -5. / (np.exp(0.014388 / (wlm * self.temp)) - 1.)", "def peak(self):\n return self.peak", "def amplitude(self):\r\n return np.sqrt(self.maxint) * self.weights", "def plot_powerlaw(self, **kwargs):\n\n if self.gamma is None:\n self.exponent()\n p = powerlaw.plot(exponent=-self.gamma,\n xmax=self.max_deg, xmin=self.k_min,\n **kwargs\n )\n pylab.show()\n return p", "def max_power_spectrum(sig, FS):\n\n if np.std(sig) == 0:\n return float(max(signal.welch(sig, int(FS), nperseg=len(sig))[1]))\n else:\n return float(max(signal.welch(sig/np.std(sig), int(FS), nperseg=len(sig))[1]))", "def max_power(self):\r\n est_max_power = self.model * self.max_pwm / 100\r\n return est_max_power", "def wavelength(self,freq):\n return self.phase_velocity()/freq", "def test_power(self):\r\n a = 6 # shape\r\n samples = 5000\r\n max = -0.06\r\n min = -3.3\r\n s = np.random.power(a, samples) * -1 * (min - max) + min\r\n plt.hist(s, bins=30, density=False)\r\n plt.xlabel('Interlayer point energy [eV]')\r\n plt.ylabel('Frequency')\r\n plt.show()", "def peak(data, fft_data=None):\n return np.max(np.abs(data))", "def amplitude(self):\n return np.sqrt(self.maxint) * self.__weights", "def get_power(self) -> float:\n\n #:READ[n][:CHANnel[m]][:SCALar]: POWer[:DC]?\n return float(self._inst.query(\":READ:POW?\"))", "def powerlaw(self, wavelength, AKs):\n # If input entry is a single float, turn it into an array\n try:\n len(wavelength)\n except:\n wavelength = [wavelength]\n\n # Return error if any wavelength is beyond interpolation range of\n # extinction law\n if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))):\n return ValueError('{0}: wavelength values beyond interpolation range'.format(self))\n \n # Extract wave and A/AKs from law, turning wave into micron units\n wave = self.wave * (10**-4)\n law = self.obscuration\n\n # Find the value of the law at the closest points\n # to wavelength\n A_AKs_at_wave = []\n for ii in wavelength:\n idx = np.where( abs(wave - ii) == min(abs(wave - ii)) )\n A_AKs_at_wave.append(law[idx][0])\n\n # Now multiply by AKs (since law assumes AKs = 1)\n A_at_wave = np.array(A_AKs_at_wave) * AKs\n\n return A_at_wave", "def getScaledWaveform(self):\n return self.data*self.volt_gain - self.volt_offset", "def clipped_power(self):\n ac = self.ac()\n dc = self.dc()\n\n return np.where(ac < ac.max(), 0, dc - ac)", "def get_power(self):\r\n return 
self.p", "def peak(self):\n\n return self._data[0]", "def peak_PSF(self):\n im, strehl = self.compute_PSF(np.zeros(self.N_act))\n return strehl", "def wavelength_ex(hdulist):\n wave = hdulist[1].data['loglam']\n wave = 10**wave\n\n return wave", "def test_extended_truncated_power_law_fit(self):\n\t\t\n\t\t#TODO: fix this; low priority\n\t\tdetails= self.watcher.analyze(layers=[self.fc1_layer], pl_package=POWERLAW_PACKAGE, fit=E_TPL)\n\t\tactual_alpha = details.alpha[0]\n\t\tactual_Lambda = details.Lambda[0]\n\n\t\tself.assertTrue(actual_Lambda > -1) #Lambda must be set for TPL\n\t\t\n\t\t# these numbers have not been independently verified yet\n\t\texpected_alpha = 2.3\n\t\texpected_Lambda = 0.006069\n\t\tself.assertAlmostEqual(actual_alpha,expected_alpha, places=2)\n\t\tself.assertAlmostEqual(actual_Lambda,expected_Lambda, places=2)", "def find_closest_peaks(power, freqs, guess_freqs):\n # find the maxima in the power spectrum\n maxima = sig.argrelextrema(power, np.greater)\n\n maxima_freqs = np.zeros(freqs.shape)\n maxima_freqs[maxima] = freqs[maxima]\n\n # determine the peaks as the closest maxima to\n # each of the standing wave frequencies\n peak_indices = [find_nearest_idx(maxima_freqs, f) for f in guess_freqs]\n return peak_indices", "def intensity(self):\r\n return np.power(prb.amplitude, 2)", "def waveparameterh(L):\r\n return 8.13 - ((250 - 0.7 * L) / 125) ** 3", "def peak_height(self):\n return np.array([max(self.waveform[ch]) for ch in range(self.nchannels)])", "def wavelength(self):\n return self.get(self._names[\"wavelength\"])", "def wind_ppa():\n per_kwh = 0.0384 # [$/kWh]\n\n return per_kwh", "def get_wavelength(self):\n E = -self.E0*(1.0/self.n_low**2 - 1.0/self.n_high**2)\n return SI['hc']*1e12/(E*SI['keV'])", "def get_power(self):\r\n x = self.query('SOURce1:POWer:POWer?')\r\n if x == None: return None\r\n return float(x)", "def get_power(self):\r\n x = self.query('SOURce1:POWer:POWer?')\r\n if x == None: return None\r\n return float(x)", "def normalizedpeakerror():\r\n peak_simulation = max(hydrograph)\r\n peak_observation = max(obs_data)\r\n peak_error = abs(peak_simulation - peak_observation)/(peak_observation + peak_simulation)\r\n return peak_error", "def peak(self) -> Tuple[MeasureInput, MeasureResult]:\n assert self._data\n return self._data[0][2]", "def get_powerperband(self, bandwidth):\n ls = self.get_degrees()\n return self._powerperdegree() * ls * np.log(bandwidth)", "def lmin(self):\n cond = (self.transmit / self.transmit.max()) > 1./100\n return min(self.wavelength[cond])", "def wavefunction(self, x):\n return ( float(1) / math.pi**(float(1)/4)) * math.exp( x**2 / float(-2))", "def get_power(frames, num_fft):\n #a = get_magnitude(frames, num_fft)\n #b = np.square(a)\n #print('max : ', np.max(a))\n #print('min : ', np.min(a))\n #print('sq max : ', np.max(b))\n #print('sq min : ', np.min(b))\n #print(a.shape)\n #print(b.shape)\n #return b/num_fft\n return np.square(get_magnitude(frames, num_fft) / np.sqrt(num_fft))", "def logpeak(x, p=default()):\n model = p[0] - p[1]*(x**2)\n return model", "def p(self) -> float:\n return self._pwr.real", "def setPowerFromDensity(self):\n self.p.power = self.p.powerDensity * self.getHMMass()", "def peak(self) -> Point:\n return self.most_intense_point()", "def test_ww_power_law_fit_directly(self):\n\n\t\t\tnp.random.seed(123)\n\t\t\tdata = np.random.pareto(2.5, 100)\n\t\t\t\n\t\t\n\t\t\tresult = WW_powerlaw.pl_fit(data, xmax=np.max(data), pl_package=POWERLAW_PACKAGE)\n\t\t\texpected_alpha = 
result.alpha\n\t\t\tself.assertAlmostEqual(expected_alpha, 2.5, delta=0.1)\n\t\n\t\t\tresult = WW_powerlaw.pl_fit(data, xmax=np.max(data), pl_package=WW_POWERLAW)\n\t\t\tactual_alpha = result.alpha\t\n\t\t\tself.assertAlmostEqual(expected_alpha, actual_alpha, delta=0.1)", "def get_power(self):\r\n _debug('simq03b_api.get_power')\r\n \r\n x = self.query('POWer?')\r\n if x == None: return None\r\n return float(x)", "def _powerlaw(self, x: np.ndarray, y: np.ndarray) -> float:\n\n # regress\n def _regress(x, y):\n slope, intercept, rval, pval, err = linregress(x, y)\n return slope, rval\n\n # log of inputs\n logx = np.log(x)\n logy = np.log(y)\n\n # naive fit\n rmin = self.rmin\n if rmin is None:\n exponent, rval = _regress(logx, logy)\n return exponent\n\n # iteratively trim the fat tail\n for ymin in np.unique(y):\n\n # trim off the fat tail\n greater_than = y >= ymin\n logx_ = logx[greater_than]\n logy_ = logy[greater_than]\n exponent, rval = _regress(logx_, logy_)\n\n # check convergence\n if abs(rval) > rmin:\n return exponent\n\n # give up\n return np.nan", "def lmax(self):\n cond = (self.transmit / self.transmit.max()) > 1./100\n return max(self.wavelength[cond])", "def peak_height(self, logM, k = [], pk = []):\n # Checks\n pk=np.atleast_2d(pk)\n assert len(pk[0])==len(k), \"Length of scales is different from power spectra\"\n sigma2 = self.mass_variance(logM,k,pk)\n nu = self.delta_sc/sigma2**.5\n return nu", "def peak_time(self):\n return np.array([self.wftime[ch][self.waveform[ch].argmax()] for ch in range(self.nchannels)])", "def derive_powerlaw(wavelength, alpha, K_wave):\n # Create extinction law\n law = wavelength**(-1.0 * alpha)\n\n # We'll identify K-band as 2.14 microns\n idx = np.where(abs(wavelength - K_wave) == min(abs(wavelength - K_wave)))\n A_AKs_at_wave = law / law[idx]\n\n return A_AKs_at_wave", "def minDeviation(self,wavelength = None):\n if wavelength == None:\n wavelength = self.wavelength\n\n return Prism.minDeviation(self,wavelength)", "def wavelength(self):\n return self.getparam(\"WAVELENGTH\")", "def wavelength(self):\n return self.getparam(\"WAVELENGTH\")", "def power(self) -> int:\n return self._power_consumption", "def evaluate_peak_norm(x, y, amplitude, x_0, y_0, r_in, width):\n rr = (x - x_0) ** 2 + (y - y_0) ** 2\n rr_in = r_in ** 2\n rr_out = (r_in + width) ** 2\n\n # Because np.select evaluates on the whole rr array\n # we have to catch the invalid value warnings\n # Note: for r > r_out 'np.select' fills automatically zeros!\n with np.errstate(invalid='ignore'):\n values = np.select([rr <= rr_in, rr <= rr_out],\n [np.sqrt(rr_out - rr) - np.sqrt(rr_in - rr),\n np.sqrt(rr_out - rr)])\n return amplitude * values / np.sqrt(rr_out - rr_in)", "def Qmax(self):\n return (4 * np.pi * np.sin(self.two_theta_max()*radians/2)\n / self.wavelength)", "def find_local_peak(flux, x, width, figname=None):\n width = int(round(width))\n if width%2 != 1:\n width += 1\n half = int((width-1)/2)\n\n i = int(round(x))\n\n # find the peak in a narrow range\n\n i1, i2 = max(0, i-half), min(flux.size, i+half+1)\n\n if i2 - i1 <= 4:\n # 4 is the number of free parameters in fitting function\n return None\n\n # find the peak position\n imax = flux[i1:i2].argmax() + i1\n xdata = np.arange(i1,i2)\n ydata = flux[i1:i2]\n # determine the initial parameters for gaussian fitting + background\n p0 = [ydata.max()-ydata.min(), imax, 3., ydata.min()]\n # least square fitting\n #p1,succ = opt.leastsq(errfunc2, p0[:], args=(xdata,ydata))\n p1, cov, info, mesg, ier = opt.leastsq(errfunc2, 
p0[:],\n args=(xdata, ydata), full_output=True)\n\n res_lst = errfunc2(p1, xdata, ydata)\n\n if res_lst.size-len(p0)-1 == 0:\n return None\n\n std = math.sqrt((res_lst**2).sum()/(res_lst.size-len(p0)-1))\n\n if figname is not None:\n fig = plt.figure()\n ax1 = fig.add_axes([0.1, 0.4, 0.8, 0.5])\n ax2 = fig.add_axes([0.1, 0.1, 0.8, 0.25])\n ax1.plot(xdata, ydata, 'o', ms=4)\n newx = np.arange(xdata[0], xdata[-1], 0.1)\n newy = gaussian_bkg(p1[0], p1[1], p1[2], p1[3], newx)\n ax1.plot(newx, newy, '-', lw=0.6)\n yerr = errfunc2(p1, xdata, ydata)\n ax2.plot(xdata, yerr, 'o', ms=4)\n ax1.set_xlim(xdata[0], xdata[-1])\n ax2.set_xlim(xdata[0], xdata[-1])\n fig.savefig(figname)\n plt.close(fig)\n\n return i1, i2, p1, std", "def _amplitudeFromPeak(peak, x, y, radius, x_0=10, y_0=10):\n rz = jn_zeros(1, 1)[0] / np.pi\n r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2) / (radius / rz)\n if r == 0.:\n return peak\n rt = np.pi * r\n z = (2.0 * j1(rt) / rt)**2\n amp = peak / z\n return amp", "def evaluate_peak_norm(x, y, amplitude, x_0, y_0, r_0):\n rr = (x - x_0) ** 2 + (y - y_0) ** 2\n rr_0 = r_0 ** 2\n\n # Because np.select evaluates on the whole rr array\n # we have to catch the invalid value warnings\n with np.errstate(invalid='ignore'):\n values = np.select([rr <= rr_0, rr > rr_0], [np.sqrt(rr_0 - rr), 0])\n return amplitude * values / r_0", "def single_peak(x, mean, sigma, alpha):\n return skewnorm.pdf(x, alpha, mean, sigma)", "def spectral_power(img, avg_window_size=None, log=True): #COMPLETE spectrum generator\r\n image = img.copy()\r\n # to avoid large spectral power at the 0 frequency :\r\n image -= np.mean(image)\r\n # wiener filter to reduce non physical variability in the spectral power\r\n if avg_window_size:\r\n N = avg_window_size\r\n image = wiener(image, (N, N))\r\n # compute the spectral power function. 
Place the 0 frequency-component in the center\r\n fshift = np.fft.fftshift(np.fft.fft2(image))\r\n spectrum = np.abs(fshift)**2\r\n if log:\r\n spectrum = 10*np.log(spectrum)\r\n return spectrum", "def calculate_wavelength_metric(wavelength_min, wavelength_max):\n length_max = np.log(550) * 2\n wavelength = np.abs(wavelength_max + wavelength_min) / 2\n log_wl = np.log(wavelength)\n default_met = np.array(log_wl / length_max)\n scaled_met = 1.75 * (default_met - 0.5) + 0.5\n if wavelength == 0:\n return 0\n else:\n return scaled_met.clip(min=10e-11, max=1)", "def power(self):\r\n return self.model * self.percent / 100", "def getMeanPowerAmplitude(self):\r\n\r\n try:\r\n finalPatches = self.finalPatchesMarked\r\n except AttributeError:\r\n finalPatches = self.finalPatches\r\n\r\n try:\r\n powerMap = ia.array_nor(np.mean([self.altPowerMapf, self.aziPowerMapf], axis=0))\r\n except AttributeError:\r\n _ = self._getSignMap()\r\n powerMap = ia.array_nor(np.mean([self.altPowerMapf, self.aziPowerMapf], axis=0))\r\n\r\n # get V1 mean fluorscence\r\n try:\r\n V1 = finalPatches['V1']\r\n except KeyError:\r\n V1 = finalPatches['patch01']\r\n\r\n V1array = V1.array\r\n\r\n V1area = np.sum(V1array).astype(np.float)\r\n V1totalPower = np.sum(V1array * powerMap).astype(np.float)\r\n V1meanPower = V1totalPower / V1area\r\n\r\n # get mean power amplitude for all visual areas normalized by V1\r\n meanPowerDict = {}\r\n for key, patch in finalPatches.items():\r\n array = patch.array\r\n\r\n area = np.sum(array).astype(np.float)\r\n\r\n totalPower = np.sum(array * powerMap).astype(np.float)\r\n\r\n meanPowerNor = (totalPower / area) / V1meanPower\r\n\r\n meanPowerDict.update({key: meanPowerNor})\r\n\r\n return meanPowerDict", "def getDemodGain(self, inpwr):\n return min(100, max(0, int(100-2*(inpwr+40))))", "def get_slope(self, device_type_name):\n\n if device_type_name in [\"SOLN\", \"BEND\",\"BLEN\",\"KICK\"]:\n # Solenoid devices use 'uA'.\n return 0.00055586\n elif device_type_name in [\"BLM\",\"LBLM\",\"CBLM\",\"PBLM\"]:\n # Beam loss monitors set threshold in Volts initially\n return 1.6/65536\n else:\n raise ValueError(\"Function \\\"__get_slope(device_type_name={}, fault_name={})\\\". 
Invalid device type name\"\n .format(device_type_name, fault_name))", "def minwavelen(self):\n return self._minwavelen", "def get_power(self):\r\n x = self.query('POW?')\r\n if x == None: return None\r\n return float(x)", "def signal_power(signal_input, *arg):\r\n signal_T = np.array(signal_input)\r\n energy = np.abs(signal_T) ** 2\r\n if len(arg) < 1:\r\n power = np.mean(energy)\r\n else:\r\n power = np.mean(energy, axis=arg[0])\r\n return power", "def spectral_slope(sign, fs):\n f, ff = plotfft(sign, fs)\n if not(list(f)):\n return 0\n else:\n if not (len(f) * np.dot(f, f) - np.sum(f) ** 2):\n return 0\n else:\n return (len(f) * np.dot(f, ff) - np.sum(f) * np.sum(ff)) / (len(f) * np.dot(f, f) - np.sum(f) ** 2)", "def power_spectral_density(var):\n\n n = len(var)\n Y = np.fft.fft(var)/n # fft computing and normalization\n Y = Y[range(int(n/2))]\n k = np.arange(n)\n Ts = 30*60 # sampling interval (sec)\n Fs = 1./Ts\n T = n/Fs\n frq = k/T # two sides frequency range\n frq = frq[range(int(n/2))] # one side frequency range\n\n return frq, abs(Y)", "def calc_peakt(self, trial_dur):\n if trial_dur <= 11.0:\n peakt = 0.5375*trial_dur + 6.09625\n else:\n peakt = 11.75\n return peakt", "def show_waveform(self, peaks=[]):\n if peaks is None:\n peaks = []\n data = self.amplitude\n x_axis = range(0, len(data))\n x_axis = [x / self.fs for x in x_axis]\n plt.plot(x_axis, data)\n plt.axhline(self.height)\n for p in peaks:\n plt.axvline(p / self.fs, color=\"red\", alpha=0.2)\n plt.ylabel(\"Amplitude\")\n plt.xlabel(\"Time (seconds)\")\n plt.title(\"Waveform\")\n plt.show()", "def power(self):\n return self._power", "def test_fix_fingers_xmin_peak(self):\n\t\tself.watcher = ww.WeightWatcher(model=self.model, log_level=logging.WARNING)\n\t\t\t\n\t\t# default\n\t\tdetails = self.watcher.analyze(layers=[self.second_layer], xmax=FORCE, pl_package=POWERLAW_PACKAGE)\n\t\tactual = details.alpha.to_numpy()[0]\n\t\texpected = 7.116304\n\t\tprint(\"ACTUAL {}\".format(actual))\n\t\tself.assertAlmostEqual(actual,expected, places=2)\n\n\t\t# XMIN_PEAK xmax FORCED\n\t\tdetails = self.watcher.analyze(layers=[self.second_layer], fix_fingers='xmin_peak', xmax=FORCE, xmin_max=1.0, pl_package=POWERLAW_PACKAGE)\n\t\tactual = details.alpha[0]\n\t\tactual = details.alpha.to_numpy()[0]\n\t\texpected = 1.68\n\t\tdelta = 0.01\n\t\tself.assertAlmostEqual(actual,expected, None, '', delta)\n\t\t\n\t\t\n\t\t# XMIN_PEAK xmax None, sligltly different alphja\n\t\tdetails = self.watcher.analyze(layers=[self.second_layer], fix_fingers='xmin_peak', xmin_max=1.0, pl_package=POWERLAW_PACKAGE)\n\t\tactual = details.alpha[0]\n\t\tactual = details.alpha.to_numpy()[0]\n\t\texpected = 1.72\n\t\tdelta = 0.01\n\t\tself.assertAlmostEqual(actual,expected, None, '', delta)", "def __pow__( self, power ):\r\n\t\tif ( power > 0 ):\r\n\t\t\treturn fraction( self.numerator ** power, self.denominator ** power )\r\n\t\tif ( power < 0 ):\r\n\t\t\treturn fraction( self.denominator ** abs( power ), self.numerator ** abs( power ) )\r\n\t\treturn 1", "def getMaxPower(self):\n return self.max_power", "def testPeakLikelihoodFlux(self):\n # make mp: a flux measurer\n measControl = measAlg.PeakLikelihoodFluxControl()\n schema = afwTable.SourceTable.makeMinimalSchema()\n mp = measAlg.MeasureSourcesBuilder().addAlgorithm(measControl).build(schema)\n \n # make and measure a series of exposures containing just one star, approximately centered\n bbox = afwGeom.Box2I(afwGeom.Point2I(0, 0), afwGeom.Extent2I(100, 101))\n kernelWidth = 35\n var = 100\n fwhm = 3.0\n sigma = 
fwhm/FwhmPerSigma\n convolutionControl = afwMath.ConvolutionControl()\n psf = measAlg.SingleGaussianPsf(kernelWidth, kernelWidth, sigma)\n psfKernel = psf.getLocalKernel()\n psfImage = psf.computeKernelImage()\n sumPsfSq = numpy.sum(psfImage.getArray()**2)\n psfSqArr = psfImage.getArray()**2\n for flux in (1000, 10000):\n ctrInd = afwGeom.Point2I(50, 51)\n ctrPos = afwGeom.Point2D(ctrInd)\n\n kernelBBox = psfImage.getBBox(afwImage.PARENT)\n kernelBBox.shift(afwGeom.Extent2I(ctrInd))\n\n # compute predicted flux error\n unshMImage = makeFakeImage(bbox, [ctrPos], [flux], fwhm, var)\n\n # filter image by PSF\n unshFiltMImage = afwImage.MaskedImageF(unshMImage.getBBox(afwImage.PARENT))\n afwMath.convolve(unshFiltMImage, unshMImage, psfKernel, convolutionControl)\n \n # compute predicted flux = value of image at peak / sum(PSF^2)\n # this is a sanity check of the algorithm, as much as anything\n predFlux = unshFiltMImage.getImage().get(ctrInd[0], ctrInd[1]) / sumPsfSq\n self.assertLess(abs(flux - predFlux), flux * 0.01)\n \n # compute predicted flux error based on filtered pixels\n # = sqrt(value of filtered variance at peak / sum(PSF^2)^2)\n predFluxErr = math.sqrt(unshFiltMImage.getVariance().get(ctrInd[0], ctrInd[1])) / sumPsfSq\n\n # compute predicted flux error based on unfiltered pixels\n # = sqrt(sum(unfiltered variance * PSF^2)) / sum(PSF^2)\n # and compare to that derived from filtered pixels;\n # again, this is a test of the algorithm\n varView = afwImage.ImageF(unshMImage.getVariance(), kernelBBox)\n varArr = varView.getArray()\n unfiltPredFluxErr = math.sqrt(numpy.sum(varArr*psfSqArr)) / sumPsfSq\n self.assertLess(abs(unfiltPredFluxErr - predFluxErr), predFluxErr * 0.01)\n \n for fracOffset in (afwGeom.Extent2D(0, 0), afwGeom.Extent2D(0.2, -0.3)):\n adjCenter = ctrPos + fracOffset\n if fracOffset == (0, 0):\n maskedImage = unshMImage\n filteredImage = unshFiltMImage\n else:\n maskedImage = makeFakeImage(bbox, [adjCenter], [flux], fwhm, var)\n # filter image by PSF\n filteredImage = afwImage.MaskedImageF(maskedImage.getBBox(afwImage.PARENT))\n afwMath.convolve(filteredImage, maskedImage, psfKernel, convolutionControl)\n\n exposure = afwImage.makeExposure(filteredImage)\n exposure.setPsf(psf)\n \n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n mp.apply(source, exposure, afwGeom.Point2D(*adjCenter))\n measFlux = source.get(measControl.name)\n measFluxErr = source.get(measControl.name + \".err\")\n self.assertFalse(source.get(measControl.name + \".flags\"))\n self.assertLess(abs(measFlux - flux), flux * 0.003)\n \n self.assertLess(abs(measFluxErr - predFluxErr), predFluxErr * 0.2)\n\n # try nearby points and verify that the flux is smaller;\n # this checks that the sub-pixel shift is performed in the correct direction\n for dx in (-0.2, 0, 0.2):\n for dy in (-0.2, 0, 0.2):\n if dx == dy == 0:\n continue\n offsetCtr = afwGeom.Point2D(adjCenter[0] + dx, adjCenter[1] + dy)\n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n mp.apply(source, exposure, offsetCtr)\n offsetFlux = source.get(measControl.name)\n self.assertLess(offsetFlux, measFlux)\n \n # source so near edge of image that PSF does not overlap exposure should result in failure\n \n for edgePos in (\n (1, 50),\n (50, 1),\n (50, bbox.getHeight() - 1),\n (bbox.getWidth() - 1, 50),\n ):\n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n mp.apply(source, exposure, afwGeom.Point2D(*edgePos))\n self.assertTrue(source.get(measControl.name + \".flags\"))\n \n # 
no PSF should result in failure: flags set\n noPsfExposure = afwImage.ExposureF(filteredImage)\n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n mp.apply(source, noPsfExposure, afwGeom.Point2D(*adjCenter))\n self.assertTrue(source.get(measControl.name + \".flags\"))", "def maxwavelen(self):\n return self._maxwavelen", "def get_peakiness(spot_data):\n return spot_data[3] / np.mean((spot_data[5], spot_data[6]))", "def _powerperdegree(self):\n return SHPowerSpectrum(self.coeffs)", "def fit_sky(self):\n min_value = self.data.min()\n ring_model = models.Ring2D(\n min_value, self.x, self.y, self._box * 0.4, width=self._box * 0.4\n )\n ring_model.r_in.fixed = True\n ring_model.width.fixed = True\n ring_model.x_0.fixed = True\n ring_model.y_0.fixed = True\n fit_p = fitting.LevMarLSQFitter()\n return fit_p(ring_model, self._XGrid, self._YGrid, self.data).amplitude", "def excitationPulse(self, time, power):\n t = time * ns + self.step # Should center at one step before 0\n if self.step <= 200 * ps: # resolution warrants modelling the pulse\n width = 200.0 * ps # self.step\n\n if t < width * 10: # Only evaulate when the value is significant\n amp = power / (width * sqrt_2pi) # normalized amplitude\n value = amp * np.exp(-1.0 * (t) * (t) / (2 * width * width))\n value = value\n else:\n value = 0.0\n else: # impulsive limit, just dump all the excitons in at t=0\n # if time >= 0 - self.step/2 and time < 0 + self.step/2:\n if t > -0.5 * self.step and t <= 0.5 * self.step:\n value = power / self.step\n else:\n value = 0.0\n return (value*self.step)", "def return_power_spectrum(self, freq_signal, time_signal):\n\n\t\tfreq_signal **= 2 \n\t\tlen_fts = len(freq_signal)\n\t\tlen_signal = len(signal)\n\n\t\tif len_signal % 2:\n\t\t\tfreq_signal[1:len_fts] *= 2\n\n\t\telse:\n\t\t\tfreq_signal[1:len_fts-1] *= 2\n\n\t\treturn freq_signal", "def set_peak(self, p):\n self.peak = p", "def calculateMaxAmplitude(sampleWidth: int) -> int:\n return 2 ** (sampleWidth * NUM_BITS_IN_A_BYTE - 1) - 1", "def waveband(self):\n return self.get(\"waveband\")", "def lamg(freq, w):\n la = lam(freq)\n return la / _np.sqrt(1.0 - (la / (2 * w)) ** 2) # wavelength in WG-62 waveguide", "def wave(self):\n return self._wave", "def power(self):\n return irradiance_on_plane(self.vnorm, self.h,\n self.date, self.lat) * self.s * self.eff", "def set_wavelength(self, wavelength: float) -> None:\n\n assert isinstance(wavelength, float), \"Incompatible type\"\n\n #:SENSe[n][:CHANnel[m]]:POWer:WAVelength /?\n self._inst.write(\"SENS:POW:WAV {}\".format(wavelength))", "def FWHM(self):\n # The width of the Lorentz profile\n fl = 2.0 * self[\"al\"]\n # Width of the Gaussian [2.35 = 2*sigma*sqrt(2*ln(2))]\n fd = 2.35482 * self['ad']\n return 0.5346 * fl + numpy.sqrt(0.2166 * (fl**2.) 
+ fd**2.)", "def _powerperdegree(self):\n return SHCPowerSpectrum(self.coeffs)", "def current_power_w(self):\n if self._devtype == \"pod\":\n return self._current_consumption\n return False", "def spectral_abs_slope_mean(data, fft_data):\n spec = np.abs(fft_data)\n slope = np.abs(np.diff(spec))\n return np.mean(slope)", "def power_amplifier(s, thresh, pwr=2):\n\n #normalize the signal\n s /= s.max()\n\n #shift the signal so elements at the threshold are set to 1\n s += 1.0 - thresh\n\n #raise the envelope to a power, amplifying values that are above 1\n s = s**pwr\n\n #re-normalize\n s -= (1.0 - thresh)**pwr\n s /= s.max()\n\n return s", "def power_flux_at_distance(power: float, distance: float,\n mode: str = 'dBW') -> float:\n\n field_strength = field_strength_at_distance(power, distance, mode)\n\n power_flux = field_strength - 145.8\n\n return power_flux" ]
[ "0.65199757", "0.65100896", "0.6436953", "0.6426267", "0.6408306", "0.62890404", "0.6252019", "0.624753", "0.6180097", "0.61793584", "0.6162006", "0.6152941", "0.61026114", "0.6093139", "0.6071749", "0.6033385", "0.59775233", "0.59704584", "0.5951522", "0.5938655", "0.5900657", "0.58935624", "0.5891705", "0.5880766", "0.5878145", "0.5877815", "0.58640546", "0.5856577", "0.58406913", "0.5830427", "0.5829542", "0.58269423", "0.5825448", "0.581779", "0.58157176", "0.58157176", "0.58094066", "0.5806944", "0.5803237", "0.58014774", "0.5800042", "0.57921505", "0.5789859", "0.57693535", "0.5766361", "0.5750858", "0.5750741", "0.57462597", "0.57449955", "0.57279056", "0.5726809", "0.57227236", "0.5701733", "0.5700424", "0.56996524", "0.56996524", "0.56988364", "0.56915444", "0.5675742", "0.5671954", "0.56597984", "0.5655974", "0.5651567", "0.5648294", "0.5628923", "0.56285983", "0.56274825", "0.56245536", "0.5622105", "0.56185865", "0.5596651", "0.55958885", "0.5570705", "0.55687857", "0.5566247", "0.5560518", "0.5558804", "0.555821", "0.55537933", "0.55455786", "0.55280685", "0.55162776", "0.5507141", "0.5493388", "0.5492392", "0.54840255", "0.5483295", "0.546035", "0.54566306", "0.5456327", "0.54453397", "0.5440652", "0.5439781", "0.5434588", "0.5428122", "0.5425644", "0.5422914", "0.54168504", "0.54131544", "0.54131055" ]
0.5508018
82
Make a hex string from the venue names to use as a unique id. Only the last 8 characters are used for the unique id.
def make_unique_id(venue_list):
    md5_hash = md5()
    for name in venue_list:
        md5_hash.update(name)
    hash_hex = md5_hash.hexdigest()
    return hash_hex[-8:]
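As an illustration only, a Python 3 rendering of the same hashing idea; the explicit .encode() call is an assumption needed because hashlib in Python 3 takes bytes, whereas the record's code passes str directly (Python 2 style):

# Illustrative sketch, not part of the dataset record.
from hashlib import md5

def make_unique_id_py3(venue_names):
    digest = md5()
    for name in venue_names:
        # Python 3 hashlib needs bytes, hence the encode (an addition over the record's code).
        digest.update(name.encode("utf-8"))
    # Keep only the last 8 hex characters as the short id.
    return digest.hexdigest()[-8:]

# The same input list always yields the same 8-character hex id.
print(make_unique_id_py3(["Venue A", "Venue B", "Venue C"]))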
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _make_uuid():\n parts = [Record._hex_string(k) for k in Record.UUID_PARTS]\n return \"-\".join(parts)", "def _unique_id():\n id = \"\"\n for i in xrange(0,8):\n id += choice(ascii_letters)\n return id", "def format_unique_id(address: str) -> str:\n return address.replace(\":\", \"\").lower()", "def makeid(cls):\n return str(uuid.uuid4().hex)", "def _id(target):\n return ''.join([hex(char) for char in bytearray(target)])", "def unique_id() -> str:", "def get_hex_id(fullRouterName):\n hexId = \"\"\n if fullRouterName.count(\"=\") > 0:\n hexId = fullRouterName.split(\"=\")[0]\n else:\n hexId = fullRouterName.split(\"~\")[0]\n hexId = hexId.replace(\"$\", \"\")\n return hexId", "def create_hash_hex(self, vehicles):\n field = \"\"\n for i, vehicle in enumerate(vehicles):\n if vehicle.orientation == 'H':\n x = vehicle.x\n if x == 10:\n x = \"a\"\n elif x == 11:\n x = \"b\"\n field += str(x)\n else:\n y = vehicle.y\n if y == 10:\n y = \"a\"\n elif y == 11:\n y = \"b\"\n field += str(y)\n return field", "def generate_id(self):\n unique_id = \"\"\n\n while len(unique_id) < self.id_length:\n ascii_number = self.get_random_bits()\n\n if self.is_approved_ascii(ascii_number):\n random_char = chr(ascii_number)\n\n if not self.is_excluded_char(random_char):\n unique_id += chr(ascii_number)\n\n return unique_id", "def _generate_id() -> str:\n return \"\".join(sample(\"abcdefghjkmopqrstuvqxyz\", 16))", "def unique_id() -> bytes:", "def generate_id():\n return str(uuid.uuid4())[:5].replace('e','a')", "def unique_str():\n return hex(random.randint(0, 256 * 256 * 256 * 256 - 1))[2:]", "def _generate_tracking_number(self):\n return uuid.uuid4().hex.upper()", "def _NewUUIDString ():\n if __HaveUUID:\n return uuid.uuid1().urn\n return '%s:%08.8x' % (time.strftime('%Y%m%d%H%M%S'), random.randint(0, 0xFFFFFFFF))", "def make_trace_id(trace_id: bytes) -> str:\n return base64.b64encode(trace_id).decode(\"utf-8\")", "def _make_uuid(val):\n h = hashlib.md5(val).hexdigest()\n return '{0}-{1}-{2}-{3}-{4}'.format(\n h[:8], h[8:12], h[12:16], h[16:20], h[20:])", "def uuid():\n return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(32))", "def create_uid():\n return random_string(5, string.hexdigits.lower())\n # return (\"%x\" % (int(time.time()) * 0x10 % 0x1000000000)\n # + random_string(7, string.hexdigits.lower()))", "def gen_uuid() -> str:\n return str(uuid4())", "def _create_finding_id(control_id, resource_name, length=20):\n input = control_id + resource_name\n hex = hashlib.sha256(input.encode('UTF-8')).hexdigest()\n result = int(hex, 16) % (10 ** length)\n return str(result)", "def unique_id(self) -> str:\n return '{0}_{1}'.format(self._mac.replace(':', ''), self.entity_id)", "def generate_order_id():\n rands = []\n for i in range(0, 16):\n r = random()\n rand = 4294967296.0 * r\n rands.append((int(rand) >> ((3 & i) << 3)) & 255)\n\n hexa = []\n for i in range(0, 256):\n hexa.append(str(hex(i+256)).lstrip(\"0x\").rstrip(\"L\")[1:])\n\n id = \"\"\n for i in range(0, 16):\n id += hexa[rands[i]]\n\n if (i == 3) or (i == 5) or (i == 7) or (i == 9):\n id += \"-\"\n\n return(id)", "def new_uid():\n return str(uuid.uuid1())[:30]", "def get_uuid(s):\n sha = sha256(s.encode('utf-8')).hexdigest()\n uuid = UUID(sha[:32])\n return str(uuid)", "def gen_uuid():\n return str( uuid.uuid4() )", "def gen_uuid():\n return str( uuid.uuid4() )", "def gen_uuid():\n return str(uuid.uuid4())", "def generate_id():\n return uuid4().get_hex()", "def generate_id(employee_id) :\n\n\t\thash_bits = 
random.getrandbits(128)\n\t\thash_code = \"%032x\" % hash_bits\n\t\thash = hash_code[:6] + str(employee_id)\n\n\t\treturn hash", "def _str2id(text):\n return sha1(text).hexdigest()", "def getHash():\n return str(uuid.uuid4())[-17:].replace(\"-\", \"\")", "def stringify(self):\n hexcode = \"#\"\n for x in self.value:\n part = hex(x)[2:]\n if len(part) < 2: part = \"0\" + part\n hexcode += part\n return hexcode", "def _create_id(length=40):\n\n numbers = map(str, range(10))\n letters = string.ascii_lowercase\n options = [*letters[:letters.index('f') + 1], *numbers]\n\n return ''.join(random.choice(options) for _ in range(length))", "def advertised_id(self):\n namespace = '0x' + self.uuid[:8] + self.uuid[-12:]\n major, minor = map(int, (self.major, self.minor))\n temp_instance = self._append_hex(major, minor)\n instance = self._add_padding(temp_instance)\n beacon_id = self._append_hex(int(namespace, 16), instance)\n return base64.b64encode(self.long_to_bytes(beacon_id))", "def get_unique_id(name: str) -> str:\n name = get_data_source(name)\n suffixes = \".\".join(sfx for sfx in get_format_suffixes(name) if sfx)\n return re.sub(rf\"[.]{suffixes}$\", \"\", name)", "def unique_id(self) -> str:\n return f\"{self._mac}_tracker\"", "def guid():\n base_uuid = uuid.uuid4()\n number = base_uuid.int & ((2 ** 20) - 1)\n return base62_encode(number)", "def _generate(self, hashed = True):\r\n\r\n identifier = str(uuid.uuid4())\r\n identifier = identifier.upper()\r\n if not hashed: return identifier\r\n identifier = legacy.bytes(identifier)\r\n hash = hashlib.sha256(identifier)\r\n identifier = hash.hexdigest()\r\n identifier = identifier.upper()\r\n return identifier", "def unique_id(self) -> str:\n return f\"{self.wallet_id}{self.WALLET_KEY_POSTFIX}\"", "def generate_subsegment_id():\n return uuid.uuid4().hex[:16]", "def genSCID():\n scid_hex = getRandomBytes(8)\n scid_hex = getSHA256Hex(scid_hex)\n scid_hex = scid_hex[0:8]\n return scid_hex", "def str_id(self, id):\n\t\ts = bin(id)\n\t\ts = '0'*64 + s[2:]\n\t\tidbits = 64 - (2*self.xybits+self.tbits)\n\t\tret = []\n\t\tnover = self.xybits - self.level\n\t\tfor l, k in [(0, idbits), (0, self.tbits), (nover, self.xybits), (nover, self.xybits)]:\n\t\t\tc = s[-k:]\n\t\t\ts = s[:-k]\n\t\t\tif l:\n\t\t\t\tc = c[:-l] + '[' + c[-l:] + ']'\n\t\t\tret.insert(0, c)\n\t\treturn ' '.join(ret)", "def generate_id():\n\treturn \"%s-%s\" % (str(uuid.uuid4())[:4],random.choice(funnames).lower())", "def getAAAZZZSId(self):\n return f\"{self.a}{self.z:>03d}{self.state}\"", "def create_ids(input):\r\n return hashlib.md5(input)", "def fill_id(id):\n if len(str(id)) < 7:\n length = len(str(id))\n id = \"0\"*(7 - length) + str(id)\n return str(id)", "def _encode_name(self, name):\n uuid_str = name.replace(\"-\", \"\")\n vol_uuid = uuid.UUID('urn:uuid:%s' % uuid_str)\n vol_encoded = base64.urlsafe_b64encode(vol_uuid.bytes)\n if six.PY3:\n vol_encoded = vol_encoded.decode('ascii')\n return vol_encoded[:19]", "def generate_ids():\n payloads = ['info', 'ad_tracking', 'airdrop', 'store', 'siri', 'desktop', 'desktop_services', 'dock', 'energy',\n 'filevault', 'finder', 'firewall', 'itunes', 'login', 'passcode', 'password', 'restrictions', 'safari',\n 'screensaver', 'setup', 'software', 'diagnostics', 'policy', 'policy_2', 'preferences',\n 'preferences_security', 'time_machine']\n ids = {}\n for i, payload in enumerate(payloads):\n identifier = str(uuid.uuid4()).upper()\n ids[payload] = identifier[9:]\n return ids", "def generate_uuids():\n uuid_start = str(uuid())\n while 
uuid_start.startswith(\"zzzzzzzz\"):\n uuid_start = str(uuid())\n uuid_end = list(deepcopy(uuid_start))\n \n char_pool = list(string.digits) + \\\n list(string.ascii_uppercase) + \\\n list(string.ascii_lowercase) \n # print(f\"char_pool: {char_pool}\")\n substitute_char = ''\n i = 0\n while i < 8:\n char_from_start_uuid = uuid_start[i]\n if char_from_start_uuid == \"z\":\n i += 1\n continue\n else:\n next_index_in_pool = char_pool.index(char_from_start_uuid) + 1\n substitute_char = char_pool[next_index_in_pool]\n break\n uuid_end[i] = substitute_char\n uuid_end = ''.join(uuid_end)\n print(f\"generated uuids: {uuid_start}, {uuid_end}\")\n return uuid_start, str(uuid_end)", "def generate_anki_guid() -> str:\n\n def base62(num: int, extra: str = \"\") -> str:\n s = string\n table = s.ascii_letters + s.digits + extra\n buf = \"\"\n while num:\n num, i = divmod(num, len(table))\n buf = table[i] + buf\n return buf\n\n _base91_extra_chars = \"!#$%&()*+,-./:;<=>?@[]^_`{|}~\"\n\n def base91(num: int) -> str:\n # all printable characters minus quotes, backslash and separators\n return base62(num, _base91_extra_chars)\n\n return base91(random.randint(0, 2 ** 64 - 1))", "def generate_id():\n return \"%s-%s\" % (str(uuid.uuid4())[:4],random.choice(funnames).lower())", "def unique_id(self):\r\n name_slug = slugify(self._name)\r\n return f\"{name_slug}\"", "def int2hex(n: int) -> str:", "def encode(uuid_):\n return base64.urlsafe_b64encode(uuid_.bytes)[:-2] # Drop '==' padding", "def uuid( *args ):\n t = long( time.time() * 1000 )\n r = long( random.random()*100000000000000000L )\n try:\n a = socket.gethostbyname( socket.gethostname() )\n except:\n # if we can't get a network address, just imagine one\n a = random.random()*100000000000000000L\n data = str(t)+' '+str(r)+' '+str(a)+' '+str(args)\n data = hashlib.md5(data).hexdigest()\n return data", "def unique_name():\n return \"unique-{0}\".format(uuid.uuid4())", "def unique_id() -> bytes:\n ...", "def genShareID(store):\n return unicode(os.urandom(16).encode('hex'), 'ascii')", "def _mk_coref_id():\n num, alpha = int(_mk_coref_id.id[:-1]), _mk_coref_id.id[-1]\n if alpha == 'Z':\n alpha = 'A'\n num += 1\n else:\n alpha = chr(ord(alpha) + 1)\n\n _mk_coref_id.id = '%s%s' % (num, alpha)\n return _mk_coref_id.id", "def id(self):\n return \"{model:s}--{serial:08x}\".format(model=self.model.replace('-',''), serial=self.serial_number).lower()", "def create_id(uid, begintime, endtime):\n allowed_chars = string.ascii_lowercase[:22] + string.digits\n temp = re.sub('[^{}]'.format(allowed_chars), '', uid.lower())\n return re.sub('[^{}]'.format(allowed_chars), '', uid.lower()) + str(arrow.get(begintime).timestamp) + str(arrow.get(endtime).timestamp)", "def uid():\r\n u = str(uuid.uuid4())[:22]\r\n u = u.replace(\"-\",\"_\")\r\n return u", "def read_guid(self):\n return ''.join(self.hextostring(i) for i in self.read('bytes:16'))", "def genLowCaseID(size):\n\tid = \"\"\n\tfor i in range(size):\n\t\tid = id + selectRandomFromList(loCaseChars)\n\treturn id", "def genHexStr(instr: str) -> str:\n\n return hashlib.md5(instr.encode(\"utf-8\")).hexdigest()", "def unique_id(self) -> str:\n return \"{}-{}-{}\".format(*self._id)", "def new_case_id():\n return uuid.uuid4().hex", "def new_case_id():\n return uuid.uuid4().hex", "def nice():\n rawBytes = uuid.uuid4().bytes\n rawBytes =bytes(chr((rawBytes[0]) & 0x7f),'ascii')+rawBytes[1:] # Ensure slug starts with [A-Za-f]\n return base64.urlsafe_b64encode(rawBytes)[:-2] # Drop '==' padding", "def _generate_uuid():\n return 
str(uuid.uuid4())", "def uuid4():\n b = ''.join('%x' % x for x in os.urandom(16))\n return \"%s-%s-%s-%s-%s\" % (b[0:8], b[8:12], b[12:16], b[16:20], b[20:])", "def tubeid():\n return binascii.hexlify(os.urandom(12))", "def uuid_to_base62():\n integer = uuid4().int\n base = string.digits + string.ascii_letters\n if integer == 0:\n return base[0]\n\n length = len(base)\n ret = ''\n while integer != 0:\n ret = base[integer % length] + ret\n integer = integer // length\n\n return ret", "def create_hash() -> str:\n length = 6\n char = string.ascii_uppercase + string.digits + string.ascii_lowercase\n\n # Generate a new ID, until one is found that is unique\n while True:\n hash = \"\".join(random.choice(char) for _ in range(length))\n\n if not utils.cache_is_hash_taken(hash):\n return hash", "def generate_unique_name():\n return 'titanic-' + str(get_mac())", "def GenDistinctId(self):\t\n \"\"\"4 bits to unique a machine \\\n\t5 bits for processes\"\"\"\n\tmachineId = format(self.mid, 4)\n processId = format(self.pid) \n \treturn machineId + processId", "def get_uuid():\n return str(uuid4())", "def unique_hash(only_letters=False):\n\n if only_letters:\n return ''.join((chr(int(x) + 97) if x.isdigit() else x)\n for x in uuid.uuid4().hex)\n return uuid.uuid4().hex", "def create_identifier(query):\n return sha1(query.encode()).hexdigest()[:20]", "def generate_message_control_id():\n d = datetime.datetime.utcnow()\n # Strip off the decade, ID only has to be unique for 3 years.\n # So now we have a 16 char timestamp.\n timestamp = d.strftime(\"%y%j%H%M%S%f\")[1:]\n # Add 4 chars of uniqueness\n unique = \"\".join(random.sample(alphanumerics, 4))\n return timestamp + unique", "def v4():\n return base64.urlsafe_b64encode(uuid.uuid4().bytes)[:-2] # Drop '==' padding", "def generate_wallet_id(cls) -> str:\n return str(uuid.uuid4())", "def get_debug_firmware_id_string(self):\n # Read the address via get_var_strict; this will fetch the value\n # from chipdata as well, but we can ignore it.\n chip_str = self.chipdata.get_var_strict('$_build_identifier_string')\n rawstr = self.debuginfo.get_dm_const(chip_str.address, chip_str.size)\n\n decoded_str = \"\"\n for chars in rawstr:\n if Arch.addr_per_word == 4:\n # The raw string is encoded with four chars per word\n string = cu.get_string_from_word(Arch.addr_per_word, chars)\n stop_decoding = False\n for char in string:\n if char != '\\0':\n decoded_str += char\n else:\n stop_decoding = True\n break\n if stop_decoding:\n break\n else:\n # The raw string is encoded with two chars per word\n upper_part = (chars & 0xFF00) >> 8\n lower_part = chars & 0x00FF\n # strip the null terminator.\n if upper_part != 0:\n decoded_str += chr(upper_part)\n else:\n break\n if lower_part != 0:\n decoded_str += chr(lower_part)\n else:\n break\n\n return decoded_str.strip() # Strip any leading/trailing whitespace", "def id_generator():\r\n new_id = uuid.uuid4()\r\n return new_id.hex", "def make_id():\n global _simple_id\n\n import uuid\n from ..settings import settings\n\n if settings.simple_ids(False):\n _simple_id += 1\n new_id = _simple_id\n else:\n new_id = uuid.uuid4()\n return str(new_id)", "def _guid64():\n return _base91(random.randint(0, 2**64 - 1))", "def name_to_id(player_name):\n # This is fairly unsophisticated, just does a CRC32 on the name. 
Can be\n # optimized both for compute requirements and collision frequency using\n # another hashing algorithm.\n return binascii.crc32(player_name) & 0xFFFFFFFF", "def unpack_uuid(data):\n return data[:16], 16", "def full_id(schema_obj):\n\n return '0x%08x' % ((schema_obj.parent.number << 16) | schema_obj.number)", "def _generate_omf_asset_id(asset_code):\n\n asset_id = asset_code.replace(\" \", \"\")\n return asset_id", "def string_id(length=8):\n return ''.join(random.choice(string.ascii_letters +\n string.digits)\n for _ in range(length))", "def guid( *args ):\n\tt = long( time.time() * 1000 )\n\tr = long( random.random()*100000000000000000L )\n\ttry:\n\t\ta = socket.gethostbyname( socket.gethostname() )\n\texcept:\n\t\t# if we can't get a network address, just imagine one\n\t\ta = random.random()*100000000000000000L\n\tdata = str(t)+' '+str(r)+' '+str(a)+' '+str(args)\n\tdata = hashlib.md5(data).hexdigest()\n\n\treturn data", "def get_rand_hex(string_len=8):\n assert isinstance(string_len, int) and string_len > 0\n randos = \"\".join(str(uuid4()).split('-'))\n assert string_len <= len(randos)\n string_len = -string_len\n return randos[string_len:]", "def convert_guid_intstr(guid):\n return str(int(guid, 16))", "def generateUUID(): # pylint: disable=C0103\r\n return str(uuid.uuid4())", "def generate_id(length: int = 8):\n return \"\".join(random.choices(string.ascii_uppercase, k=length))", "def Illumina_ID(rid):\n index = rid.find(\":\") # finds the first occurance of ':'\n new_id = rid[:index] + \":1:12345\" + rid[index:]\n new_id_split = re.split(\"#|/\", new_id)\n new_id = new_id_split[0] + \" \" + new_id_split[2] + \":Y:0:\" + new_id_split[1]\n return new_id", "def _build_name(name_id):\n return \"xp_%08d\" % name_id", "def uuid4(short: bool = False) -> str:\n return str(uuid.uuid4())[:18 if not short else 8]" ]
[ "0.7108777", "0.67829424", "0.67574894", "0.66712064", "0.66090417", "0.6605987", "0.6547099", "0.643587", "0.6423709", "0.64172685", "0.6412221", "0.6411684", "0.63644415", "0.63502777", "0.6323095", "0.62953424", "0.6283156", "0.62323576", "0.61994445", "0.61936736", "0.61043406", "0.6089622", "0.60891914", "0.608694", "0.60810244", "0.6064978", "0.6064978", "0.6064894", "0.6049746", "0.60430974", "0.60330254", "0.60272896", "0.6009341", "0.5999098", "0.59970033", "0.59852445", "0.5980762", "0.59774494", "0.59720767", "0.59716237", "0.5962509", "0.59572715", "0.59536916", "0.5952459", "0.59516996", "0.59516376", "0.5950796", "0.5950192", "0.5949231", "0.5929416", "0.59219253", "0.5921331", "0.59143466", "0.59077543", "0.5904865", "0.59046876", "0.5903533", "0.5891883", "0.58851355", "0.5879264", "0.587631", "0.586355", "0.58523244", "0.5841022", "0.58390594", "0.5832224", "0.5827631", "0.5824276", "0.5824276", "0.5820716", "0.58175725", "0.5817193", "0.581045", "0.58073705", "0.5802796", "0.57933", "0.57909465", "0.5788801", "0.5785342", "0.57821774", "0.57790935", "0.5777697", "0.5773731", "0.57684684", "0.5767761", "0.57628906", "0.5741575", "0.57386863", "0.5737401", "0.5736614", "0.57264954", "0.57252765", "0.5721893", "0.5715069", "0.57087624", "0.57081366", "0.5704864", "0.570267", "0.5700385", "0.5699949" ]
0.8124629
0
This is the algorithm. Get the score between these two venues.
def score(cur_ven, ven):
    try:
        alpha = 750
        numerator = (ven["rating"] * 0.75) + (2.5 * (1 - eulers**(-ven["ratingSignals"]/144)))
        cur_coord = (cur_ven["location"]["lat"], cur_ven["location"]["lng"])
        ven_coord = (ven["location"]["lat"], ven["location"]["lng"])
        denominator = vincenty(cur_coord, ven_coord).meters + alpha
    except Exception as e:
        print "{}, \n has produced an error from {}".format(ven["name"], e)
        return float("-inf")
    return numerator / denominator
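A hedged sketch of the same scoring formula: the 0.75, 2.5, 144, and 750 constants are taken from the record above, while the haversine helper stands in for geopy's vincenty and is an assumption made so the example is self-contained:

# Illustrative sketch, not part of the dataset record; haversine replaces geopy's vincenty.
import math

def haversine_m(lat1, lng1, lat2, lng2):
    # Great-circle distance in meters between two (lat, lng) points.
    r = 6371000.0
    phi1, phi2 = math.radians(lat1), math.radians(lat2)
    dphi = math.radians(lat2 - lat1)
    dlmb = math.radians(lng2 - lng1)
    a = math.sin(dphi / 2) ** 2 + math.cos(phi1) * math.cos(phi2) * math.sin(dlmb / 2) ** 2
    return 2 * r * math.asin(math.sqrt(a))

def venue_score(cur_ven, ven, alpha=750.0):
    # Rating term: 75% of the rating plus a bonus that saturates as the review count grows.
    numerator = ven["rating"] * 0.75 + 2.5 * (1 - math.exp(-ven["ratingSignals"] / 144.0))
    # Distance term: meters between the venues, damped by alpha so nearby venues don't dominate.
    dist = haversine_m(cur_ven["location"]["lat"], cur_ven["location"]["lng"],
                       ven["location"]["lat"], ven["location"]["lng"])
    return numerator / (dist + alpha)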
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_score(self, a, b):\n ### FILL IN ###", "def __get_score(self):\n for pair in zip(self.nu[self.nu_idx:], self.sw[self.sw_idx:]):\n if pair[0] == pair[1]:\n self.score += 1\n else:\n break", "def reviewer_similarity_score(self, other: _Vertex) -> float:\n if self.degree() == 0 or other.degree == 0:\n return 0.0\n else:\n neighbours = self.neighbours\n other_neighbours = other.neighbours\n same_neighbours = neighbours.keys() & other_neighbours.keys()\n union = len(self.neighbours) + len(other.neighbours)\n sim_score_so_far = 0\n\n for vertex in same_neighbours:\n # 'bothered reviewing' bonus:\n sim_score_so_far += 1\n # 'love' bonus\n if self.neighbours[vertex] >= 9 and other.neighbours[vertex] >= 9:\n sim_score_so_far += 2\n # 'like' bonus\n elif self.neighbours[vertex] >= 7 and other.neighbours[vertex] >= 7:\n sim_score_so_far += 1\n\n return sim_score_so_far / union", "def matchbetween(self):\n team1_toss_factor, team2_toss_factor = self.toss_factor()\n\n avgScoredByTeam1 = self.team1.attack / self.team2.defense * team1_toss_factor\n avgScoredByTeam2 = self.team2.attack / self.team1.defense * team2_toss_factor\n\n\n while True:\n self.team1score = np.random.poisson(avgScoredByTeam1)\n self.team2score = np.random.poisson(avgScoredByTeam2)\n if self.team1score > self.team2score:\n self.team1.points += 3\n self.team1.won += 1\n self.team2.lost += 1\n self.winner = self.team1\n break\n elif self.team1score < self.team2score:\n self.team2.points += 3\n self.team2.won += 1\n self.team1.lost += 1\n self.winner = self.team2\n break\n else:\n if self.groupcheck is True:\n self.team1.points += 1\n self.team2.points += 1\n self.team1.tie += 1\n self.team2.tie += 1\n break\n self.team1.scored += self.team1score\n self.team2.scored += self.team2score\n self.team1.conceded += self.team2score\n self.team2.conceded += self.team1score\n self.team1.goaldifference += self.team1score-self.team2score\n self.team2.goaldifference += self.team2score-self.team1score", "def _get_similarity_score(self, dict1, dict2):\n try:\n majorScoreDeterminer1 = ['primaryGenreId']\n majorScoreDeterminer2 = ['genreIds']\n Score = 0 # Base Score\n for items in majorScoreDeterminer2:\n\n for item1 in self._get_app_param_info(dict1, resultCount=1, resultKey=items):\n if item1 in self._get_app_param_info(dict2, resultCount=1, resultKey=items):\n if Score == 0: # Add 50% base score for this category.\n Score += 2 * .5\n Score += 2 * .5 / len(self._get_app_param_info(dict1, resultCount=1, resultKey=items))\n\n for items in majorScoreDeterminer1:\n if str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) in str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) and str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) and str(\n self._get_app_param_info(dict1, resultCount=1, resultKey=items)):\n Score += (3 / len(majorScoreDeterminer1))\n\n nameMatchScore = difflib.SequenceMatcher(None,\n self._get_app_param_info(dict1, resultCount=1,\n resultKey='trackName'),\n self._get_app_param_info(dict2, resultCount=1,\n resultKey='trackName')).ratio()\n Score += nameMatchScore\n\n minorScoreDeterminer = ['isGameCenterEnabled', 'languageCodesISO2A', 'contentAdvisoryRating', 'artistId',\n 'formattedPrice']\n\n for items in minorScoreDeterminer:\n if items == \"formattedPrice\":\n if str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) == \"Free\" and str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) == \"Free\":\n Score += (4 / 
(len(minorScoreDeterminer)))\n elif str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) == \"Free\" and str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) != \"Free\":\n continue\n elif str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) != \"Free\" and str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) == \"Free\":\n continue\n elif str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) != \"Free\" and str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) != \"Free\":\n Score += (4 / (len(minorScoreDeterminer)))\n else:\n if str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) in str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)):\n Score += (4 / (len(minorScoreDeterminer)))\n Score = round(Score, 1)\n log_str = \"id\" + str(self._get_app_param_info(dict2, resultCount=1, resultKey='trackId')) + \" - \" + str(\n self._get_app_param_info(dict2, resultCount=1, resultKey='trackName')) + \"\\tScore: \" + str(Score)\n except AssertionError as e:\n print(\"Error: _get_similarity_score %s\" % e)\n sys.exit(\"Exit script with error code %s\" % e)\n except TypeError as e:\n print(\"Error: _get_similarity_score %s\" % e)\n sys.exit(\"Exit script with error code %s\" % e)\n except:\n e = sys.exc_info()[0]\n print(\"Error: _get_similarity_score %s\" % e)\n sys.exit(\"Exit script with error code %s\" % e)\n else:\n return log_str", "def matchscore(self):\n print(self.team1.name + \" \" + str(self.team1score) + \" - \" + str(self.team2score) + \" \" + self.team2.name)", "def score(stripe1, stripe2):\n scr = 0\n count = 0\n for p1, p2 in zip(stripe1, stripe2):\n r = abs(p1[0] - p2[0])\n g = abs(p1[1] - p2[1])\n b = abs(p1[2] - p2[2])\n scr += r + g + b\n return scr", "def get_score(self,sentence_1, sentence_2):\n\t return self.DELTA * self.semantic_similarity(sentence_1, sentence_2, True) + (1.0 - self.DELTA) * self.word_order_similarity(sentence_1, sentence_2)", "def score(self):", "def find_scores(self):\n p1_score = self.p1_store()\n p2_score = self.p2_store()\n return p1_score, p2_score", "def compare_venue(self, a, b):\n a_name = a['venue']['name']; b_name = b['venue']['name']\n if a_name < b_name:\n return -1\n elif a_name > b_name:\n return 1\n else:\n return 0", "def classify(self, source1, source2):\n\n scores1 = self.similarity_scores(source1)\n scores2 = self.similarity_scores(source2)\n \n print('scores for ' + source1.name +':' + str(self.similarity_scores(source1)))\n print('scores for ' + source2.name +':' + str(self.similarity_scores(source2)))\n \n source1_score=0\n source2_score=0\n\n for i in range(len(scores1)):\n if scores1[i]> scores2[i]:\n source1_score+=1\n elif scores2[i]>scores1[i]:\n source2_score+=1\n \n if source1_score > source2_score:\n win=source1.name\n else:\n win=source2.name\n\n print(self.name + ' is more likely to have come from ' + win)", "def similarity_scores(self, other):\n results = []\n\n words_score=compare_dictionaries(other.words, self.words)\n wordl_score=compare_dictionaries(other.word_lengths, self.word_lengths)\n stems_score=compare_dictionaries(other.stems, self.stems)\n sentl_score=compare_dictionaries(other.sentence_lengths, self.sentence_lengths)\n endings_score=compare_dictionaries(other.endings, self.endings)\n results+= [words_score]\n results+= [wordl_score]\n results+= [stems_score]\n results+= [sentl_score]\n results+= [endings_score]\n return results", "def get_h_score(start, end):\n #uses a heuristic 
function\n #return 0 #used if you want Djikstras algorithm\n return (abs(end[0]-start[0])+abs(end[1]-start[1])) * 10", "def scoring(self):\n pass", "def get_similarity_score(self, reviewer1: Any, reviewer2: Any) -> float:\n v1 = self._vertices[reviewer1]\n v2 = self._vertices[reviewer2]\n return v1.reviewer_similarity_score(v2)", "def find_winner_scores(self):\n p1_score, p2_score = self.find_scores()\n if p1_score > p2_score:\n winner = 1\n elif p1_score < p2_score:\n winner = 2\n else:\n winner = 0\n return winner, p1_score, p2_score", "def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n # First obtain the base information to calculate player & opponent\n # feature values\n player_legal_moves = game.get_legal_moves(player)\n opponent = game.get_opponent(player)\n opponent_legal_moves = game.get_legal_moves(opponent)\n if len(player_legal_moves) != len(opponent_legal_moves):\n return float(len(player_legal_moves) - len(opponent_legal_moves))\n \n # Get_center_coordinates and opponent. Then set the list of participants\n center_coordinates = center_y, center_x = get_center_coordinates(game)\n participants = [player, opponent]\n \n # Then, for each participant obtain his/her feature values \n for participant in participants:\n if participant == player:\n p_legal_moves = player_legal_moves\n player_either = player\n participant_coordinates = p_y, p_x = \\\n game.get_player_location(participant)\n player_legal_move_count, player_start_center_distance, \\\n player_total_next_moves, player_max_path_length, \\\n player_path_count, player_min_center_diff \\\n = \\\n get_player_feature_values(game, center_coordinates, \\\n player_either,participant_coordinates, p_legal_moves)\n else:\n p_legal_moves = opponent_legal_moves\n player_either = opponent\n participant_coordinates = p_y, p_x \\\n = game.get_player_location(participant)\n opponent_legal_move_count, opponent_start_center_distance, \\\n opponent_total_next_moves, opponent_max_path_length, \\\n opponent_path_count, opponent_min_center_diff \\\n = \\\n get_player_feature_values(game, center_coordinates, \\\n player_either, participant_coordinates, p_legal_moves)\n \n # Place each participant's feature values in a tuple/vector surrogate \n pro_player_vector = \\\n (player_legal_move_count, player_start_center_distance, \\\n player_total_next_moves, player_max_path_length, player_path_count, \\\n opponent_min_center_diff)\n pro_opponent_vector = \\\n (opponent_legal_move_count, opponent_start_center_distance, \\\n opponent_total_next_moves, opponent_max_path_length, \\\n opponent_path_count, player_min_center_diff)\n \n # Provide a weighting vector for the features \n weight_vector = (1.5,0.1,1.0,0.001,0.001,0.001)\n \n # Calculate the return value = weighted difference of players' features\n weighted_difference_dot_product = sum(p*(q-r ) for p,q,r \\\n in zip(weight_vector, pro_player_vector, pro_opponent_vector))\n \n return float(weighted_difference_dot_product)", "def resultat_match(self, binomes):\n for binome in binomes:\n while True:\n score_un = self.vue.entree_resultats(binome[0])\n score_deux = self.vue.entree_resultats(binome[1])\n if score_un + score_deux != 1:\n self.vue.erreur_score()\n continue\n else:\n binome[0].ajout_score(score_un)\n binome[1].ajout_score(score_deux)\n table_players.update({\"score\": binome[0].points},\n doc_ids=[binome[0].id])\n table_players.update({\"score\": binome[1].points},\n doc_ids=[binome[1].id])\n 
break\n self.vue.afficher_resultats(binomes)", "def __score_by_iceberg_distance(self, source_iceberg, destination_iceberg_to_score):\n\n distance = utils.get_real_distance_between_icebergs(source_iceberg, destination_iceberg_to_score)\n\n return DISTANCE_FACTOR_SCORE * (float(distance) / float(self.__max_distance))", "def get_score(self, red_score, blue_score):\n if red_score < blue_score:\n return 0\n elif red_score > blue_score:\n return 1\n else:\n return 0.5", "def arsenalResults(dat):\n arsScore = int(dat[0])\n othScore = int(dat[2])\n if arsScore > othScore:\n res = 1\n elif arsScore == othScore:\n res = 2\n else:\n res = 0\n return res", "def classify(self, source1, source2):\n scores1 = self.similarity_scores(source1)\n scores2 = self.similarity_scores(source2)\n\n print(\"scores for\", source1.name, \":\", [round(number, 2) for number in scores1])\n print(\"scores for\", source2.name, \":\", [round(number, 2) for number in scores2])\n\n s1 = 0\n s2 = 0\n for x in range(len(scores1)):\n if scores1[x] >= scores2[x]:\n s1 += 1\n else:\n s2 += 1\n \n if s1 > s2:\n print(self.name, \"is more likely to have come from \", source1.name)\n print()\n else:\n print(self.name, \"is more likely to have come from \", source2.name)\n print()", "def score( self ):\r\n result = 0.0\r\n for rr in self.ee.getRsrcs( ):\r\n value = self.scoreRsrc( rr )\r\n result += value\r\n print( \"INFO: Value for the schedule is %s \" % ( rr, result ) )\r\n return( result )", "def __score_by_avg_distance_from_players(self, source_iceberg, iceberg_to_score, simulation_data):\n ours_avg_distance, enemy_avg_distance = simulation_data.get_avg_distance_from_players(\n iceberg_to_score)\n\n score = (enemy_avg_distance - ours_avg_distance) * AVG_DISTANCE_FROM_PLAYERS_FACTOR_SCORE\n score += self.__score_by_strong_enemy_close_to_me(source_iceberg)\n return score", "def __calculateNormalizedScores(self):\n year_scores = {0 : []}\n for venue in self.venue_scores:\n v_scores = []\n for year in self.venue_scores[venue]:\n v_scores.append(self.venue_scores[venue][year])\n if year not in year_scores:\n year_scores[year] = []\n year_scores[year].append(self.venue_scores[venue][year])\n x_year = np.average(np.array(v_scores))\n self.venue_scores[venue][0] = x_year\n year_scores[0].append(x_year)\n \n ##for standardization\n #year_metrics = {x : (np.average(np.array(year_scores[x])), np.std(np.array(year_scores[x]))) for x in year_scores}\n ##for normalization\n year_metrics = {x: (max(year_scores[x]), min(year_scores[x])) for x in year_scores}\n \n #print year_metrics\n \n for venue in self.venue_scores:\n self.normalized_scores[venue] = dict()\n for year in self.venue_scores[venue]:\n #self.standard_scores[venue][year] = round((self.venue_scores[venue][year] - year_metrics[year][0]) / year_metrics[year][1],5)\n #self.normalized_scores[venue][year] = (self.venue_scores[venue][year] - year_metrics[year][1]) / (year_metrics[year][0] - year_metrics[year][1]) + eps\n self.normalized_scores[venue][year] = (self.venue_scores[venue][year] - year_metrics[year][1] + self.epsilon) / (year_metrics[year][0] - year_metrics[year][1] + self.epsilon)", "def similarity_scores(self, other):\n word_score = compare_dictionaries(other.words, self.words)\n word_length_score = compare_dictionaries(other.word_lengths, self.word_lengths)\n sentence_length_score = compare_dictionaries(other.sentence_lengths, self.sentence_lengths)\n stem_score = compare_dictionaries(other.stems, self.stems)\n comma_score = compare_dictionaries(other.commas_per_sentence, 
self.commas_per_sentence)\n list_scores = [word_score, word_length_score, sentence_length_score, stem_score, comma_score]\n return list_scores", "def test_get_score(self):\r\n score_dict = self.combinedoe.get_score()\r\n self.assertEqual(score_dict['score'], 15.0)\r\n self.assertEqual(score_dict['total'], 5.0)", "def custom_score_2(game, player):\n \n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n \n # shortcut to definite state:\n # 1. my agent win -> return very high score\n if opp_moves == 0:\n return float(\"inf\")\n # 2. opponenent's agent win -> return very low score\n elif own_moves == 0:\n return float(\"-inf\")\n\n # score: avaliable moves ratio\n return float(own_moves/opp_moves)", "def scoreTeams(curTeams, oppTeam, pokedex, league, minDistWanted):\n battleData, htmlData = loadBattleData(league)\n similarities = loadSims() \n \n #If not given an opponent team then simply randomly choose losers from the dataset to compare to.\n if len(oppTeam) == 0:\n picks = set([])\n while (len(picks) < NUMLOSINGTEAMS and (not len(picks) == len(battleData))):\n picks.add(random.randint(0,len(battleData)-1))\n\n losers = []\n loserDict = {}\n for i in picks:\n entry = battleData[i]\n winner,loser = determineWinner(entry)\n loserDict[str(loser)] = [winner]\n losers.append( (loser,0) )\n\n #Given opponent team then find similar teams\n else:\n oppTeam = [getSimPokemon(opp,similarities) for opp in oppTeam]\n\n #create dictionary from losers team to the team that beat them.\n loserDict = {}\n sims = []\n for d in battleData:\n winner, loser = determineWinner(d)\n\n wTeam = teamToArray(winner,pokedex)\n lTeam = np.array(teamToArray(loser, pokedex))\n\n score = 0\n for oppNp in oppTeam:\n score+= np.amax(lTeam*oppNp) \n\n if str(loser) in loserDict:\n loserDict[str(loser)].append(winner)\n else:\n #new to dictonary\n loserDict[str(loser)] = [winner]\n\n sims.append((loser, score))\n\n\n sims = sorted(sims, key = lambda x : x[1], reverse = True)\n\n cutoff = min(len(sims),NUMLOSINGTEAMS)\n losers = sims[:cutoff]\n\n #Gather winners to losing teams\n winnersComp = []\n for loser,_ in losers:\n for winner in loserDict[str(loser)]:\n winnersComp.append(teamToArray(winner,pokedex))\n \n topScore = len(winnersComp)*6 #pkmn team size\n\n results = []\n inverted_idx = {}\n\n existsSet = []\n\n #Creates inverted index for teams, while simoultaneously weeding out any teams that are exactly similar.\n for i in range(len(curTeams)):\n team = curTeams[i]\n results.append((team,0))\n sTeam = set(team)\n if not (sTeam in existsSet):\n existsSet.append(sTeam)\n for pkm in team:\n if pkm != EMPTY:\n if pkm in inverted_idx:\n inverted_idx[pkm].append(i)\n else:\n inverted_idx[pkm] = [i]\n \n #Giving the similiarity scores to the winners based off of the inverted index.\n for pkm in inverted_idx:\n for winner in winnersComp:\n wArr = np.array(winner)\n #tArr = getSimPokemon(pkm,similarities)\n tArr = similarities[pkm]\n \n vals = wArr * tArr\n\n score = np.amax(vals)\n\n for i in inverted_idx[pkm]:\n results[i] = (results[i][0],results[i][1]+(score/topScore))\n\n results = sorted(results, key = lambda x : x[1], reverse = True)\n\n if len(results) < NUMTEAMSRETURN:\n if len(results) == 0:\n returnTeams = [[] for x in range(NUMTEAMSRETURN)]\n teamScores = [0 for x in range(NUMTEAMSRETURN)]\n\n else:\n returnTeams = [result[0] for result in results]\n teamScores = [result[1] for result in results]\n else:\n 
firstResult, firstScore = results[0]\n returnTeams = [firstResult]\n teamScores = [round(firstScore*100,1)]\n returnSets = [set(firstResult)]\n \n i = 1\n\n #Loops through results and adds teams with the proper edit distance away.\n while(len(returnTeams) < NUMTEAMSRETURN and minDistWanted > 0):\n teamToConsider,teamToConsiderScore = results[i]\n \n considerSet = set(teamToConsider)\n add = True\n ##checks the edit distance of teams is above wanted\n for team in returnSets:\n if len(team.union(considerSet)) < len(team)+minDistWanted:\n add = False\n\n ##If indeed above wanted levels then add\n if add:\n returnTeams.append(teamToConsider)\n returnSets.append(considerSet)\n teamScores.append(round(teamToConsiderScore*100,1))\n \n i+=1\n\n if i >= len(results):\n i = 1\n minDistWanted -= 1 \n \n winHtmls = []\n if htmlData != None:\n for team,_ in losers:\n for winner in loserDict[str(team)]:\n winHtmls.extend(htmlData[str(sorted(winner))])\n \n\n return returnTeams, teamScores, winHtmls", "def __score_by_penguins_gaining(self, source_iceberg, destination_iceberg_to_score,\n iceberg_owner_after_all_groups_arrived):\n if iceberg_owner_after_all_groups_arrived.equals(self.__game.get_myself()):\n return 0\n turns_to_check = self.__max_distance - \\\n source_iceberg.get_turns_till_arrival(destination_iceberg_to_score)\n return turns_to_check * destination_iceberg_to_score.penguins_per_turn * PENGUINS_GAINING_SCORE_FACTOR", "def compare_sentences(s1,s2):\n\n total_score=0;\n num_total=0;\n for k in s1:\n num_total+=1\n if k in s2:\n if s1[k]==s2[k]:\n score = 1;\n else:\n score = 0;\n else:\n score = 0;\n total_score += score;\n \n for k in s2:\n if k not in s1:\n num_total+=1\n\n logging.info(\"Scored %f out of %d\" % (total_score, num_total));\n\n if num_total==0:\n return 1;\n return float(total_score)/float(num_total)", "def getScorePair(info1,info2):\n info1 = np.array(info1)\n info2 = np.array(info2)\n score = np.count_nonzero((info1==info2) & (info1!=\"\"))\n if info1[3]!=\"\" and info2[3]!=\"\" and info1[3]!=info2[3]:\n #Middle Initial vs middle name\n #Note this will count two different last names with the same first initial\n if info1[3][0]==info2[3][0]:\n score += 1\n if info1[15]!=\"\" and info1[16]!=\"\" and info1[15]!=info2[15] and info1[16]!=info2[16]:\n if L.distance(info1[15],info2[15])<=2 or L.distance(info1[16],info2[15])<=2 or L.distance(info1[15],info2[16])<=2 or L.distance(info1[16],info2[16])<=2:\n #if they swap primary and secondary phone numbers\n score += 1\n \n #Typos in LAST,FIRST,ALIAS. allow up to 2 mistakes\n for j in [1,2,18]:\n if info1[j]!=\"\" and info2[j]!=\"\" and info1[j]!=info2[j]:\n if L.distance(info1[j],info2[j])<=2:\n score += 1\n \n #Typos in DOB. 
allow up to 1 mistake\n if info1[5]!=\"\" and info2[5]!=\"\" and info1[5]!=info2[5]:\n if L.distance(info1[5],info2[5])<1:\n score += 1\n return score", "def match(name1, name2):\n points = getPoints([name1,name2])\n su = points[name1] + points[name2]\n\n mutate_dict(lambda x: (x/su)/4, points)\n score1 = non_linearRandomInt(10, points[name1])# + non_linearRandomInt(3,1/4)\n score2 = non_linearRandomInt(10, points[name2])\n \n return (score1,score2)", "def get_scores(self) -> tuple:\n return (self.get_score(), self.p2_score)", "def score(self):\n\n self.link()\n roc, _ = self.aggregate()\n\n return roc", "def calculate_score(truck, job):\n score = 0\n score += W1 * calculate_distance(job.source_long, job.source_lat, truck.current_long, truck.current_lat)\n print(score)\n score += W2 * calculate_distance(job.destination_long, job.destination_lat, truck.base_long, truck.base_lat)\n print(score)\n score += W3 * job.price\n print(score)\n score += W4 * job.price / job.getDistance()\n print(score)\n\n return score", "def get_score(self, student_answers):\r\n pass", "def score_two(rect1, rect2):\n score = 0.0\n avg_width = (rect1[1][0] + rect2[1][0])/2\n avg_x = (rect1[0][0] + rect2[0][0])/2\n vector = np.array([rect2[0][0] - rect1[0][0], rect2[0][1] - rect1[0][1]])\n length = np.sqrt(np.dot(vector, vector))\n tilt_l = (14.5 - rect1[2])/15\n tilt_r = (14.5 + rect2[2])/15\n if length > 0:\n aim = (avg_x - mid_point)/mid_point\n ratio = 0.2 - avg_width / length\n sine = vector[1] / length\n cosine = vector[0] / length\n score += sine * sine\n score += (1 - cosine)\n score += ratio * ratio\n score += aim * aim\n score += tilt_l * tilt_l\n score += tilt_r * tilt_r\n return score", "def test_get_score(self):\r\n score_dict = self.combinedoe.get_score()\r\n self.assertEqual(score_dict['score'], 0)\r\n self.assertEqual(score_dict['total'], 1)", "def __score_by_support(self, source_iceberg, iceberg_to_score, iceberg_to_score_owner, simulation_data):\n game = self.__game\n is_belong_to_me = iceberg_to_score_owner.equals(game.get_myself())\n score = 0\n is_closest_to_enemy, avr_distance_from_enemy = self.__is_destination_closest_to_enemy(source_iceberg,\n iceberg_to_score,\n simulation_data)\n if is_belong_to_me and is_closest_to_enemy:\n if not iceberg_to_score is game.get_bonus_iceberg():\n score += (self.__max_distance - avr_distance_from_enemy) * SUPPORT_SCORE_FACTOR\n else:\n score += CANT_DO_ACTION_SCORE\n elif is_belong_to_me:\n score += OUR_DESTINATION_ICEBERG_FAR_FROM_ENEMY_SCORE\n return score", "def calculate_score(result):\n sample1=result['Sample1']\n sample2=result['Sample2']\n string1=paragraph_to_list(sample1)\n string2=paragraph_to_list(sample2)\n \n return round( strings_similarity(string1, string2), 2)\n #method_dict=strings_count_compare(string1, string2)/ max(len(string1), len(string2))\n #return round(0.5*(method_difflab+method_dict), 2)", "def calc_match_points(self, match):\n if match.winner == match.TIE:\n match.home.tournament_score += 1\n match.away.tournament_score += 1\n else:\n match.winner.tournament_score += 3\n match.loser.tournament_score += 0", "def custom_score_2(game, player):\n # TODO: finish this function!\n if game.is_loser(player):\n #print(\"You lose!\")\n return -math.inf\n if game.is_winner(player):\n #print(\"You win\")\n return math.inf\n\n # center\n width = game.width / 2\n height = game.height / 2\n\n # Opponent\n opponent = game.get_opponent(player)\n opp_y_coord, opp_x_coord = game.get_player_location(opponent)\n opp_x_eval = (width - float(opp_x_coord)) 
** 2\n opp_y_eval = (height - float(opp_y_coord)) ** 2\n opp_center_eval = float(opp_x_eval + opp_y_eval)\n\n # Remaining spaces left on the board\n rem_spaces = len(game.get_blank_spaces())\n\n # number of agent's available moves\n no_moves = len(game.get_legal_moves(player))\n\n # number of opponent's available moves\n opp_moves = len(game.get_legal_moves(opponent))\n\n # evaluation of board \"goodness\"\n # using moves available to both players\n # Idea is player chooses moves with scores that maximise whilst minimizing\n # evaluate board states and positions as scores\n opp_score = opp_moves * 2 - opp_center_eval\n score = no_moves - opp_score/rem_spaces\n return float(score)", "def matching_score(self,set1, set2):\n set_set1=set(set1)\n set_set2=set(set2)\n '''print(\" set_set12\")\n print(set_set1)\n print(set_set2)'''\n return len(set_set1.intersection(set_set2)) ** 2 / (float(len(set1)) * len(set2))\n #return len(set_set1.intersection(set_set2)) / len(set_set1.union(set_set2))", "def similarity_scores(self, other):\n word_score = compare_dictionaries(other.words, self.words)\n word_length_score = compare_dictionaries(other.word_lengths, self.words)\n stem_score = compare_dictionaries(other.stems, self.stems)\n sentence_length_score = compare_dictionaries(other.sentence_lengths, self.sentence_lengths)\n common_word_score = compare_lists(other.common_word, self.common_word)\n\n return [word_score, word_length_score, stem_score, sentence_length_score, common_word_score]", "def run_v1(self):\n start_time = dt.datetime.now()\n first_key_set = set(self.first)\n second_key_set = set(self.second)\n dict = {}\n\n for idx, key in enumerate(first_key_set):\n if(key in dict):\n dict[key] += (len(filter(lambda x:x==key, self.first)))\n else :\n dict[key] = int((len(filter(lambda x: x == key, self.first))))\n\n for idx, key in enumerate(second_key_set):\n if(key in dict):\n dict[key] += (len(filter(lambda x:x==key, self.second)))\n else :\n dict[key] = int((len(filter(lambda x: x == key, self.second))))\n\n result = max(dict.iterkeys(), key=(lambda key : dict[key]))\n print(\"inviting {0} people interest in {1}\".format(dict[result] , result))\n\n end_time = dt.datetime.now()\n print(\"time cost : {0}\".format(end_time - start_time))", "def score(self):\n score_message = {\n 'Onewins': \"\\nThe Winner is Player 1!\",\n 'Twowins': \"\\nThe Winner is Player 2!\",\n 'Tie': \"\\nTie! 
Looks like everyone's a winner!\",\n 'Nowinner': \"\\nYikes, neither of you win!\"\n }\n if self.pone_score > self.ptwo_score:\n print(score_message['Onewins'])\n elif self.pone_score < self.ptwo_score:\n print(score_message['Twowins'])\n elif self.pone_score == 0 and self.ptwo_score == 0:\n print(score_message['Nowinner'])\n else:\n print(score_message['Tie'])", "def _run():\n matching_terms = {'a', 'b'}\n source_counts = {'a': 10, 'b': 50, 'c': 25}\n target_counts = {'a': 4, 'b': 73, 'c': 15}\n source_chunk = ['a', 'b']\n target_chunk = ['a', 'c', 'b']\n source_distance = score.find_distance(\n matching_terms, source_chunk, source_counts)\n target_distance = score.find_distance(\n matching_terms, target_chunk, target_counts)\n match_score = score.vanilla(\n matching_terms, source_distance, target_distance, source_counts,\n target_counts)\n print('Calculated score:', match_score)", "def cost(v1, v2):\n assert v2 != v_start\n assert v1 != v_end\n\n _, s1 = v1\n _, s2 = v2\n s1 = set(s1) - {\"\"}\n s2 = set(s2) - {\"\"}\n\n # Charge loads if Heidi and Joe are playing after the first dance, or in\n # the last song\n if v1 == v_start and (\"JW\" in s2 or \"HN\" in s2):\n return 100\n if v2 == v_end and (\"JW\" in s1 or \"HN\" in s1):\n return 100\n\n return len(s2 - s1)", "def custom_score(game, player):\n \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n # First obtain the improved score\n player_legal_moves = game.get_legal_moves(player)\n opponent = game.get_opponent(player)\n opponent_legal_moves = game.get_legal_moves(opponent)\n improved = len(player_legal_moves) - len(opponent_legal_moves)\n if improved != 0:\n return float(improved)\n \n # Second get differences from center\n center_coordinates = center_y, center_x = get_center_coordinates(game)\n player_coordinates = game.get_player_location(player)\n opponent_coordinates = game.get_player_location(opponent)\n player_center_dist = get_distances_from_center(center_coordinates, player_coordinates)\n opponent_center_dist = get_distances_from_center(center_coordinates, opponent_coordinates)\n center_dist_diff = player_center_dist - opponent_center_dist\n \n # Third obtain next_moves\n player_next_moves = [get_next_moves(game, move, list(move)) for move in player_legal_moves]\n opponent_next_moves = [get_next_moves(game, move, list(move)) for move in opponent_legal_moves] \n improved_next = len(player_next_moves) - len(opponent_next_moves)\n \n # Put player and opponent feature differences in a tuple/vector surrogoate\n feature_diff_vector = (improved, center_dist_diff, improved_next)\n \n # Provide a weighting vector for the features of each player-participant\n weight_vector = (1.5,0.1,1.0)\n # Calculate the return value = weighted difference of players' features\n weighted_difference_dot_product = sum(p*q for p,q, \\\n in zip(feature_diff_vector, weight_vector))\n \n return float(weighted_difference_dot_product)", "def score_solution(g, s):\n pass", "def score(A, B):\n assert 10 <= A <= 100 and 10 <= B <= 100 # you can't get too slow or too fast\n trackA = 100 - B\n trackB = 100 - A\n tA = trackA/A\n tB = trackB/B\n return tB - tA", "def get_estimated_score(match_data: dict) -> float:\n \n auto_high = {match_data['auto_HighClose']: match_data['auto_conInnerClose'],\n match_data['auto_HighFrontCP']: match_data['auto_conInnerFrontCP'],\n match_data['auto_HighLine']: match_data['auto_conInnerLine']\n }\n auto_low = match_data['auto_Low']\n auto_line = 
match_data['auto_leftSectorLine']\n \n tele_high = {match_data['tele_HighClose']: match_data['tele_conInnerClose'],\n match_data['tele_HighFrontCP']: match_data['tele_conInnerFrontCP'],\n match_data['tele_HighLine']: match_data['tele_conInnerLine'],\n match_data['tele_HighBackCP']: match_data['tele_conInnerBackCP']\n }\n tele_low = match_data['tele_Low']\n climbed = match_data['tele_Climbed']\n parked = match_data['tele_UnderSG']\n \n score = 0\n \n # Gives autonomous points\n for x in auto_high:\n score += (4.3, 4.8)[auto_high[x]] * x\n score += auto_low * 2\n if auto_line: score += 5\n \n # Gives teleop points\n for x in tele_high:\n score += (2.15, 2.4)[tele_high[x]] * x\n score += tele_low\n \n # Gives endgame points\n if climbed: score += 25\n if parked: score += 5\n \n return score", "def calculate_scores(self):\n words = self.walk_board()\n player_scores = {}\n for word in words:\n player = word.get_owning_player()\n if player not in player_scores:\n player_scores[player] = 0\n player_scores[player] += word.get_score()\n return player_scores", "def extract_score(results):\n total_score = 0;\n total_possible_score = 0;\n for k in results.keys():\n total_score = total_score + results[k][0]\n total_possible_score = total_possible_score + results[k][1]\n return (total_score, total_possible_score)", "def score(self):\r\n totN = 0\r\n totB = 0\r\n for l in range(SIZE):\r\n for c in range(len(COLONNES)):\r\n if self.jeu[l][c] == NOIR:\r\n totN += 1\r\n elif self.jeu[l][c] == BLANC:\r\n totB += 1\r\n return (totN, totB)", "def score_scansions(self, scansion1, scansion2):\n p_1 = 1\n p_2 = 1\n word_scansions_1 = scansion1.scansion.lstrip(\" \").rstrip(\" \").split(\" \")\n word_scansions_2 = scansion2.scansion.lstrip(\" \").rstrip(\" \").split(\" \")\n for i, word in enumerate(self.words):\n p_word_1, _ = word.compare_scansions(word_scansions_1[i], word_scansions_2[i])\n p_1 *= p_word_1\n p_2 *= (1 - p_word_1)\n if p_1 > p_2:\n return scansion1, p_1/(p_1 + p_2)\n return scansion2, p_2 / (p_1 + p_2)", "def match_score(seq1, seq2):\n\n seq1 = get_sequence_string(seq1)\n seq2 = get_sequence_string(seq2)\n score = align.localxx(seq1, seq2)[0][2]\n return score", "def wilson_score(upvotes, downvotes):\n if 0 <= upvotes <= _uprange and 0 <= downvotes <= _downrange:\n return _wilson_cache[upvotes][downvotes]\n return _wilson_score(upvotes, downvotes)", "def intersection_score(method1,method2):\n\tpass", "def getScore(data):\n return score", "def calculScore(self):\n for cell in self.notComputeRouter:\n if(cell.isCovered==True):\n self.score += 1000\n self.score += self.budget", "def calculate_score(s1, s2, l1, l2, startpoint):\n matched = \"\" # to hold string displaying alignements\n score = 0\n for i in range(l2):\n #moves shorter sequence along longer, counting number of matches in\n #each position\n if (i + startpoint) < l1:\n if s1[i + startpoint] == s2[i]: # if the bases match\n matched = matched + \"*\"\n score = score + 1\n else:\n matched = matched + \"-\"\n\n # gives an output for each startpoint showing the number of matches where\n # * is a match and - is no match, position of the two sequences relative to\n # each other, and the number of matches for that startpoint.\n print(\".\" * startpoint + matched) \n print(\".\" * startpoint + s2)\n print(s1)\n print(score) \n print(\" \")\n\n return score", "def calculate_score(origin, sentences):\n result = 0\n if len(origin):\n result=sum(compare_with_bounds(origin, sentence) for sentence in sentences) / float(len(sentences))\n return 
result", "def run_match(agent1, agent2, rounds, payoffs):\r\n\r\n agent1_last_action = None\r\n agent2_last_action = None\r\n\r\n agent1_score = 0\r\n agent2_score = 0\r\n\r\n for r in range(rounds):\r\n try:\r\n agent1_action = agent1.play_round(agent2_last_action)\r\n except Exception as e:\r\n # On error, award penalize maximum points\r\n return (payoffs[0][1][0] * rounds, 0)\r\n\r\n try:\r\n agent2_action = agent2.play_round(agent1_last_action)\r\n except Exception as e:\r\n # On error, award penalize maximum points\r\n return (0, payoffs[0][1][0] * rounds)\r\n\r\n # compute payoff\r\n scores = score_helper(agent1_action, agent2_action, payoffs)\r\n\r\n agent1_score += scores[0]\r\n agent2_score += scores[1]\r\n\r\n agent1_last_action = agent1_action\r\n agent2_last_action = agent2_action\r\n\r\n return agent1_score, agent2_score", "def score(c1, c2):\n if c1 == c2:\n return 1\n else:\n return 0", "def findSimilarityScore(self, source, destination):\n\n\n if self.similarityScores is not None:\n return self.similarityScores[source][destination]\n\n # Project graph (if a meta path was provided)\n if self.metaPath is None:\n projectedGraph = self.graph\n else:\n if self.metaPath[0] == self.metaPath[-1]: # Homogeneous projection?\n projectedGraph = self.metaPathUtility.createHomogeneousProjection(self.graph, self.metaPath)\n else:\n projectedGraph = self.metaPathUtility.createHeterogeneousProjection(self.graph, self.metaPath)\n\n # Build initial similarity scores\n self.similarityScores = defaultdict(dict)\n nodes = self.graph.getNodes()\n for a, b in itertools.product(nodes, nodes):\n self.similarityScores[a][b] = 1 if a is b else 0\n\n self.similarityScores = self.__simRank(projectedGraph, self.similarityScores, SimRankStrategy.k)\n\n return self.similarityScores[source][destination]", "def define_score(self, votes_string):\n\t\t#2*REW + colleagues + post-doctorate associate + 2* JBW\n\t\tvotes = [int(x) for x in votes_string] \n\t\tweights = [2,1,1,2]\n\t\tscore = 0\n\t\tfor i in range(0, 4):\n\t\t\tif votes[i] >= 0 and votes[i] <= 2:\n\t\t\t\tscore += votes[i]*weights[i]\n\t\treturn score", "def get_scores(self):\n\n\t\tscores = np.dot(self.rankings, self.weights)\n\t\tranked_indices = np.argsort(scores)\n\t\tranked_sources = self.source_names[ranked_indices]\n\t\tranked_scores = sorted(scores)\n\t\tself.scores = {source:score for source, score in zip(ranked_sources, ranked_scores)}\n\n\t\treturn self.scores", "def value(self):\n #import pdb; pdb.set_trace()\n return ((self.team1.get_cur_hp() / self.team1.get_total_hp()) - \n (self.team2.get_cur_hp() / self.team2.get_total_hp()))", "def get_win_rate_regular_season_for_each_coach(self):\n self.games_won_for_coaches = (\n self.raw_data_regularseason\n [['Season','DayNum','WTeamID']]\n # merge for winning team\n .merge(self.num_days_coach_for_season[['Season','TeamID','FirstDayNum','LastDayNum','CoachName']],\n how='left',left_on=['Season','WTeamID'],right_on=['Season','TeamID'])\n .rename(columns={\"FirstDayNum\":\"FirstDayNum_win\",\"LastDayNum\":\"LastDayNum_win\",\"CoachName\":\"CoachName_win\",\"TeamID\":\"TeamID_win\"})\n .pipe(lambda x:x.assign(which_coach_for_win = np.where((x.FirstDayNum_win <= x.DayNum) & (x.LastDayNum_win >= x.DayNum),1,0)))\n .query(\"which_coach_for_win != 0\")\n .groupby(['Season','CoachName_win','WTeamID'])\n .agg({\"which_coach_for_win\":\"sum\"})\n .reset_index()\n )\n\n self.games_lose_for_coaches = (\n self.raw_data_regularseason\n [['Season','DayNum','LTeamID']]\n # merge for losing team\n 
.merge(self.num_days_coach_for_season[['Season','TeamID','FirstDayNum','LastDayNum','CoachName']],\n how='left',left_on=['Season','LTeamID'],right_on=['Season','TeamID'])\n .rename(columns={\"FirstDayNum\":\"FirstDayNum_lose\",\"LastDayNum\":\"LastDayNum_lose\",\"CoachName\":\"CoachName_lose\",\"TeamID\":\"TeamID_lose\"})\n .pipe(lambda x:x.assign(which_coach_for_lose = np.where((x.FirstDayNum_lose <= x.DayNum) & (x.LastDayNum_lose >= x.DayNum),1,0)))\n .query(\"which_coach_for_lose != 0\")\n .groupby(['Season','CoachName_lose','LTeamID'])\n .agg({\"which_coach_for_lose\":\"sum\"})\n .reset_index()\n )\n\n # combine games won and lost df\n self.combine_regular_games_won_lose = (\n self.games_lose_for_coaches\n .merge(self.games_won_for_coaches,how='left',left_on=['Season','LTeamID','CoachName_lose'],right_on=['Season','WTeamID','CoachName_win'])\n .pipe(lambda x:x.assign(win_rate_regular = x.which_coach_for_win/(x.which_coach_for_win + x.which_coach_for_lose)))\n .drop(['CoachName_win','WTeamID'],1)\n .rename(columns={\"CoachName_lose\":\"CoachName\",\"LTeamID\":\"TeamID\",\"which_coach_for_lose\":\"games_lost\",\"which_coach_for_win\":\"games_won\"})\n )", "def get_raw_score(self, set1, set2):\n \n # input validations\n utils.sim_check_for_none(set1, set2)\n utils.sim_check_for_list_or_set_inputs(set1, set2)\n\n # if exact match return 1.0\n if utils.sim_check_for_exact_match(set1, set2):\n return 1.0\n\n # if one of the strings is empty return 0\n if utils.sim_check_for_empty(set1, set2):\n return 0\n\n if not isinstance(set1, set):\n set1 = set(set1)\n if not isinstance(set2, set):\n set2 = set(set2)\n\n return 2.0 * float(len(set1 & set2)) / float(len(set1) + len(set2))", "def get_sim_score(self, set1, set2):\n return self.get_raw_score(set1, set2)", "def get_score(self):\n for response in self.response_list:\n self.score += response.get_score", "def calculate_score(self):\n score = []\n guess_secret_code = self.guess_code\n game_secret_code = self.game.secret_code\n\n remaining_secrets = []\n remaining_guesses = []\n\n # Match one by one and search for full matches\n for guess, secret in zip(game_secret_code, guess_secret_code):\n if guess == secret:\n score.append(1)\n else:\n # If we don't have full match , save the rest of the list items in a temporary list\n remaining_guesses.append(guess)\n remaining_secrets.append(secret)\n\n # Search for each guess element to be present in the remaining secret_code options for partial matches\n for guess in remaining_guesses:\n if guess in remaining_secrets:\n score.append(0)\n remaining_secrets.remove(guess)\n\n return score", "def get_scores(self):\n return self.score", "def getVenueScores(self, v_name, normalized=True, scopus_name=False):\n if not scopus_name:\n v_name = self.getVenueName(v_name)\n if not v_name:\n return None \n #s_df = self.scores_df.loc[self.scores_df['scopus_name'] == v_name]\n #scores = zip(list(s_df['year'].values),list(s_df['score'].values))\n if normalized:\n return self.normalized_scores[v_name]\n return self.venue_scores[v_name]", "def __score_by_iceberg_price(self, source_iceberg, destination_iceberg_to_score, simulation_data,\n occupy_method_data):\n score = 0\n game = self.__game\n min_penguins_for_occupy = occupy_method_data.min_penguins_for_occupy\n\n log(occupy_method_data)\n if utils.is_me(game, occupy_method_data.owner): # In the end, the iceberg belongs to us.\n score += self.__score_by_support(source_iceberg, destination_iceberg_to_score, game.get_myself(),\n simulation_data)\n elif 
destination_iceberg_to_score.owner.equals(game.get_myself()):\n # We want to protect out iceberg if it gonna to be occupied.\n score += OUR_DESTINATION_ICEBERG_IN_DANGER_SCORE\n\n # Max penguins can be used\n max_penguins_can_be_use = occupy_method_data.max_penguins_can_be_use\n\n # Is source has enough penguins to send\n if max_penguins_can_be_use - min_penguins_for_occupy < self.__min_penguins_amount:\n score += CANT_DO_ACTION_SCORE\n\n # Score by price\n score += PRICE_FACTOR_SCORE * (float(min_penguins_for_occupy) / self.__max_price)\n\n if utils.is_enemy(game, destination_iceberg_to_score.owner) \\\n and (min_penguins_for_occupy <= max_penguins_can_be_use or utils.is_empty(game.get_neutral_icebergs())) \\\n and len(game.get_enemy_icebergs()) == 1:\n score += LAST_ENEMY_ICEBERG_THAT_CAN_BE_OCCUPIED\n\n # Check whether source will be in danger if send the penguins.\n iceberg_simulation_data = simulation_data.get(source_iceberg)\n owner = iceberg_simulation_data[-1][OWNER]\n if not self.__game.get_myself().equals(owner):\n score += OUR_SOURCE_ICEBERG_IN_DANGER_SCORE\n return score", "def classify(self, source1, source2):\n scores1 = self.similarity_scores(source1)\n scores2 = self.similarity_scores(source2)\n for i in range(len(scores1)):\n scores1[i] = round(scores1[i], 2)\n scores2[i] = round(scores2[i], 2)\n \n print('scores for source1: ', scores1, '\\n', \\\n 'scores for source2: ', scores2)\n num_larger1 = 0\n num_larger2 = 0\n for i in range(len(scores1)):\n if scores1[i] > scores2[i]:\n num_larger1 += 1\n else:\n num_larger2 += 1\n if num_larger1 > num_larger2:\n print(self.name, 'is more likely to have come from source1')\n else:\n print(self.name, 'is more likely to have come from source2')", "def calculate_score(s1, s2, l1, l2, startpoint):\n\n matched = \"\" # to hold string displaying alignments\n score = 0\n for i in range(l2):\n if (i + startpoint) < l1:\n if s1[i + startpoint] == s2[i]: # if the bases match\n matched = matched + \"*\" # * indicates a match\n score = score + 1\n else:\n matched = matched + \"-\" # - indicates no match\n\n return score", "def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player)\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player))\n if opp_moves == 0:\n return float(\"inf\")\n\n move_ratio = own_moves / opp_moves\n\n completeness = completeness_of_game(game)\n centerness_score = 0\n\n if completeness < 0.5:\n own_centerness = centerness(game, player)\n opp_centerness = centerness(game, game.get_opponent(player))\n centerness_ratio = own_centerness / opp_centerness + 0.1\n\n center_score = -1 * own_centerness + opp_centerness - centerness_ratio\n\n return 2 * own_moves - 2 * opp_moves + 2 * move_ratio + centerness_score", "def get_score(score_map, test_result):\n if test_result < score_map[20]:\n return int((test_result / score_map[20]) * 20)\n elif test_result < score_map[40]:\n return int(20 + (test_result - score_map[20]) / (score_map[40] - score_map[20]) * 20)\n elif test_result < score_map[60]:\n return int(40 + (test_result - score_map[40]) / (score_map[60] - score_map[40]) * 20)\n elif test_result < score_map[85]:\n return int(60 + (test_result - score_map[60]) / (score_map[85] - score_map[60]) * 20)\n elif test_result < score_map[100]:\n return int(85 + (test_result - score_map[85]) / (score_map[100] - score_map[85]) * 20)\n else:\n return 100", "def 
get_score(self, solution: np.array) -> float:\n pass", "def bridge_score(bridge):\n return (bridge_strength(bridge), len(bridge))", "def get_identical_score(bin1,bin2=None):\n if bin2==None: bin2=[]\n tmpscore=0.0\n norm=0\n for ali1 in bin1:\n tmpscore+=get_subscore(ali1,ali1)\n norm+=1\n for ali2 in bin2:\n tmpscore+=get_subscore(ali2,ali2)\n norm+=1\n return tmpscore/norm", "def score(self, X, y):\n ...", "def get_local_score(self):\n for candidate in self.candidate_list:\n self.score += candidate.get_score()", "def second_way(person_to_clubs: Dict[str, List[str]], \n club_to_person: Dict[str, List[str]], \n person: str, club: str) -> int:\n score = 0\n if person not in person_to_clubs:\n return score\n for member in club_to_person[club]:\n for diff_clubs in person_to_clubs[member]:\n if diff_clubs != club and diff_clubs in person_to_clubs[person]:\n score += 1\n return score", "def score_candidate_branch(self, other):\n # The maximum difference in radius between self and other to earn any radius score\n max_allowable_radius_difference = 4\n # Weights on the components of the final score\n vector_weight = 0.8\n radius_weight = 1 - vector_weight\n\n # Find the direction trend of the current region of each root\n vector_near_end_self = self.get_ending_direction_vector()\n vector_near_start_other = other.get_starting_direction_vector()\n\n if vector_near_end_self and vector_near_start_other:\n\n # Find the angle between the direction vectors\n dot = (vector_near_end_self[0]*vector_near_start_other[0] + vector_near_end_self[1]*vector_near_start_other[1])\n len_self = math.sqrt(vector_near_end_self[0]**2 + vector_near_end_self[1]**2)\n len_other = math.sqrt(vector_near_start_other[0]**2 + vector_near_start_other[1]**2)\n if len_other and len_self:\n angle_cos = round(dot/(len_other*len_self), 3)\n angle_radians = math.acos(angle_cos)\n # Score the direction component out of 100\n vector_score = 100*(angle_radians/(2*math.pi))\n else:\n vector_score = 50\n\n else:\n\n # Handle 1-length roots\n vector_score = 50\n\n # Get the average radii in the area of interest\n average_end_radius_self = self.get_average_end_radius()\n average_start_radius_other = other.get_average_start_radius()\n\n # Score the radius component out of 100\n radius_difference = abs(average_end_radius_self - average_start_radius_other)\n radius_score = max(0, 100 - (100/max_allowable_radius_difference)*radius_difference)\n\n return vector_weight*vector_score + radius_weight*radius_score", "def similarity_score(self, lhs, rhs):\n pass", "def custom_score_2(game, player):\n \"\"\"custom_score_2 heuristic function idea is to implement defensive heuristic function\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_moves = len(game.get_legal_moves(player)) # calculated length of available moves for my player\n length_moves_opponent_player = len(game.get_legal_moves(game.get_opponent(player))) #Calculated length of available moves for opponent player\n return float(1.5*length_my_moves - length_moves_opponent_player)", "def get_score(snack_data, percentage_data, snack, snack_query, protein_query, carb_query, fat_query):\n\tstart_time = time.time()\n\n\t#Load necessary data\n\t\"\"\"\twith open ('../../../Data/percentagesDict.pickle', 'rb') as f:\n\t\tpercentage_data = pickle.load(f)\n\n\twith open ('../../../Data/FINAL_snacks_data.pickle', 'rb') as f:\n\t\tsnack_data = pickle.load(f)\"\"\"\n\n\t#Set constants\n\tLOW_FAT = .3\n\tHIGH_FAT = .6\n\tLOW_CARB 
= .1\n\tHIGH_CARB = .2\n\tLOW_PRO = .2\n\tHIGH_PRO = .4\n\n\t#Convert macro percentages to 'high', 'med', 'low' categories\n\tfat = percentage_data[snack]['fat']\n\tprotein = percentage_data[snack]['protein']\n\tcarb = percentage_data[snack]['carb']\n\n\tif fat > HIGH_FAT:\n\t\tfat_content = 'high'\n\telif fat < LOW_FAT:\n\t\tfat_content = 'low'\n\telse:\n\t\tfat_content = 'med'\n\n\tif protein > HIGH_PRO:\n\t\tprotein_content = 'high'\n\telif protein < LOW_PRO:\n\t\tprotein_content = 'low'\n\telse:\n\t\tprotein_content = 'med'\n\n\tif carb > HIGH_CARB:\n\t\tcarb_content = 'high'\n\telif carb < LOW_CARB:\n\t\tcarb_content = 'low'\n\telse:\n\t\tcarb_content = 'med'\n\n\t#Set x values\n\tx1 = fat_query == fat_content\n\tx2 = carb_query == carb_content\n\tx3 = protein_query == protein_content\n\tx4 = cooccur(snack_data, snack, snack_query) \n\tx5 = snack_data[snack]['rating']\n\n\tw1 = 1\n\tw2 = 1\n\tw3 = 1\n\tw4 = 1\n\tw5 = 1\n\t\n\t#print('x1: {}, x2: {}, x3: {}, x4: {}, x5: {}'.format(x1, x2, x3, x4, x5))\n\t#print(\"get_score() time: --- %s seconds ---\" % (time.time() - start_time))\n\n\n\treturn w1*x1 + w2*x2 + w3*x3 + w4*x4 + w5*x5", "def __score_by_iceberg_belogns(self, source_iceberg, iceberg_to_score, iceberg_to_score_owner):\n game = self.__game\n if iceberg_to_score_owner.equals(game.get_enemy()):\n return ENEMY_BELONGS_SCORE\n\n if iceberg_to_score_owner.equals(game.get_myself()):\n return MY_BELONGS_SCORE\n\n return NEUTRAL_BELONGS_SCORE", "def compare_dictionaries(d1, d2):\n score = 0\n total = 0\n \n for i in d1:\n total = total + d1[i]\n for i in d2:\n if i in d1:\n if total == 0:\n score = score\n else:\n probablility = (d1[i] / total)\n score = score + (math.log10(probablility) * d2[i])\n else:\n if total == 0:\n score = score\n else:\n score = score + ((0.5 / total) * d2[i])\n return score", "def distance_score(vertex1, board, player_id): #implement preference for closer settlements\n num_buildings = 0\n total_dist = 0\n player_buildings = board.get_player_settlements(player_id) + board.get_player_cities(player_id)\n\n if len(player_buildings) == 0: #if it is our first turn\n return 0\n\n player_roads = board.get_player_roads(player_id)\n accessible_vertices = list(set(player_buildings+ [vertex for pair in player_roads for vertex in pair]))\n get_distance = lambda v: manhattan_distance(v, vertex1, board)\n min_distance = min(map(get_distance, accessible_vertices))\n\n enemy_buildings = [v for v in board.settlements if board.settlements[v] != player_id]\n enemy_roads = [r for r in board.roads if board.roads[r] != player_id]\n\n\n \"\"\"\n for s in board.settlements:\n if board.settlements[s] != player_id:\n vertex2 = s\n total_dist_enemies += manhattan_distance(vertex1, vertex2, board)\n num_buildings+=1\n\n for c in board.cities:\n if board.cities[c] != player_id:\n vertex2 = c\n total_dist_enemies += manhattan_distance(vertex1, vertex2, board)\n num_buildings+=1\n\n \"\"\"\n return min_distance", "def db2score(self):\n print(\"db2score\")\n self.score.array_frame_start = self.arrayFrameStart\n self.score.array_frame_end = self.arrayFrameEnd\n self.score.arraySet = self.arraySet\n self.score.arrayGame = self.arrayGame\n self.score.arrayScore = self.arrayScore\n self.score.arrayScoreResult = self.arrayScoreResult\n self.score.arrayFirstSecond = self.arrayFirstSecond\n self.score.arrayServer = self.arrayServer\n\n self.score.arrayPointWinner = self.arrayPointWinner\n self.score.pointWin = self.pointWin\n self.score.arrayPointPattern = self.arrayPointPattern\n 
self.score.arrayForeBack = self.arrayForeBack\n\n self.score.arrayContactServe = self.arrayContactServe\n self.score.arrayCourt = self.arrayCourt\n\n self.score.playerA = self.playerA\n self.score.playerB = self.playerB\n self.score.number = self.number\n self.score.totalGame = self.totalGame\n self.score.faultFlug = self.faultFlug\n self.score.arrayFault = self.arrayFault\n\n # size = len(self.score.array_frame_start)\n\n self.score.shot_frame = self.shot_frame\n self.score.array_ball_position_shot_x = self.array_ball_position_shot_x\n self.score.array_ball_position_shot_y = self.array_ball_position_shot_y\n self.score.arrayPlayerAPosition_x = self.arrayPlayerAPosition_x\n print(self.score.arrayPlayerAPosition_x)\n self.score.arrayPlayerAPosition_y = self.arrayPlayerAPosition_y\n self.score.arrayPlayerBPosition_x = self.arrayPlayerBPosition_x\n self.score.arrayPlayerBPosition_y = self.arrayPlayerBPosition_y\n self.score.arrayHitPlayer = self.arrayHitPlayer\n self.score.arrayBounceHit = self.arrayBounceHit\n self.score.arrayForeBack = self.arrayForeBack\n self.score.arrayDirection = self.arrayDirection\n\n self.score.array_x1 = self.array_x1\n self.score.array_y1 = self.array_y1\n self.score.array_x2 = self.array_x2\n self.score.array_y2 = self.array_y2\n self.score.array_x3 = self.array_x3\n self.score.array_y3 = self.array_y3\n self.score.array_x4 = self.array_x4\n self.score.array_y4 = self.array_y4\n\n self.score.array_frame_start = self.arrayFrameStart\n self.score.shot_index = self.score.create_index_shot(\n self.score.array_frame_start, self.score.shot_frame\n )\n\n # for i in len(self.score.array_frame_start):\n # self.score.shot_index = [0 for i in range(len(self.array_ball_position_shot))]#あとで変更の必要あり\n\n # self.score.array_ball_position_shot = self.check_size_return_array(\n # self.array_ball_position_shot, size\n # )\n # self.score.arrayPlayerAPosition = self.check_size_return_array(\n # self.arrayPlayerAPosition, size\n # )\n # self.score.arrayPlayerBPosition = self.check_size_return_array(\n # self.arrayPlayerBPosition, size\n # )\n # self.score.arrayHitPlayer = self.check_size_return_array(\n # self.arrayHitPlayer, size\n # )\n # self.score.arrayBounceHit = self.check_size_return_array(\n # self.arrayBounceHit, size\n # )\n # self.score.arrayForeBack = self.check_size_return_array(\n # self.arrayForeBack, size\n # )\n # self.score.arrayDirection = self.check_size_return_array(\n # self.arrayDirection, size\n # )\n\n # self.score.array_x1 = self.check_size_return_array(self.array_x1, size)\n # self.score.array_y1 = self.check_size_return_array(self.array_y1, size)\n # self.score.array_x2 = self.check_size_return_array(self.array_x2, size)\n # self.score.array_y2 = self.check_size_return_array(self.array_y2, size)\n # self.score.array_x3 = self.check_size_return_array(self.array_x3, size)\n # self.score.array_y3 = self.check_size_return_array(self.array_y3, size)\n # self.score.array_x4 = self.check_size_return_array(self.array_x4, size)\n # self.score.array_y4 = self.check_size_return_array(self.array_y4, size)\n\n return self.score", "def scoreRsrc( self, rr ):\r\n result = 0.0\r\n for tt in self.getSched( )[rr.getid( )]:\r\n for se in tt:\r\n result += 1\r\n print( \"INFO: Value for %s: %s \" % ( rr, result ) )\r\n return( result )", "def _calculate_score(lsh, minhash, total_num_events):\n neighbours = lsh.query(minhash)\n return float(len(neighbours)) / float(total_num_events)", "def update_score(self):\n self.score = TurboMQ.calculate_fitness(self.result, self.graph)" ]
[ "0.70562255", "0.66023374", "0.64212406", "0.6366032", "0.6331436", "0.6192957", "0.6185072", "0.61593205", "0.61031914", "0.60388577", "0.6024461", "0.59839195", "0.59788215", "0.59720534", "0.59422135", "0.59361476", "0.5904944", "0.5902023", "0.5900325", "0.5871707", "0.5870452", "0.5868713", "0.5860786", "0.58463854", "0.58370984", "0.58326787", "0.5828873", "0.58274585", "0.5826481", "0.5816241", "0.5809004", "0.5805535", "0.57894987", "0.5789454", "0.5781541", "0.5772368", "0.5771382", "0.574854", "0.57469726", "0.57444316", "0.5738762", "0.5737964", "0.5737262", "0.5722026", "0.57216", "0.57197857", "0.57087106", "0.5696915", "0.5688989", "0.5683704", "0.56780607", "0.56733143", "0.5670568", "0.5652749", "0.5650491", "0.5649427", "0.564001", "0.56340975", "0.56256866", "0.56245184", "0.56222045", "0.5619274", "0.56157666", "0.56144285", "0.5610833", "0.56091976", "0.56087786", "0.56035024", "0.5595505", "0.559399", "0.5593901", "0.55922014", "0.55918056", "0.5590907", "0.5590811", "0.55896944", "0.55871135", "0.55867165", "0.5581774", "0.55701053", "0.5562015", "0.55555826", "0.5551563", "0.5543301", "0.55398977", "0.5538195", "0.5535494", "0.5532053", "0.552723", "0.55248165", "0.55198896", "0.55189157", "0.55074054", "0.5503656", "0.5501074", "0.55010104", "0.54990447", "0.54961044", "0.54926205", "0.54912645" ]
0.6433049
2
Raises a ValueError if matrix `value` is not square.
def assert_square(name: str, value: np.ndarray) -> None:
    if not len(value.shape) == 2 or value.shape[0] != value.shape[1]:
        raise ValueError(f"{name} must be a square")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_square(self):\n if self.rows != self.cols:\n raise IndexError(\"Matrix is not square\")", "def test_change_basis_raises_not_square(self, fun):\n A = np.random.rand(4, 6)\n with pytest.raises(ValueError, match=\"The input matrix is not square\"):\n fun(A)", "def check_squareness(A):\n if len(A) != len(A[0]):\n raise ArithmeticError(\"Matrix must be square to inverse.\")", "def check_squareness(A):\n if len(A) != len(A[0]):\n raise ArithmeticError(\"Matrix must be square to inverse.\")", "def check_squareness(self, Am):\r\n if len(Am) != len(Am[0]):\r\n raise ArithmeticError(\"Matrix must be square to inverse.\")", "def test_expend_not_square(self):\n with pytest.raises(ValueError, match=\"The input matrix is not square\"):\n symplectic.expand_passive(np.ones((3, 2)), [0, 1, 2], 5)", "def valid_square(self, row, col, value):\n # Check that the row and col are valid puzzle indices\n if not ((0 <= row < self.sl) and (0 <= col < self.sl)):\n return False\n\n # Check that the square input is empty\n if self.puzzle[row][col] != 0:\n return False\n \n # Check that the value input is a valid puzzle value\n if not (1 <= value <= self.sl):\n if self.puzzle[row][col] == 0 and value == 0:\n return True\n return False\n \n # Check each row, column and block for same number\n for i in range(self.sl): \n if self.puzzle[row][i] == value: # Check each square in row for same value\n return False\n if self.puzzle[i][col] == value: # Check each square in col for same value\n return False\n \n # Check each square in box for same value, a little more complex index-wise\n r = self.bs*(row//self.bs) + (i//self.bs) \n c = self.bs*(col//self.bs) + (i%self.bs) \n if self.puzzle[r][c] == value:\n return False\n \n return True", "def test_5_size_less_than_1(self):\r\n with self.assertRaises(ValueError):\r\n S4 = Square(0)", "def test_data_value(self):\n self.assertRaises(ValueError, Square, 0, 2, 3)\n self.assertRaises(ValueError, Square, -2)\n self.assertRaises(ValueError, Square, 3, -3, 2)\n self.assertRaises(ValueError, Square, 2, 3, -2)", "def test_badsizevaluewithtuple(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square((1, 2), 1, 2, 3)\n self.assertEqual(str(e.exception), 'width must be an integer')", "def validate_square(cls, square):\n if len(square) > 3:\n raise ValueError('Invalid square')\n\n cls._get_row_fow_letter(square[0])\n square_column = int(square[1:])\n if square_column not in range(1, 11):\n raise ValueError('The number of the column must be '\n 'an integer between 1 to 10')", "def square(value):\n return value ** 2", "def is_square(matrix):\n return is_matrix(matrix) and matrix.shape[0] == matrix.shape[1]", "def test_badsizevaluelists(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square([1, 2], 1, 2, 3)\n self.assertEqual(str(e.exception), 'width must be an integer')", "def can_add_to_square(self, tile, value):\n start_row = tile.row // self.board_squared * self.board_squared\n start_col = tile.column // self.board_squared * self.board_squared\n\n for row in range(start_row, start_row + self.board_squared):\n for col in range(start_col, start_col + self.board_squared):\n if self.puzzle[row][col].value == value:\n return False\n\n return True", "def test_badsizevaluefuncs(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(print(), 1, 2, 3)\n self.assertEqual(str(e.exception), 'width must be an integer')", "def test_badsizevaluefloats(self):\n Rectangle.reset_objects()\n 
with self.assertRaises(TypeError) as e:\n r1 = Square(float(1), 1, 2, 3)\n self.assertEqual(str(e.exception), 'width must be an integer')", "def test_set_cell_with_too_large_column(self):\n self.assertRaises(ValueError, self.sudoku.set_cell, (0, 9), 0)", "def test_sd_under_exception(self):\n z_matrix = np.array(\n [[ 0.0, 0.0, 1.0], # noqa: E201\n [-0.1, 0.2, 0.8], # noqa: E201\n [ 0.2, 0.4, 0.6], # noqa: E201\n [ 0.3, 0.7, 0.3], # noqa: E201\n [ 0.6, 0.8, 0.2], # noqa: E201\n [ 0.8, 0.9, 0.1], # noqa: E201\n [ 1.0, 1.0, 0.0]], # noqa: E201\n dtype=np.float64)\n self.assertRaises(ValueError, mcdm.weigh, z_matrix, \"SD\")", "def test_value_error(self):\n self._error_test(ValueError)", "def test_errors(self):\n self.assertRaises(TypeError, columnize, 5, 'reject input - not array')\n return", "def test_sd_over_exception(self):\n z_matrix = np.array(\n [[0.0, 0.0, 1.1],\n [0.1, 0.2, 0.8],\n [0.2, 0.4, 0.6],\n [0.3, 0.7, 0.3],\n [0.6, 0.8, 0.2],\n [0.8, 0.9, 0.1],\n [1.0, 1.0, 0.0]],\n dtype=np.float64)\n self.assertRaises(ValueError, mcdm.weigh, z_matrix, \"SD\")", "def handle_errors(self, value):\n if not isinstance(value, int):\n raise TypeError(\"size must be an integer\")\n if value < 0:\n raise ValueError(\"size must be >= 0\")", "def is_square(m):\n if not hasattr(m, '__len__'):\n return False\n\n is_flat_square_matrix = all(np.isscalar(c) for c in m) and np.sqrt(len(m)).is_integer()\n if is_flat_square_matrix:\n return True\n\n is_structed_square_matrix = all(len(row) == len(m) for row in m)\n return is_structed_square_matrix", "def test_badsizevaluesets(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square({1, 2, 3}, 1, 2, 3)\n self.assertEqual(str(e.exception), 'width must be an integer')", "def test_convert_matrix():\n foo = Value(matrices=[[1.0, 2.0], [-2.0, 1.0]])\n assert foo.matrices[0][0][1].value == 2.0\n assert foo.matrices[0][1][0].value == -2.0", "def __allowed_values_correct_matrix(self):\n strTestName = 'Values of a Numpy Array 2D (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy Array 2D')\n RxCSObject.paramAllowed('parameter1', range(int(2e3)))\n RxCSObject.parameter1 = np.random.randint(1, 1e3, (1e2, 1e1))\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def test_set_cell_with_too_large_row(self):\n self.assertRaises(ValueError, self.sudoku.set_cell, (9, 0), 0)", "def test_square(self, board, row, col, test):\n if row < 0 or row > 7:\n return False\n if col < 0 or col > 7:\n return False\n \n return test(board[row][col])", "def test_Sobol_G_raises_error_if_values_wrong_size():\n a = [1, 2, 3, 4, 5, 6, 7, 8]\n with raises(ValueError):\n evaluate(np.array([1, 2, 3, 4, 5, 6, 7]), a)", "def square(x):\n if type(x) not in (int, float, int, complex):\n raise TypeError(\"argument must be a number\")\n\n return x*x", "def __size_restriction_incorrect_matrix_number(self):\n\n strTestName = 'Matrix size lower than a number (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy array 2D parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramSizL('parameter1', 15)\n\n RxCSObject.parameter1 = np.random.randn(3, 5)\n\n self.__parametersCheck_error(RxCSObject, SizeError, strTestName)", "def square_check(self):\n return len(self.matrix) == len(self.matrix[0])", "def test_width_message_errors(self):\n s = Square(10)\n with self.assertRaisesRegex(TypeError, \"width must be an integer\"):\n s = Square(\"holberton\")\n with 
self.assertRaisesRegex(TypeError, \"width must be an integer\"):\n s = Square(True)\n with self.assertRaisesRegex(TypeError, \"width must be an integer\"):\n s = Square((1, 2))\n with self.assertRaisesRegex(TypeError, \"width must be an integer\"):\n s = Square([3, 4])\n with self.assertRaisesRegex(TypeError, \"width must be an integer\"):\n s = Square({'key': 1})\n with self.assertRaisesRegex(TypeError, \"width must be an integer\"):\n s = Square(5.25)\n with self.assertRaisesRegex(ValueError, \"width must be > 0\"):\n s = Square(-1)\n with self.assertRaisesRegex(ValueError, \"width must be > 0\"):\n s = Square(0)", "def test_non_2D_dvalues_array(self):\n self.assertRaises(splines.DValuesError, splines.Spline.__init__,\n splines.Spline, np.array([0.1, 0.5, 0.9]),\n np.array([1, 2, 3, 4]))", "def test_badsizevaluebool(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(True, 1, 2, 3)\n self.assertEqual(str(e.exception), 'width must be an integer')", "def test_6_x_integer(self):\r\n with self.assertRaises(TypeError):\r\n S5 = Square(1, 'a')", "def _validate_matrix_shape(matrix: FieldMatrix, shape: Tuple[int, int]):\n if len(matrix) != shape[0]:\n raise ValueError(\n 'Invalid matrix row len = %d: not consistent with expected shape: %s.' %\n (len(matrix), shape))\n\n for m in matrix:\n if len(m) != shape[1]:\n raise ValueError(\n 'Invalid matrix col len = %d: not consistent with expected shape: %s.'\n % (len(m), shape))", "def test_badyvaluewithtuple(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, (1, 2), 3)\n self.assertEqual(str(e.exception), 'y must be an integer')", "def testRaisesErrorValueMismatch(self):\n c = Simulation(logging_level=logging.CRITICAL)\n c.set_simulation_parameters(\n seed=4,\n task=32,\n output_directory=\"output\",\n min_speciation_rate=0.5,\n sigma=2,\n tau=2,\n deme=1,\n sample_size=0.1,\n max_time=10,\n dispersal_relative_cost=1,\n min_num_species=1,\n )\n c.set_map_files(\n sample_file=\"sample/SA_samplemaskINT.tif\",\n fine_file=\"sample/SA_sample_coarse_zeros.tif\",\n dispersal_map=\"sample/dispersal_fine_cumulative.tif\",\n )\n with self.assertRaises(RuntimeError):\n c.run()", "def test_0size(self):\n Square.reset_objects()\n with self.assertRaises(ValueError) as e:\n s1 = Square(0)\n self.assertEqual(str(e.exception), \"width must be > 0\")", "def insert(self, row, col, value):\n if self.valid_square(row, col, value) or value == 0:\n self.puzzle[row][col] = value\n return True\n return False", "def validateDouble(value, errorMessage):\n if not isinstance(value, numbers.Real) or isInfinite(value):\n raise ShapeException(errorMessage)", "def square(self, row, col):\n if 0 == row:\n if 0 == col:\n return self.tl\n elif 1 == col:\n return self.tc\n elif 2 == col:\n return self.tr\n elif 1 == row:\n if 0 == col:\n return self.ml\n elif 1 == col:\n return self.mc\n elif 2 == col:\n return self.mr\n elif 2 == row:\n if 0 == col:\n return self.bl\n elif 1 == col:\n return self.bc\n elif 2 == col:\n return self.br\n raise TypeError(\n \"No such (row, column) pair: each must be in range 0-2 inclusive\")", "def test_NaNsize(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(float('nan'), 10, 5, 7)\n self.assertEqual(str(e.exception), 'width must be an integer')", "def _check_matrix(self, x, *args):\n if self._special and x.determinant() != 1:\n raise TypeError('matrix must have determinant one')\n F = self.invariant_bilinear_form()\n if x * F * 
x.transpose() != F:\n raise TypeError('matrix must be orthogonal with respect to the invariant form')\n # TODO: check that quadratic form is preserved in characteristic two", "def __size_restriction_correct_matrix_number(self):\n\n strTestName = 'Matrix size higher or equal to a number (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy array 2D parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramSizHE('parameter1', 13)\n\n RxCSObject.parameter1 = np.random.randn(3, 5)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def test_init_value_error(self):\n data = [[0, 0], [0, 0], [0, 0]]\n with self.assertRaises(ValueError):\n Board(data)", "def test_get_cell_with_too_large_column(self):\n self.assertRaises(ValueError, self.sudoku.get_cell, (0, 9))", "def __size_restriction_incorrect_matrix_matrix(self):\n\n strTestName = 'Matrix size lower than the size of a matrix (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('mRefParameter1', 'String ref. parameter')\n RxCSObject.paramType('mRefParameter1', np.ndarray)\n\n # Now, let us define a matrix\n RxCSObject.paramAddMan('parameter1', 'Numpy array 2D parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramSizL('parameter1', 'mRefParameter1')\n\n RxCSObject.mRefParameter1 = np.random.randn(2, 2)\n RxCSObject.parameter1 = np.random.randn(2, 2)\n\n self.__parametersCheck_error(RxCSObject, SizeError, strTestName)", "def rectify(matrix, method='half'):\n if method=='half':\n output = numpy.where(matrix<0, 0, matrix)\n else:\n output = numpy.asarray((matrix**2)**0.5)\n \n if type(matrix) in [float, int]:\n #return a single value\n try: #some versions of num return a rank-1 array\n return output[0]\n except: #others return a rank-0 array wihch needs this\n return float(output)\n else:\n #return a matrix\n return output", "def test_negativesize(self):\n Square.reset_objects()\n with self.assertRaises(ValueError) as e:\n s1 = Square(-1)\n self.assertEqual(str(e.exception), \"width must be > 0\")", "def test_get_cell_with_too_large_row(self):\n self.assertRaises(ValueError, self.sudoku.get_cell, (9, 0))", "def test_to_scaler_non_allowed_value_error(self):\n\n with pytest.raises(\n ValueError,\n match=r\"\"\"scaler should be one of; \\['min_max', 'max_abs', 'standard'\\]\"\"\",\n ):\n\n ScalingTransformer(columns=\"b\", scaler=\"zzz\", scaler_kwargs={\"a\": 1})", "def _validate_internal_value_singular(self, value: Any):\n # Make sure the value is of our type\n if not isinstance(value, self._type):\n raise TypeError(f\"Expected {self._type.__name__} but got {type(value).__name__}\")", "def test_badxvaluewithtuple(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, (1, 2), 2, 3)\n self.assertEqual(str(e.exception), 'x must be an integer')", "def is_square(self):\n return self.shape[0] == self.shape[1]", "def is_square(self):\n return self.shape[0] == self.shape[1]", "def test_negativeyvalue(self):\n Square.reset_objects()\n with self.assertRaises(ValueError) as e:\n s1 = Square(1, 2, -2)\n self.assertEqual(str(e.exception), \"y must be >= 0\")", "def check_resize_size(size):\n if isinstance(size, int):\n check_value(size, (1, FLOAT_MAX_INTEGER))\n elif isinstance(size, (tuple, list)) and len(size) == 2:\n for i, value in enumerate(size):\n check_value(value, (1, INT32_MAX), \"size at dim {0}\".format(i))\n else:\n raise TypeError(\"Size should be 
a single integer or a list/tuple (h, w) of length 2.\")", "def fill(self,value):\n if value is None:\n return\n if isinstance(value,numbers.Number):\n for i in range(self.nrows):\n for j in range(self.ncols):\n self.matrix[i][j] = value\n elif isinstance(value,list):\n if len(value) != self.nrows*self.ncols:\n raise ValueError('matrix fill value has incorrect number of elements')\n\n if not all(isinstance(item,numbers.Number) for item in value):\n raise TypeError('matrix fill value not a list of numbers')\n index = 0\n for i in range(self.nrows):\n for j in range(self.ncols):\n self.matrix[i][j] = value[index]\n index += 1 \n else:\n raise TypeError('matrix fill value not a number')", "def square_value(a):\n try:\n out = a*a\n except TypeError:\n raise TypeError(\"Input should be a string:\")\n\n return out", "def test_validate_2d(data, msg):\n if msg:\n with pytest.raises(ValueError, match=msg):\n _ = _validate_2d(data, 'test')\n else:\n assert data == _validate_2d(data, 'test')", "def test_autonne_error():\n n = 10\n m = 20\n A = np.random.rand(n, m)\n with pytest.raises(ValueError, match=\"The input matrix is not square\"):\n symplectic.autonne(A)\n n = 10\n m = 10\n A = np.random.rand(n, m)\n with pytest.raises(ValueError, match=\"The input matrix is not symmetric\"):\n symplectic.autonne(A)", "def test_NaNall(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(\n float('nan'), float('nan'), float('nan'), float('nan'))\n self.assertEqual(str(e.exception), 'width must be an integer')", "def test_badyvaluewithfloats(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, float(1), 3)\n self.assertEqual(str(e.exception), 'y must be an integer')", "def test_sum_squares(self):\n fun = get_problem('sum_squares', self.dimension)\n self.assertEqual(fun(self.array), 0.0)", "def set_square(self, col, row, value):\n row_index = row - 1\n col_index = ord(col.lower()) - 97 # ord('a') is 97\n self.state[row_index][col_index] = value", "def _validate_values(self, values):\n prev_len = -1\n i = j = -1\n if values is None or len(values) == 0:\n self.shape = 0, 0\n return\n for i, row in enumerate(values):\n if prev_len == -1:\n prev_len = len(row)\n if prev_len != len(row):\n raise ValueError(f\"Row {i} differs in length: {prev_len} != {len(row)}\")\n for j, val in enumerate(row):\n if type(val) not in (int, float, complex):\n raise ValueError(f\"[{i}, {j}]: {val} is of bad type ({type(val)})\")\n if val == 0:\n self.empty_loc = (i, j)\n if i == -1:\n self.shape = 0, 0\n else:\n self.shape = i + 1, j + 1", "def validate_matrix(self, data, **kwargs):\n validate_matrix(data.get(\"params\"))", "def check_magic_square(square, square_edge):\n\n def check(square_edge, list_to_check):\n # NB There is many ways to that:\n # as the zen said simple is better than complex...\n\n # The formula is M = (n²(n²+1)/2) / n\n constant = ((square_edge**2) * (square_edge**2 + 1) / 2) / square_edge\n\n for el in list_to_check:\n # We compare that each iterator elements is equal to the magic constant\n assert el == constant, \"{} is not magic\".format(list_to_check)\n\n # Check constant for each row\n check(square_edge, numpy.sum(square, axis=0))\n\n # Check constant for each column\n check(square_edge, numpy.sum(square, axis=1))\n\n # Check constant for diagonal\n check(square_edge, [numpy.sum(square.diagonal())])\n\n return print(\"Correct \\n\")", "def is_perfect_square():", "def test_width_is_not_int(self):\n with 
self.assertRaisesRegex(TypeError, \"width must be an integer\"):\n Square(\"1\", 1)", "def test_change_basis_raises_not_even(self, fun, dim):\n size = (5,) * dim\n A = np.random.rand(*size)\n with pytest.raises(ValueError, match=\"The input array is not even-dimensional\"):\n fun(A)", "def test_4_size_integer(self):\r\n with self.assertRaises(TypeError):\r\n S3 = Square('a')", "def test_2_no_args_square(self):\r\n with self.assertRaises(TypeError):\r\n S1 = Square()", "def test_badsizevaluewithstring(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(\"foo\", 1, 2, 3)\n self.assertEqual(str(e.exception), 'width must be an integer')", "def is_square(self):\n lines, columns = self.get_size()\n return lines == columns", "def cell(self, value):\n if value is not None:\n value.get_shape().assert_is_compatible_with(self._output_shape)\n self._cell = value", "def test_check_matrix():\n R_list = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]\n R = pr.check_matrix(R_list)\n assert_equal(type(R), np.ndarray)\n assert_equal(R.dtype, np.float64)\n\n R_int_array = np.eye(3, dtype=int)\n R = pr.check_matrix(R_int_array)\n assert_equal(type(R), np.ndarray)\n assert_equal(R.dtype, np.float64)\n\n R_array = np.eye(3)\n R = pr.check_matrix(R_array)\n assert_array_equal(R_array, R)\n\n R = np.eye(4)\n assert_raises_regexp(\n ValueError, \"Expected rotation matrix with shape\",\n pr.check_matrix, R)\n\n R = np.array([[1, 0, 0], [0, 1, 0], [0, 0.1, 1]])\n assert_raises_regexp(\n ValueError, \"inversion by transposition\", pr.check_matrix, R)\n\n R = np.array([[1, 0, 1e-16], [0, 1, 0], [0, 0, 1]])\n R2 = pr.check_matrix(R)\n assert_array_equal(R, R2)\n\n R = -np.eye(3)\n assert_raises_regexp(ValueError, \"determinant\", pr.check_matrix, R)", "def __size_restriction_incorrect_matrix_parameter(self):\n\n strTestName = 'Matrix size higher or equal to a parameter (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('iRefParameter1', 'Ref. 
parameter')\n RxCSObject.paramType('iRefParameter1', int)\n\n # Now, let us define a matrix\n RxCSObject.paramAddMan('parameter1', 'Numpy array 2D parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramSizHE('parameter1', 'iRefParameter1', add=2)\n\n RxCSObject.iRefParameter1 = 20\n RxCSObject.parameter1 = np.random.randn(4, 4)\n\n self.__parametersCheck_error(RxCSObject, SizeError, strTestName)", "def test_unknown_weigh_exception(self):\n z_matrix = np.array(\n [[0.0, 0.0, 1.0],\n [0.1, 0.2, 0.8],\n [0.2, 0.4, 0.6],\n [0.3, 0.7, 0.3],\n [0.6, 0.8, 0.2],\n [0.8, 0.9, 0.1],\n [1.0, 1.0, 0.0]],\n dtype=np.float64)\n self.assertRaises(ValueError, mcdm.weigh, z_matrix, \"Unknown\")", "def test_invalid_grid_values_greater_than_1(self):\n self.assertRaises(ValueError, splines.Spline.__init__, splines.Spline,\n np.array([0, 0.5, 1.1]), np.array([1, 2, 3, 4]))", "def test_create_matrix(self):\n test_matrix = investment_growth.create_matrix(5, 2)\n self.assertEqual(0, test_matrix[1][4])\n with self.assertRaises(Exception):\n test_matrix[2][5]", "def test_negativexvalue(self):\n Square.reset_objects()\n with self.assertRaises(ValueError) as e:\n s1 = Square(1, -2)\n self.assertEqual(str(e.exception), \"x must be >= 0\")", "def magic_check(magic_square, magic_value):\n row_sum = np.sum(magic_square, axis=0)\n col_sum = np.sum(magic_square, axis=1)\n diag_sum = np.sum(np.diag(magic_square))\n if : # part b\n return True\n else:\n return False", "def test_squared_moving_integration_valuechecks(x, window_length):\n from sleepecg._heartbeat_detection import _squared_moving_integration\n with pytest.raises(ValueError):\n _squared_moving_integration(x, window_length)", "def test_Sobol_G_raises_error_if_values_wrong_size():\n a = [1, 2, 3, 4, 5, 6, 7, 8]\n evaluate(np.array([1, 2, 3, 4, 5, 6, 7]), a)", "def test_exception():\n mat2D = MatrixDouble([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0], [3.0, 3.0]])\n mat3D = MatrixDouble([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]])\n\n with pytest.raises(ValueError):\n simplify_line_2d(mat3D, 0.1, True)\n\n with pytest.raises(ValueError):\n simplify_line_3d(mat2D, 0.1, True)", "def test_sparsity(self):\n\n self.validator.adata.X = self.validator.adata.X.toarray()\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.warnings,\n [\n \"WARNING: Sparsity of 'X' is 0.875 which is greater than 0.5, \"\n \"and it is not a 'scipy.sparse.csr_matrix'. 
It is \"\n \"STRONGLY RECOMMENDED to use this type of matrix for \"\n \"the given sparsity.\"\n ],\n )", "def test_wrong_shape(self):\n scores = np.column_stack((np.ones(10), np.ones(10)))\n with self.assertRaises(ValueError):\n calc_disc(scores)", "def is_square(mtx):\n for line in mtx:\n if len(line) != len(mtx):\n return False\n return True", "def diagonal(size, value):\n if not isinstance(size, int):\n raise TypeError(\"int expected, not {}\".format(type(size)))\n\n if not mathutil.is_scalar(value):\n raise TypeError(\n \"int, float or complex expected, not {}\".format(type(value)))\n\n sc = SparkContext.getOrCreate()\n\n shape = (size, size)\n dtype = type(value)\n\n nelem = shape[0]\n\n if value == dtype():\n rdd = sc.emptyRDD()\n else:\n num_partitions = util.get_num_partitions(\n sc,\n util.get_size_of_type(dtype) * nelem\n )\n\n rdd = sc.range(size, numSlices=num_partitions).map(\n lambda m: (m, m, value)\n )\n\n return Matrix(rdd, shape, dtype=dtype, nelem=nelem)", "def _validate_tiling(inst: Any, attr: Any, value: List[List[float]]) -> None:\n if len(value) == 0:\n raise ValueError(\"Tiling must have at least 1 row\")\n\n if any(len(t) == 0 for t in value):\n raise ValueError(\"Tiling must have at least 1 column\")\n\n if min(len(t) for t in value) != max(len(t) for t in value):\n raise ValueError(\"Tiling must have the same number of column for each row.\")", "def test_validate_failure_bad_config(self, value):\n sch = scheme.Scheme()\n with pytest.raises(errors.SchemeValidationError):\n sch.validate(value)", "def test_exceptions(self):\n\n with self.assertRaises(TypeError, msg=\"size must be an integer\"):\n Square(6.5)\n with self.assertRaises(TypeError, msg=\"size must be an integer\"):\n Square(False)\n with self.assertRaises(TypeError, msg=\"size must be an integer\"):\n Square(6j)\n with self.assertRaises(TypeError, msg=\"size must be an integer\"):\n Square([4, 1])\n with self.assertRaises(TypeError, msg=\"size must be an integer\"):\n Square({\"Milk\": 1})\n with self.assertRaises(TypeError, msg=\"size must be an integer\"):\n Square((3, 9))\n with self.assertRaises(TypeError, msg=\"x must be an integer\"):\n Square(2, 3.5, 4)\n with self.assertRaises(TypeError, msg=\"x must be an integer\"):\n Square(2, True, 4)\n with self.assertRaises(TypeError, msg=\"x must be an integer\"):\n Square(2, 3j, 4)\n with self.assertRaises(TypeError, msg=\"x must be an integer\"):\n Square(2, \"3\", 4)\n with self.assertRaises(TypeError, msg=\"x must be an integer\"):\n Square(2, [3], 4)\n with self.assertRaises(TypeError, msg=\"x must be an integer\"):\n Square(2, {\"My\": 3}, 4)\n with self.assertRaises(TypeError, msg=\"x must be an integer\"):\n Square(2, (3, 3), 4)\n with self.assertRaises(TypeError, msg=\"y must be an integer\"):\n Square(2, 3, 4.2)\n with self.assertRaises(TypeError, msg=\"y must be an integer\"):\n Square(2, 3, False)\n with self.assertRaises(TypeError, msg=\"y must be an integer\"):\n Square(2, 3, 4j)\n with self.assertRaises(TypeError, msg=\"y must be an integer\"):\n Square(2, 3, \"4\")\n with self.assertRaises(TypeError, msg=\"y must be an integer\"):\n Square(2, 3, [4])\n with self.assertRaises(TypeError, msg=\"y must be an integer\"):\n Square(2, 3, {\"its\": 4})\n with self.assertRaises(TypeError, msg=\"y must be an integer\"):\n Square(2, 3, (4, 4))\n with self.assertRaises(ValueError, msg=\"size must be > 0\"):\n Square(0)\n with self.assertRaises(ValueError, msg=\"size must be > 0\"):\n Square(-2)\n with self.assertRaises(ValueError, msg=\"x must be >= 
0\"):\n Square(1, -8, 9)\n with self.assertRaises(ValueError, msg=\"y must be >= 0\"):\n Square(1, 8, -9)", "def valid(game_board, value, row, col):\n if len(value) > 1:\n value = \"X\"\n # Check row of new position\n for i in range(len(game_board[row])):\n if game_board[row][i] == value and i != col:\n return False\n\n # Check column of new position\n for i in range(len(game_board)):\n if game_board[i][col] == value and i != row:\n return False\n\n # Check the 3x3 square area\n start_row = 3 * (row // 3)\n start_col = 3 * (col // 3)\n for i in range(start_row, start_row+3):\n for j in range(start_col, start_col+3):\n if game_board[i][j] == value and i != row and j != col:\n return False\n\n return True", "def can_add_value(self, tile, value):\n if value < 1 or value > len(self.puzzle):\n return False\n if not self.can_add_to_square(tile, value):\n return False\n if not self.can_add_to_row(tile, value):\n return False\n if not self.can_add_to_column(tile, value):\n return False\n return True", "def test_from_matrix(self):\n self.assertTrue(np.all(rowan.from_matrix(np.eye(3)) == one))\n\n with self.assertRaises(ValueError):\n self.assertTrue(np.allclose(rowan.from_matrix(2 * np.eye(3))))\n\n mat = np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0]])\n\n self.assertTrue(\n np.logical_or(\n np.allclose(rowan.from_matrix(mat), half),\n np.allclose(rowan.from_matrix(mat), -half),\n )\n )\n\n mat = np.array([[0, 1, 0], [0, 0, -1], [-1, 0, 0]])\n v = np.copy(half)\n v[3] *= -1\n self.assertTrue(np.allclose(rowan.from_matrix(mat), v))", "def set_matrix_cell(matrix, value, row, column):\n OpenMaya.MScriptUtil.setDoubleArray(matrix[row], column, value)" ]
[ "0.70691025", "0.6647211", "0.62668157", "0.62668157", "0.6163664", "0.6089783", "0.6032893", "0.5889549", "0.58770996", "0.58336306", "0.57889503", "0.57666147", "0.5684565", "0.5575981", "0.5559106", "0.5558494", "0.553429", "0.55041176", "0.5487154", "0.5484843", "0.547051", "0.5455025", "0.544322", "0.54421824", "0.54277766", "0.5418165", "0.5416506", "0.5412892", "0.54055595", "0.5360501", "0.53582466", "0.5345235", "0.5336413", "0.5332133", "0.5323899", "0.53154147", "0.5294586", "0.5283838", "0.5267505", "0.52654546", "0.5255874", "0.5252432", "0.52459383", "0.52360344", "0.52354026", "0.52213985", "0.52043635", "0.520316", "0.5201665", "0.51787055", "0.51523274", "0.5149227", "0.51459473", "0.51433516", "0.51360404", "0.5126387", "0.5120431", "0.5120431", "0.5120274", "0.5119926", "0.51027334", "0.50986034", "0.5096013", "0.50887257", "0.5085751", "0.5085567", "0.50851375", "0.5078476", "0.5077796", "0.5074969", "0.50727683", "0.50726545", "0.5069328", "0.5060177", "0.5057899", "0.5054981", "0.50527513", "0.50487405", "0.5047492", "0.504731", "0.50466555", "0.5045249", "0.5045147", "0.50389534", "0.5034725", "0.50211954", "0.5018881", "0.50082743", "0.50066227", "0.50038916", "0.50031316", "0.5003011", "0.50023615", "0.49936292", "0.49912474", "0.49860352", "0.49808213", "0.49804184", "0.49783757", "0.49765593" ]
0.7312172
0
Calculates the Shannon entropy for probabilities `ps` with `base`.
def shannon_entropy(ps: np.ndarray, base: int = 2) -> float: return -np.sum(ps * np.log(ps) / np.log(base))
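Illustrative usage of the shannon_entropy snippet above (an editorial sketch, not part of the dataset row): with base 2, a uniform distribution over four outcomes yields exactly 2 bits and a point mass yields 0. The sketch masks zero probabilities before taking the log; that guard is an assumption added here, since the stored one-liner returns nan whenever `ps` contains a 0.

import numpy as np

def shannon_entropy_sketch(ps: np.ndarray, base: int = 2) -> float:
    # Same formula as the stored document: H = -sum(p * log(p)) / log(base).
    # Zero probabilities are dropped (an added safeguard) so 0 * log(0) counts as 0.
    ps = np.asarray(ps, dtype=float)
    nz = ps[ps > 0]
    return float(-np.sum(nz * np.log(nz) / np.log(base)))

print(shannon_entropy_sketch(np.array([0.25, 0.25, 0.25, 0.25])))  # 2.0 bits (uniform over 4)
print(shannon_entropy_sketch(np.array([1.0, 0.0, 0.0])))           # 0.0 bits (deterministic)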
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def entropy(self, base: int = None):\n\n # shannon entropy in nats\n fdist_ = self.fdist\n fdist_[\"prob\"] = fdist_[\"freq\"] / fdist_[\"freq\"].sum()\n fdist_[\"logp\"] = np.log(fdist_[\"prob\"])\n fdist_[\"nats\"] = -fdist_[\"prob\"] * fdist_[\"logp\"]\n entropy_ = fdist_[\"nats\"].sum()\n\n # convert base\n if base:\n entropy_ = entropy_ / np.log(base)\n\n # return\n return entropy_", "def shannon(counts, base=2):\n freqs = counts/float(counts.sum())\n nonzero_freqs = freqs[freqs.nonzero()]\n return -sum(nonzero_freqs*log(nonzero_freqs))/log(base)", "def entropy_numba(p):\n return 2 ** np.sum(-p*np.log2(p+1e-10))", "def _entropy2(labels, base=None):\n\n n_labels = len(labels)\n\n if n_labels <= 1:\n return 0\n\n value,counts = np.unique(labels, return_counts=True)\n probs = counts / n_labels\n n_classes = np.count_nonzero(probs)\n\n if n_classes <= 1:\n return 0\n\n ent = 0.\n\n # Compute entropy\n base = e if base is None else base\n for i in probs:\n ent -= i * log(i, base)\n\n # quick observation shows ent between 0.0 and 4.0.\n return ent", "def entropy_py(p):\n return 2 ** np.sum(-p*np.log2(p+1e-10))", "def calculate_entropy(prob):\n return -(prob * math.log(prob,2))", "def chl_entropy(y, base=2):\n p,bins = histogram(y, bins=unique(y)) # don't use 'Normed' feature, since that includes the bin-width!\n p = p[p!=0]/float(len(y))\n S = -1.0*sum(p*log(p))/log(base)\n return S", "def cross_entropy(p, q, base=2):\n q = ma.array(q, mask=(q == 0))\n return - np.vdot(p, ma.log(q)) / np.log(base)", "def shannon_entropy(probs):\n return -(\n math.sum([px * math.log2(px) if px != 0 and not (np.isclose(px, 0)) else 0 for px in probs])\n )", "def equitability(counts, base=2):\n return shannon(counts, base)/(log((counts!=0).sum())/log(base))", "def entropy(self, params):\n log_std = params[:, :, 1]\n return (log_std + 0.5 * (self.LOG2PI + 1)).sum(dim=-1)", "def entropy(probabilities):\n return -(sum([p * log(p, 2) if p > 0 else 0 for p in probabilities]))", "def entropy(p):\n assert (p >= 0).all()\n assert abs(np.sum(p)-1) < 1e-6\n return -np.sum(p*np.log(p+1e-12))", "def get_entropy_of_labels(labels, base=None):\n\n n_labels = len(labels)\n\n if n_labels <= 1:\n return 0\n\n value,counts = np.unique(labels, return_counts=True)\n probs = counts / n_labels\n n_classes = np.count_nonzero(probs)\n\n if n_classes <= 1:\n return 0\n\n ent = 0.\n\n # Compute entropy\n base = e if base is None else base\n for i in probs:\n ent -= i * log(i, base)\n\n return ent", "def entropy(x, bins, normalize=False, xy_probabilities=False):\n # calculate probabilities if xy_probabilities == False\n if xy_probabilities:\n # if x does not sum up to 1, raise an error\n if not np.isclose(sum(x),1,atol=0.0001):\n raise ValueError('Probabilities in vector x do not sum up to 1.')\n \n # add a small number to all probabilities if zero occurs\n if x.any(0):\n p = x + 1e-15\n else:\n p = x\n else:\n # get the bins\n bins = np.histogram_bin_edges(x, bins)\n\n # calculate the empirical probabilities\n count = np.histogram(x, bins=bins)[0]\n\n # if counts should be None, raise an error\n if np.sum(count) == 0:\n raise ValueError('The histogram cannot be empty. 
Adjust the bins to ' +\n 'fit the data')\n # calculate the probabilities\n p = (count / np.sum(count)) + 1e-15\n\n\n # calculate the Shannon Entropy\n if normalize:\n # get number of bins\n nbins = len(p)\n # maximal entropy: uniform distribution\n normalizer = np.log2(nbins) \n\n return - p.dot(np.log2(p)) / normalizer\n else:\n return - p.dot(np.log2(p))", "def entropy_coefficient(filter1, filter2, base=2):\n\n if (type(filter1) is NullField) or (type(filter2) is NullField):\n return 0\n\n total_count = int(filter1.bit_size)\n\n f1_element_count = filter1.filter.count(True)\n f2_element_count = filter2.filter.count(True)\n\n prob_f1 = f1_element_count / total_count\n prob_f2 = f1_element_count / total_count\n\n e_f1 = -1.0 * total_count * prob_f1 * math.log(prob_f1) / math.log(base)\n e_f2 = -1.0 * total_count * prob_f2 * math.log(prob_f2) / math.log(base)\n\n entropy = abs(e_f1 - e_f2)\n\n # for element_count in Counter(data).values():\n # p = element_count / total_count\n # entropy -= p * math.log(p, self.base)\n\n assert entropy >= 0\n\n return 1 - entropy", "def entropy(s):\n p, lns = Counter(s), float(len(s))\n return -sum( count/lns * math.log(count/lns, 2) for count in p.values())", "def shannon_entropy(counts):\n freq = np.array(counts) * 1.0 / np.sum(counts)\n return -np.sum([f * np.log2(f) for f in freq if f != 0])", "def base_entropy_masked(seq_list, base_set, base_idx):\n # entropy analysis\n base_list = [seq[base_idx] for seq in seq_list]\n freq_dict = Counter(base_list)\n mask_list = ['-', 'N']\n n_seq = sum([freq_dict[base] for base in freq_dict if base not in mask_list])\n H = 0\n total_masked = 0\n for base in freq_dict:\n if base in mask_list:\n total_masked += freq_dict[base]\n continue\n P = freq_dict[base]/n_seq\n H -= log2(P) * P\n masked_pct = total_masked/len(base_list)\n return H, masked_pct", "def ShannonEntropy(self,s):\n e = s[np.nonzero(s)]**2 * np.log(s[np.nonzero(s)]**2)\n return np.sum(e)", "def entropy(data):\n e = 0\n\n counter = collections.Counter(data)\n l = len(data)\n for count in counter.values():\n p_x = count / l\n e += - p_x * math.log2(p_x)\n\n return e", "def entropy(d, total, word_count):\n\t# Entropie je - Sum_morf p(morf) * log_2 p(morf)\n\t# p(morf) = c(morf) / c(all)\n\te = 0\n\tfor count in d.values():\n\t\tp = count/total\n\t\ttype_e = - p * log2(p)\n\t\te += type_e * count\n\treturn e / word_count", "def entropy(class_probabilities):\n return sum(-p * math.log(p, 2)\n for p in class_probabilities\n if p) #ignore 0's", "def entropy(p):\n ent = tf.where(p > np.finfo(np.float32).eps, -p * tf.log(p), tf.zeros_like(p))\n ent = tf.reduce_sum(ent, axis=1)\n return ent", "def entropy(class_probabilities):\n return sum(-p * math.log(p,2)\n for p in class_probabilities\n if p)", "def entropy_(P):\n res = 0.0\n\n mask = P != 0.0 # avoid 0 in log\n f = lambda x: x*np.log2(x)\n # map-reduce strategy (likely to be more optimized than loops)\n temp = list(map(f, P[mask]))\n res = -np.sum(temp, dtype=float)\n return res", "def entropy(x):\n nz = np.nonzero(x)[0]\n return -np.sum(x[nz]*np.log2(x[nz]))", "def entropy(temp,pres):\n g_t = liq_g(1,0,temp,pres)\n s = -g_t\n return s", "def entropy(p_list):\n assert len(p_list) > 0\n E = 0.0\n for p in p_list:\n if p == 0.0:\n continue\n E += p*math.log(p)\n return E", "def shannon_entropy(c):\n\n c_normalized = c / float(np.sum(c))\n c_normalized_nonzero = c_normalized[np.nonzero(c_normalized)] # gives 1D array\n entropy = -sum(c_normalized_nonzero * np.log2(c_normalized_nonzero)) # unit in bits\n return 
entropy", "def entropy(P):\n P_nan = P.copy()\n P_nan[P_nan == 0] = np.nan\n return np.nansum(np.multiply(P_nan, np.log2(1 / P_nan)))", "def entropy(dist):\n #dist = array([max(d,1e-100) for d in dist])\n dist = dist + 1e-20\n return dot(dist,(log(1.0/dist) * (1.0/log(2.0))).T)", "def entropy(data):\n\n freqs = {}\n suma = len(data)\n\n for i in range(0, len(data)):\n freqs[data[i]] = 1.0 + freqs.get(data[i], 0)\n\n res = 0.0\n for i in freqs:\n res += (freqs[i] / suma) * log((freqs[i] / suma), 2)\n return -res", "def prob_t_N(genotype, base):\n cnter = Counter(genotype)\n return cnter.get(base, 0) * 1/len(genotype)", "def c_entropy(state,target=None,log_base=2):\n if target!=None:\n state = state.ptrace(target)\n return entropy(com_measure(state),base=log_base)", "def calc_entropy_one(states, T):\n\n # S = - kB \\sum_i p_i \\log p_i = - k_B <\\log p_i>\n P = calc_probabilities_one(states, T)\n return -kB * np.sum(P * np.log(P))", "def TransformBase(base:int, number:list, digts:int) -> int :\n i = 0\n res = 0\n while ( i < digts):\n index = digts - i - 1\n number[index] = int(number[index]) * (base ** i) \n res += number[index]\n i += 1\n return res", "def entropy(counts):\n assert (counts >= 0).all()\n probs = counts / counts.sum()\n probs = probs[probs > 0] # Avoid log(0)\n return - np.sum(probs * np.log2(probs))", "def entropyRandom(stream):\n prob = 1.0 / len(stream)\n return -(prob * log(prob, 2)) * len(stream)", "def entropy(self):\n Z = self.sum()\n assert (Z > 0), 'Non-normalizable factor (perhaps log factor?)' # also check for positivity?\n H = 0.0\n for x in np.nditer(self.t, op_flags=['readonly']):\n p = x/Z\n H += 0.0 if p==0 else -p*np.log(p)\n return H", "def entropy(a, b):\n with mp.extradps(5):\n a, b = _validate_a_b(a, b)\n return (_fun.logbeta(a, b)\n - (a - 1)*mp.psi(0, a)\n - (b - 1)*mp.psi(0, b)\n + (a + b - 2)*mp.psi(0, a + b))", "def jensen_shannon_div_bern(p, w=None):\n\n n = len(p)\n p = np.array(p)\n\n # make weight matrix sum to 1\n if w is None:\n w = np.ones(n)\n w = (np.array(w) / np.sum(w)).reshape((1, n))\n\n p_sum = np.dot(w, p)\n entropy_of_sum = scipy.stats.bernoulli(p_sum).entropy()\n\n sum_of_entropies = np.dot(w, scipy.stats.bernoulli(p).entropy())\n\n return entropy_of_sum - sum_of_entropies", "def _entropy(P):\n\n #TODO remove the \"+ 1e-20\" inside the log2 computation\n # it's just a hack to avoid to compute log2(0)\n ent = -1.0 * np.sum(P * np.log2(P+1e-20), axis=0)\n return ent", "def entropy(message):\n n = len(message)\n message = letter_freq(message)\n h = 0\n for n_i in message.values():\n p_i = n_i/n\n h += -p_i*(log2(p_i))\n return h", "def entropy_function(c, n):\n return -(c*1.0/n)*math.log(c*1.0/n,2)", "def entropy(message):\n message = letter_freq(message)\n n = sum(message.values())\n h = 0\n for n_i in message.values():\n p_i = n_i / n\n h += -p_i * log2(p_i)\n return h", "def div(self):\n freqList = [i / sum(self.has.values()) for i in self.has.values()]\n entropies = [i * math.log(i, 2) for i in freqList]\n entropy = -sum(entropies)\n return entropy", "def entropy(self, priors=None):\n def entropy_f(x):\n x[x != 0] *= np.log(x[x != 0])\n return -x.sum(axis=0)\n return self.utility(entropy_f, priors)", "def entropy(self, args):\n mean, stddev = args\n dist = tfp.distributions.Normal(loc=mean, scale=stddev)\n entropy = dist.entropy()\n return entropy", "def entropy(p: torch.Tensor):\n nz = (p > 0).to(p.device)\n\n eps = torch.finfo(p.dtype).eps\n p_stable = p.clone().clamp(min=eps, max=1 - eps)\n\n out = torch.where(\n nz,\n p_stable 
* torch.log(p_stable),\n torch.tensor(0.0, device=p.device, dtype=torch.float),\n )\n\n return -(out).sum(-1)", "def _fe_compute_domain_entropy(sample):\n # Compute entropy of domain.\n result = OrderedDict()\n p, lns = Counter(sample['domain']), float(len(sample['domain']))\n entropy = -sum(count / lns * math.log(count / lns, 2) for count in list(p.values()))\n\n result['entropy'] = entropy\n return result", "def calculate_entropy():\n\tstat = {} # dictionary - chars and number of repetitions\n\tallchar = 0.0 # total number of characters\n\tentropy = 0.0 # initial entropy\n\n\tfor line in sys.stdin.readlines():\n\t\tline = re.sub(r'\\s', '', line)\n\t\tfor znak in line:\n\t\t\tif znak in stat:\n\t\t\t\tstat[znak] += 1\n\t\t\telse:\n\t\t\t\tstat[znak] = 1\n\t\t\tallchar += 1\n\n\tfor znak in stat:\n\t\tstat[znak] = stat[znak]/allchar\n\t\tentropy += stat[znak] * log(stat[znak], 2)\n\n\tentropy *= -1\n\treturn entropy", "def entropy(l):\n\n probabilities = np.bincount(l) / len(l)\n with np.errstate(divide='ignore'): # ignore log(0) errors, we'll handle\n log_probabilities = np.log2(probabilities)\n log_probabilities[~np.isfinite(log_probabilities)] = 0\n return -np.sum(probabilities * log_probabilities)", "def prob_2_entropy(prob):\r\n n, c, h, w = prob.size()\r\n return -torch.mul(prob, torch.log2(prob + 1e-30)) / np.log2(c)", "def entropy(x):\n x_max, x_min = x.max(), x.min()\n assert (x_min >= 0) and (x_max <= 1)\n if x_min == x_max == 0:\n return np.float32(0.)\n # Take only non-zero values as log(0) = 0 :\n nnz_x = x[np.nonzero(x)]\n entr = -np.sum(nnz_x * np.log2(nnz_x))\n\n return entr", "def entropy(y):\n p = _proba(y)\n return (-p * np.log2(p)).sum()", "def entropyDistributed(distribution):\n return -sum(map(lambda p : p * log(p, 2), distribution))", "def spatial_entropy(map_):\n map_ = map_ / np.sum(map_)\n return -1 * np.sum(map_ * np.log(map_))", "def entropy(data):\n n, m = np.shape(data)\n data = np.tanh(data)\n data = data / np.sum(data, axis=0)\n a = data * 1.0\n a[np.where(data == 0)] = 0.000001\n\n e = (-1.0 / np.log(n)) * np.sum(data * np.log(a), axis=0)\n w = (1 - e) / np.sum(1 - e)\n return w", "def entropy(a):\n a = a.upper()\n\n freq = collections.defaultdict(int) # int() is the default constructor for non existent item, and returns 0\n for c in a:\n freq[c] = freq[c] + 1\n\n e = 0.0\n for f in freq.values():\n if f:\n p = f / len(a)\n e += p * math.log(p)\n\n return -e", "def entropy(data):\n strings, lens = Counter(data), np.float(len(data))\n return -sum(count / lens * np.log2(count / lens) for count in strings.values())", "def jensen_shannon_div(p, w=None):\n\n n, k = p.shape\n\n # make weight matrix sum to 1\n if w is None:\n w = np.ones(n)\n w = (np.array(w) / np.sum(w)).reshape((1, n))\n\n # weighted-average of the probability distributions\n p_avg = (w @ p)[0]\n\n return entropy_discrete(p_avg) - np.sum(w * entropy_discrete(p, axis=1))", "def __compute_entropy_probability(probability:np.ndarray) -> float:\n entropy = -np.sum(probability * np.log2(probability))\n return entropy", "def estimate_entropy(pwlen):\n return pwlen * math.log(len(frozenset(default_charlist)), 2)", "def entropy(self):\n ent = 0.0\n for f in self.byte_freq:\n if f > 0:\n freq = float(f) / self.byte_total\n ent = ent + freq * math.log(freq, 2)\n return -ent", "def entropy( freq ):\n N = 0.0\n entropy = 0.0\n for x, v in freq.items( ):\n N += v\n entropy -= v * math.log( v, 2 )\n return (N * math.log( N, 2 ) + entropy) / N", "def item_entropy(item_count, total_count):\n # Two cases where 
the entropy is 0\n if item_count == total_count or item_count == 0:\n return 0\n \n item_prob = 1.0 * item_count / total_count\n return -item_prob * math.log(item_prob)", "def entropy(self):\n raise NotImplementedError", "def entropy(img):\n # by calculating\n histogram = img.histogram()\n histogram_size = sum(histogram)\n histogram = [float(h) / histogram_size for h in histogram]\n\n return -sum([p * math.log(p, 2) for p in histogram if p != 0])", "def getAnsofBase(length, base):\n ans = 1\n for i in range(length-1):\n ans = ans * base + 1\n return ans", "def entropy(self, logits):\n probs = torch.exp(logits)\n entropy = - torch.sum(probs * logits, dim=-1)\n return entropy", "def entropy(string):\n p, lns = Counter(string), float(len(string))\n return -sum(count/lns * math.log(count/lns, 2) for count in p.values())", "def _entropy(data):\n hist = np.array(PIL.Image.fromarray(data).histogram())\n hist = hist / hist.sum()\n hist = hist[hist != 0]\n return -np.sum(hist * np.log2(hist))", "def logistic(mu, hw, x): \n n = np.exp(- ((x-mu)/(.477*hw))**2)\n return (2. * n)/( 1 + n)", "def entropy(n_bits):\n return n_bits and random.getrandbits(n_bits)", "def getEntropy(self, pVal, nVal):\n totVal = pVal + nVal\n if pVal == 0 or nVal == 0:\n return 0\n\n pProb = pVal/totVal\n nProb = 1 - pProb\n entropy = - (pProb * math.log(pProb, 2) + nProb * math.log(nProb, 2))\n return entropy", "def get_entropy(distribution, samples):\n entropy = -tf.reduce_sum(distribution.log_prob(samples), axis=1)\n return entropy", "def safe_sum_p_log_p(a, base=None):\r\n flat = ravel(a)\r\n nz = take(flat, nonzero(flat)[0])\r\n logs = log(nz)\r\n if base:\r\n logs /= log(base)\r\n return sum(nz * logs, 0)", "def _Apply(self, c, p):\n entropy = ((1.0 - c) * tf.log(1.0 - p) + c * tf.log(p)) / (-math.log(2))\n entropy = tf.reduce_mean(entropy)\n return entropy", "def entropy(strength=256, wordlist=wordlist):\n return os.urandom(strength // 8)", "def gaussian_entropy(log_std):\n return tf.reduce_sum(log_std + 0.5 * np.log(2.0 * np.pi * np.e), axis=-1)", "def entropy(self, policy_params):\n return self.head.entropy(policy_params)", "def entropy(y):\r\n\r\n # INSERT YOUR CODE HERE\r\n value, count = np.unique(y,return_counts = True)\r\n Hy = 0.0\r\n prob = count.astype(float)/len(y)\r\n for p in prob:\r\n Hy += -(p)*(np.log2(p))\r\n return Hy\r\n raise Exception('Function not yet implemented!')", "def entropy(*args):\n\n\n values = []\n leaf = -1\n\n for i, val in enumerate(args):\n if(val != 0):\n values.append(val * math.log(val, len(args)))\n if(val == 1):\n leaf = i\n \n return -sum(values), leaf", "def H(self, data):\n entropy = 0\n\n if not data:\n return entropy\n\n for x in range(256):\n p_x = float(data.count(chr(x))) / len(data)\n if p_x > 0:\n entropy -= p_x * math.log(p_x, 2)\n\n return entropy", "def entropy(self, f):\n f_log = -torch.log(self.einsum(\"q,q->q\", [f, 1 / self.w]))\n return self.einsum(\"q,q->\", [f, f_log])", "def entropy(self):\n\n \"\"\"Gets the first neighbours, which are the first 2*r+1 cells.\"\"\"\n current_neighbours = []\n amount = [0] * self.k ** (2 * self.r + 1)\n for i in range(2 * self.r + 1):\n current_neighbours.append(self.config[self.t, i % self.width])\n\n \"\"\"Calculates the rule and adds one to it's amount. 
It then removes the\n leftmost cell and adds a cell to the right.\"\"\"\n for i in range(len(self.config[self.t]) - 1):\n rule = 0\n for j in range(len(current_neighbours)):\n rule += int(current_neighbours[j] *\n self.k ** ((2 * self.r + 1) - j - 1))\n amount[len(amount) - 1 - rule] += 1\n current_neighbours.pop(0)\n current_neighbours.append(\n self.config[self.t, (2 * self.r + 1 + i) % self.width])\n\n \"\"\"Calculates the rule for the last neighbourhood.\"\"\"\n rule = 0\n for j in range(len(current_neighbours)):\n rule += int(current_neighbours[j] *\n self.k ** ((2 * self.r + 1) - j - 1))\n amount[len(amount)-1 - rule] += 1\n\n \"\"\"Calculates the Shannon entropy and the the average entropy so far.\"\"\"\n shannon = 0\n for i in range(len(amount)):\n if(amount[i] != 0):\n probability = amount[i] / self.width\n shannon -= probability * np.log2(probability)\n self.average_entropy = (self.average_entropy *\n self.t + shannon) / (self.t + 1)", "def entropycell(self):\n cells = [0] * self.k\n for i in range(self.width):\n cells[int(self.config[self.t, i])] += 1\n\n \"\"\"Calculates the Shannon entropy and the the average entropy so far.\"\"\"\n shannon = 0\n for i in range(self.k):\n if(cells[i] != 0):\n probability = cells[i] / self.width\n shannon -= probability * np.log2(probability)\n self.average_cell = (self.average_cell * self.t + shannon) / (self.t + 1)", "def powerDigitSum(base, exp):\n\treturn sum(power(base, exp))", "def calc_entropy(data_set): #calculates total entropy of the dataset\r\n republicans = 0\r\n democrats = 0\r\n total = 0\r\n for data_point in data_set:\r\n party = data_point.dat_party\r\n if party == \"R\":\r\n republicans+=1\r\n elif party == \"D\":\r\n democrats+=1\r\n total+=1\r\n\r\n if total == 0: return 0\r\n prob_dem = democrats/total\r\n prob_rep = republicans/total\r\n if prob_dem == 0: return -(prob_rep * math.log(prob_rep, 2))\r\n if prob_rep == 0: return -(prob_dem * math.log(prob_dem, 2))\r\n\r\n entropy = (-prob_dem * math.log(prob_dem, 2)) -(prob_rep * math.log(prob_rep, 2))\r\n return entropy", "def entropy(y):\n total = y.size\n value_counts = np.bincount(y).astype(\"float\")\n proportions = value_counts / y.size\n\n return sum(-i * np.log(i) for i in proportions if i)", "def bernoulli_logpmf(X, p):\n return -T.nnet.binary_crossentropy(p, X).sum(axis=-1)", "def entropy(self, **kwargs) -> TensorType:", "def entropy(self, **kwargs) -> TensorType:", "def entropy(self):\n return -np.sum(self.log_likelihoods * np.exp(self.log_likelihoods))", "def update_LTM(self, base):\n # Generate binary number corresponding to sign\n # of each input variable.\n b = ((base > 0).astype(int)).astype(str).flatten()\n # Update frequency of relevant area.\n self.LTM[int(''.join(b), 2)] += 1", "def power(base, exp):\n base_v, base_d = Tensor.get_value_and_deriv(base)\n exp_v, exp_d = Tensor.get_value_and_deriv(exp)\n\n result = base_v ** exp_v\n a = base_d.mul(exp_v * base_v ** (exp_v - 1.0))\n b = exp_d.mul(result * np.log(base_v))\n return Tensor(result, a + b)", "def shannon(state_space):\n if isinstance(state_space, int) or len(state_space) == 1:\n return 0\n ws = sum(state_space.values())\n if ws == 0:\n print(state_space)\n return math.log(ws) - sum(map(lambda x: x * math.log(x), state_space.values())) / ws", "def statsfromcounts(self,countMatrix):\n countSum = np.sum(countMatrix,dtype=np.float64)\n if countSum > 0.:\n p = countMatrix/countSum\n entropies = (-p)*np.log(p)\n # p=0 yields infinite log and hence nan entropy. 
We define\n # 0log(0) as 0 though:\n entropies[np.isnan(entropies)] = 0.\n entropy = np.sum(entropies)\n else:\n p = np.zeros(countMatrix.shape)\n entropy = 0.\n #\n return {'p':p,'entropy':entropy}", "def stdProbabilityNorm(self):\n return 1./factorial(self.alpha-1)" ]
[ "0.76036006", "0.67516744", "0.6684365", "0.6516514", "0.64760756", "0.62832654", "0.6267193", "0.62411416", "0.6221311", "0.62205845", "0.6211119", "0.6175704", "0.61547273", "0.59715253", "0.5954994", "0.59400564", "0.58892614", "0.5873213", "0.58565325", "0.5835365", "0.5805754", "0.5805641", "0.57898796", "0.5783675", "0.577394", "0.5706728", "0.56519604", "0.5623014", "0.55960464", "0.557805", "0.5555933", "0.55521727", "0.5548804", "0.5545181", "0.5535437", "0.55349624", "0.5525288", "0.552388", "0.5490453", "0.5484765", "0.54608643", "0.5456032", "0.5432452", "0.54163474", "0.5394291", "0.53849465", "0.53840566", "0.53810143", "0.5374781", "0.5370825", "0.53625935", "0.53461283", "0.5326564", "0.53195137", "0.531795", "0.53150016", "0.53150004", "0.529081", "0.52870965", "0.52859545", "0.5275443", "0.52538955", "0.5250039", "0.5239715", "0.5227947", "0.52119076", "0.5194125", "0.5184932", "0.5176645", "0.5164322", "0.51568663", "0.51526636", "0.51504743", "0.51451343", "0.5135546", "0.5133442", "0.512402", "0.5116963", "0.511572", "0.5115189", "0.5096984", "0.50817883", "0.50761795", "0.5073786", "0.504549", "0.5041545", "0.50307184", "0.50135", "0.5003491", "0.49958417", "0.49946123", "0.4979231", "0.49788022", "0.49788022", "0.49613234", "0.49554163", "0.49444473", "0.49439687", "0.49427527", "0.49340293" ]
0.8918445
0
Simply tests if `img` has 3 channels.
def is_rgb(img: np.ndarray) -> bool: return len(img.shape) >= 1 and img.shape[-1] == 3
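Illustrative behaviour of the is_rgb predicate above (an editorial sketch, not part of the dataset row): the check looks only at the size of the last axis, so a colour image of shape (H, W, 3) passes, a 2-D grayscale image does not, and any array whose trailing dimension is 3 also passes. The sketch simply exercises that stored logic on a few shapes.

import numpy as np

def is_rgb_sketch(img: np.ndarray) -> bool:
    # Verbatim logic from the stored document: true when the last axis has size 3.
    return len(img.shape) >= 1 and img.shape[-1] == 3

print(is_rgb_sketch(np.zeros((32, 32, 3))))  # True:  H x W x 3 colour image
print(is_rgb_sketch(np.zeros((32, 32))))     # False: 2-D grayscale image
print(is_rgb_sketch(np.zeros((32, 3))))      # True:  any trailing axis of size 3 passes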
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_rgb(im):\n if(im.ndim == 3):\n return True\n else:\n return False", "def rgb(self) -> bool:\n return self.image_shape[2] == 3", "def is_RGB(self,img_path):\n image=Image.open(img_path)\n image=np.asarray(image)\n if(len(image.shape)<3):\n return False\n return True", "def is3DImage(self):\n\t\treturn self.is3D", "def check_niimg_3d(niimg, dtype=None):\n return check_niimg(niimg, ensure_ndim=3, dtype=dtype)", "def num_channels_in_image(img: torch.Tensor):\n if img is None or img.ndim < 2:\n raise ValueError('Invalid image data')\n if img.ndim == 2:\n return 1\n else:\n return img.shape[0]", "def is_gray(img: np.ndarray):\n return len(img.shape) == 2 and img.shape[0] > 1 and img.shape[1] > 1", "def is_grayscale(img):\n return len(img.shape) == GS", "def is_cv3():\n (major, minor, _) = cv2.__version__.split('.')\n return int(major) == 3", "def check_image_size(img_name, img_path):\n \n try:\n \n # Open image\n img = Image.open(img_name)\n \n # Determine size of image\n width, height = img.size\n \n # Check if image is square\n if (width==height):\n is_square = True\n else:\n is_square = False\n \n # Check for channels in image\n img_list = list(img.getdata())\n img_max = max(img_list)\n if (type(img_max)==int):\n is_single_channel = True\n else:\n is_single_channel = False\n \n return is_square, is_single_channel\n \n finally:\n \n # Close image\n img.close()", "def _isGrayscale(self, img: ndarray) -> bool:\n if len(np.squeeze(img).shape) == 2:\n return True\n else:\n return False", "def colored(img: np.array):\n # Check if image is colored or black and white\n r, g, b = [normalize(img[..., i]) for i in range(3)]\n color_factor = sum([np.mean(np.square(c1 - c2)) for c1, c2 in ((r, g), (r, b), (b, r))])\n return color_factor >= 0.04", "def check(self, grain=50):\n opengles.glReadPixels(0, 0, self.ix, self.iy,\n GL_RGB, GL_UNSIGNED_BYTE,\n ctypes.byref(self.img))\n r0 = self.img[0:3]\n step = 3 * int(self.ix * self.iy / 50)\n for i in xrange(0, len(self.img)-3, step):\n if self.img[i:(i+3)] != r0:\n return True\n\n return False", "def count_nonblack_np(img):\n return img.any(axis=-1).sum()", "def check_img(img):\n\n if isinstance(img, (str, os.PathLike)) and os.path.exists(img):\n img = nib.load(img)\n elif not isinstance(img, nib.spatialimages.SpatialImage):\n raise TypeError('Provided image must be an existing filepath or a '\n 'pre-loaded niimg-like object')\n\n # ensure 3D or squeezable to 3D\n img = nib.funcs.squeeze_image(img)\n if len(img.shape) != 3:\n raise ValueError('Provided image must be 3D')\n\n # check if atlas data is int or castable to int\n # if image is arrayproxy convert it to an array for speed-up\n data = np.asarray(img.dataobj)\n cast = nib.is_proxy(img.dataobj)\n if img.header.get_data_dtype().kind not in ['i', 'u']:\n idata = data.astype('int32')\n cast = np.allclose(idata, data)\n data = idata\n if not cast:\n raise ValueError('Provided image should have integer values or '\n 'be safely castable to int without data loss')\n if cast:\n img = img.__class__(data, img.affine, header=img.header)\n img.header.set_data_dtype(np.int32)\n\n return img", "def countless3d(data):\n modshape = np.array(data.shape) % 2\n assert sum(\n modshape\n ) == 0, \"COUNTLESS 3D currently only supports even sided images.\" # someone has to write even_to_odd3d\n\n return countless(data, (2, 2, 2))", "def detect(frame: numpy.ndarray) -> bool:\n color = frame[:20, 1100:1150].mean(axis=(0, 1))\n return numpy.linalg.norm(color - BG_COLOR) < 5", "def check_rgb(image):\n im_yiq = []\n rgb = 
False\n y = image\n if len(image.shape) > 2 and image.shape[-1] == 3: # The image is RGB\n rgb = True\n im_yiq = rgb2yiq(image) # convert to YIQ format\n y = im_yiq[:, :, 0]\n return rgb, y, im_yiq", "def is_three_channeled(value):\n return len(value) == 3", "def num_channels(self):\n return 3", "def check_num_channels(method):\n\n @wraps(method)\n def new_method(self, *args, **kwargs):\n [num_output_channels], _ = parse_user_args(method, *args, **kwargs)\n if num_output_channels is not None:\n if num_output_channels not in (1, 3):\n raise ValueError(\"Number of channels of the output grayscale image\"\n \"should be either 1 or 3. Got {0}\".format(num_output_channels))\n\n return method(self, *args, **kwargs)\n\n return new_method", "def check(self, grain=50):\r\n opengles.glDisable(GL_SCISSOR_TEST)\r\n self.s_flg = False\r\n opengles.glReadPixels(0, self.y0, self.ix, 1,\r\n GL_RGB, GL_UNSIGNED_BYTE,\r\n ctypes.byref(self.img))\r\n r0 = self.img[0:3]\r\n for i in xrange(0, self.img_sz, self.step):\r\n if self.img[i:(i+3)] != r0:\r\n return True\r\n\r\n return False", "def check_color(i, j, k):\n img.show()\n image = Image.new(\"RGB\", (200, 200), (int(Y), int(Y), int(Y)))\n image.show()\n image = Image.new(\"RGB\", (200, 200), (int(i), int(j), int(k)))\n image.show()", "def test_05_01_mask_of3D(self):\n x=cpi.Image()\n x.image = np.ones((10,10,3))\n self.assertTrue(x.mask.ndim==2)", "def valid_image(self, image):\n valid = False\n if (isinstance(image, list) and len(image) == 11):\n valid = True\n for row in image:\n if (isinstance(row, list) and len(row) == 11):\n for pixel in row:\n if not self.valid_color(pixel):\n valid = False\n break\n else:\n valid = False\n break\n if not valid:\n _LOGGER.error(\"Invalid image data received\")\n return valid", "def image_check(kwargs) -> bool:\n\n # Kwarg argument check\n return kwarg_check(\n kwargs=kwargs,\n options=[\n \"min_captured_at\",\n \"max_captured_at\",\n \"radius\",\n \"image_type\",\n \"organization_id\",\n \"fields\",\n ],\n callback=\"image_check\",\n )", "def is_grayscale(self):\n return self.r == self.g == self.b", "def is_grayscale(self):\n return self.r == self.g == self.b", "def _check_data(data):\n if not (data.dtype == _np.float32 and data.flags.c_contiguous):\n raise ValueError('supplied data must be float32 and C contiguous')\n if data.ndim == 2:\n num_frames, channels = data.shape\n elif data.ndim == 1:\n num_frames, channels = data.size, 1\n else:\n raise ValueError('rank > 2 not supported')\n return num_frames, channels", "def contains_black(image):\n extrema = ImageStat.Stat(image).extrema\n r = extrema[0][0]\n g = extrema[1][0]\n b = extrema[2][0]\n\n if r == 0 and g == 0 and b == 0:\n return True\n\n return False", "def check_layers_count(context, count):\n history = DOCKER_CLIENT.history(context.config.userdata['IMAGE'])\n if len(history) == int(count):\n return True\n\n raise Exception(\"Image does not contain %s layers, current number of layers: %s\" % (count, len(history)), history)", "def is_colour(self, im):\n hsl = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)\n h, s, v = np.mean(hsl, (0, 1))\n if s < 100:\n self.log.info(\n \"Grayscale scan detected (hsv %s, %s, %s), converting...\", h, s, v\n )\n return False\n return True", "def assertIsNifti3D(*args):\n for f in args:\n assertIsNifti(f)\n d = ensure.ensureIsImage(f)\n assert len(d.shape) == 3, \\\n 'incorrect shape for 3D nifti: {}:{}'.format(d.shape, f)", "def is_greyscale(im):\n if im.mode == \"1\":\n return True\n\n if im.mode not in (\"L\", \"RGB\"):\n return 
False\n #raise ValueError(\"Unsuported image mode\")\n\n if im.mode == \"RGB\":\n rgb = im.split()\n if ImageChops.difference(rgb[0],rgb[1]).getextrema()[1]!=0:\n return False\n if ImageChops.difference(rgb[0],rgb[2]).getextrema()[1]!=0:\n return False\n return True", "def cue_exist(image):\n for x1 in range(IMAGE_LEFT, IMAGE_RIGHT, 2):\n for y1 in range(IMAGE_TOP, IMAGE_DOWN, 25):\n point1 = (x1, y1)\n if is_cue_color(image.getpixel(point1)):\n\n left2 = max(IMAGE_LEFT, x1 - 70)\n right2 = min(IMAGE_RIGHT, x1 + 70)\n y2 = y1 + 25\n for x2 in range(left2, right2):\n point2 = (x2, y2)\n if is_cue_line(point1, point2, image):\n return True\n return False", "def is_valid(box, img):\n valid_width = box['top_left_x'] > 0 and box['bottom_right_x'] < img.shape[1]\n valid_height = box['top_left_y'] > 0 and box['bottom_right_y'] < img.shape[0]\n return valid_width and valid_height", "def has_valid_channel_values(rgb_coll):\n return all([is_0to255(c) and is_int(c) for c in rgb_coll])", "def test_has_alpha(self):\n image_3d = np.array([[ # One image with shape (1, 2, 3)\n [1, 2, 3],\n [4, 5, 6]\n ]])\n image_4d = np.array([[ # One image with shape (1, 3, 4)\n [1, 2, 3, 4],\n [4, 5, 6, 7],\n [8, 9, 10, 11]\n ]])\n image_5d = np.array([[ # One image with shape (1, 1, 5)\n [1, 2, 3, 4, 5]\n ]])\n self.assertEqual(localHDR.has_alpha(image_3d), False)\n self.assertEqual(localHDR.has_alpha(image_4d), True)\n self.assertEqual(localHDR.has_alpha(image_5d), False)", "def has_three_of_a_kind(self):\n self.suit_hist()\n for val in self.ranks.values():\n if val >= 3:\n self.rank_per_hand['2'] = \"three of a kind\"\n return True\n return False", "def is_greyscale(im):\n if im.mode not in (\"L\", \"RGB\"):\n raise ValueError(\"Unsuported image mode\")\n\n if im.mode == \"RGB\":\n rgb = im.split()\n if ImageChops.difference(rgb[0],rgb[1]).getextrema()[1]<50: \n return True\n if ImageChops.difference(rgb[1],rgb[2]).getextrema()[1]<50: \n return True\n if ImageChops.difference(rgb[0],rgb[2]).getextrema()[1]<50: \n return True\n else:\n return True\n return False", "def is_rgb_color(v):\n if hasattr(v, \"r\") and hasattr(v, \"g\") and hasattr(v, \"b\"):\n v = [v.r, v.g, v.b]\n if not isiterable(v) or len(v) < 3:\n return False\n try:\n return all([0 <= int(x) <= 255 for x in v[:3]])\n except (TypeError, ValueError):\n return False", "def needs_to_convert_to_RGB(ds: \"Dataset\") -> bool:\n return False", "def three_different_np_images():\n rgb1 = np.zeros((32, 32, 3), dtype=np.uint8)\n rgb1[..., 0] = 192\n rgb1[..., 1] = 0\n rgb1[..., 2] = 0\n # img1 = Image.fromarray(rgb1)\n\n rgb2 = np.zeros((32, 32, 3), dtype=np.uint8)\n rgb2[..., 0] = 0\n rgb2[..., 1] = 192\n rgb2[..., 2] = 0\n # img2 = Image.fromarray(rgb2)\n\n rgb3 = np.zeros((32, 32, 3), dtype=np.uint8)\n rgb3[..., 0] = 0\n rgb3[..., 1] = 0\n rgb3[..., 2] = 192\n # img3 = Image.fromarray(rgb3)\n\n return (rgb1, rgb2, rgb3)", "def inside_gamut(rgb: ndarray) -> bool:\n return all(rgb >= 0)", "def number_of_images_valid():\r\n if number_of_images_a_valid() and number_of_images_b_valid():\r\n return True\r\n else:\r\n return False", "def IsOk(*args, **kwargs):\n return _gdi_.Colour_IsOk(*args, **kwargs)", "def check_if_original(article):\n num_img = len(article.find_all(\"img\"))\n return num_img < 2", "def green_channel(input_image):\n return input_image[:, :, 1]", "def check_shape(self):\r\n if np.array(self.img).shape != (1536, 2048, 3):\r\n raise BadShape", "def _image_is_large_enough(im):\n return (im.shape[0] >= MIN_DIM) and (im.shape[1] >= MIN_DIM)", "def 
branches(image):\n return _neighbors_conv(image) > 2", "def equalize_image_channel(channel):\n\n if channel[0][0].shape == (3):\n raise AttributeError(\"More than one color channel.\")\n return cv.equalizeHist(channel)", "def is_three_of_a_kind(hand):\n count = {c:0 for c in cards.keys()}\n for card in hand:\n count[card[0]] += 1\n for c in count:\n if count[c] == 3:\n return (True, cards[c])\n return None", "def check_sub_image(self, ndvi_filename, input_path):\n rgb_filename = re.sub(\"BWNDVI\",\"RGB\",ndvi_filename)\n rgb_img = Image.open(self.get_file(os.path.join(input_path, rgb_filename),\n self.input_location_type))\n img_ok = check_image_ok(rgb_img, 0.05)\n return img_ok", "def get_img_channels(self, pipeline_cfg: RVPipelineConfig) -> int:\n all_scenes = pipeline_cfg.dataset.all_scenes\n if len(all_scenes) == 0:\n return 3\n for scene_cfg in all_scenes:\n if scene_cfg.raster_source.channel_order is not None:\n return len(scene_cfg.raster_source.channel_order)\n log.info(\n 'Could not determine number of image channels from '\n 'DataConfig.img_channels or RasterSourceConfig.channel_order. '\n 'Building first scene to figure it out. This might take some '\n 'time. To avoid this, specify one of the above.')\n with get_tmp_dir() as tmp_dir:\n scene = all_scenes[0].build(\n pipeline_cfg.dataset.class_config,\n tmp_dir,\n use_transformers=True)\n img_channels = scene.raster_source.num_channels\n return img_channels", "def channels_from_example(example):\n image = get_image_array_from_example(example)\n return split_3d_array_into_channels(image)", "def is3D(data):\n return data.find(\"x3\") != -1 and data.find(\"y3\") != -1 and data.find(\"z3\") != -1", "def ensure_alpha_channel(img, alpha=1.0, dtype=np.float32, copy=False):\n img = im_core.ensure_float01(img, dtype=dtype, copy=copy)\n c = im_core.num_channels(img)\n if c == 4:\n return img\n else:\n if isinstance(alpha, np.ndarray):\n alpha_channel = alpha\n else:\n alpha_channel = np.full(img.shape[0:2], fill_value=alpha, dtype=img.dtype)\n if c == 3:\n return np.dstack([img, alpha_channel])\n elif c == 1:\n return np.dstack([img, img, img, alpha_channel])\n else:\n raise ValueError(\n 'Cannot ensure alpha. 
Input image has c={} channels'.format(c))", "def is_blurry_colorful(image):\n b, _, _ = cv2.split(image)\n a = variance_of_laplacian(b)\n return (variance_of_laplacian(b) < 100)", "def split_image_into_channels(image):\n red_channel = image[:, :, 0]\n green_channel = image[:, :, 1]\n blue_channel = image[:, :, 2]\n return red_channel, green_channel, blue_channel", "def check_sub_image(self, ndvi_filename, input_path):\n rgb_filename = re.sub(\"NDVI\",\"RGB\",ndvi_filename)\n rgb_img = self.get_image(os.path.join(input_path, rgb_filename))\n\n img_ok = check_image_ok(rgb_img, 0.05)\n return img_ok", "def green_channel(img):\n\n green = np.zeros(img.shape,dtype=float)\n\n green[:,:,1] = np.copy(img[:,:,1])\n\n return green", "def showChannels(img, ypos = 0, wait=False):\n num_channels = img.shape[2] if len(img.shape) == 3 else 1\n if num_channels == 1:\n label = 'One channel'\n cv2.imshow(label, img)\n cv2.moveWindow(label, 0, ypos)\n else:\n for i in range(num_channels):\n label = 'Channel ' + str(i)\n cv2.imshow(label, img[:,:,i])\n cv2.moveWindow(label, i * img.shape[1], ypos)\n if wait:\n if cv2.waitKey() & 0xFF == ord('q'):\n sys.exit(0)", "def check_color(c):\n\n c = asarray(c)\n if c.ndim == 1:\n c = c.flatten()\n c = c[newaxis, :]\n if c.shape[1] != 3:\n raise Exception(\"Color must have three values per point\")\n elif c.ndim == 2:\n if c.shape[1] != 3:\n raise Exception(\"Color array must have three values per point\")\n return c", "def get_num_channels(x):\n return x.get_shape().as_list()[-1]", "def check_niimg_4d(niimg, return_iterator=False, dtype=None):\n return check_niimg(\n niimg, ensure_ndim=4, return_iterator=return_iterator, dtype=dtype\n )", "def test_one_image(self, img):\n return self.__image_pipeline(img)", "def test_check_image_color(self):\n result = analyzer.check_image_color(\"tests/test_files/sample.jpg\")\n self.assertEqual(result, \"light\")", "def test_complex(self):\n image = self.design.layout.layers[0].images[2]\n assert len(image.shape_instances) == 3", "def pixel_at(self, x, y):\n return self.arr[x, y, 1] == 255", "def check_image_color(image):\n\n def check_color(i, j, k):\n \"\"\" Function used only for DEBUGGING\"\"\"\n img.show()\n image = Image.new(\"RGB\", (200, 200), (int(Y), int(Y), int(Y)))\n image.show()\n image = Image.new(\"RGB\", (200, 200), (int(i), int(j), int(k)))\n image.show()\n\n if not os.path.isfile(image):\n return \"Image not found\"\n\n def calculate_bgr(data):\n average_color_per_row = numpy.average(data, axis=0)\n average_color = numpy.average(average_color_per_row, axis=0)\n return tuple(average_color)\n\n def calculate_y(r, g, b):\n alpha = 0.299\n betta = 0.587\n gamma = 0.114\n return alpha * r + betta * g + gamma * b\n\n # split the image for four squares calucate averate pixel for them and take higest value\n # blure image and save to /Library/Caches as com.apple.desktop.admin.png\n # in case using blur tool --> blur = cv2.blur(img,(5,5))\n try:\n img_cv_data = cv2.imread(image)\n B, G, R = calculate_bgr(img_cv_data)\n Y = calculate_y(B, G, R)\n height, width = img_cv_data.shape[:2]\n except Exception as err:\n print(f\"[ERROR] {err} with image: {image}\")\n return \"Error parsing image\"\n\n # image detection\n if Y < 72.0:\n _type = \"dark\"\n elif Y >= 73.0 and Y <= 108.0:\n _type = \"evening\"\n else:\n _type = \"light\"\n\n return _type", "def check_image(image, depth):\n cols, rows = image.size\n divisor = 2**depth\n n_rows = round(rows/divisor) * divisor\n n_cols = round(cols/divisor) * divisor\n # d = min(n_rows, 
n_cols)\n image = image.resize((n_cols, n_rows))\n image_array = np.asarray(image)\n return image_array, Fraction(n_rows, n_cols)", "def validate_shape_and_dtype(self):\n if self.rgb.dtype != tf.float32:\n raise ValueError(\"Expected float32 rgb!\")\n if len(self.rgb.shape) != 4:\n raise ValueError(f\"Expected (B, H, W, C), got {self.rgb.shape}\")\n _, _, _, channels = self.rgb.shape.as_list()\n if channels != 3:\n raise ValueError(f\"Expected 3 rgb channels, got shape {self.rgb.shape}\")", "def _test_image_alpha(self, image):\n\n # In the interest of speed, let's see if we've already done this one...\n result = self._alphatest.get(image, None)\n if result is not None:\n return result\n\n if image.channels != 4:\n result = False\n elif not image.use_alpha:\n result = False\n else:\n # Using bpy.types.Image.pixels is VERY VERY VERY slow...\n key = _Texture(image=image)\n with GLTexture(key, fast=True) as glimage:\n result = glimage.has_alpha\n\n self._alphatest[image] = result\n return result", "def check_availability(img_path):\n # loading gray image\n gray_image = cv2.imread(img_path, 0)\n\n # check whether img give empty list or not\n flag = face_recognition.face_locations(gray_image)\n if flag:\n return True\n return False", "def test_RGB_mode():\n\n model = Instafilter(\"Lo-Fi\")\n\n f_image = __local__ / \"Normal.jpg\"\n\n img1 = model(f_image)\n img2 = model(f_image, is_RGB=True)\n\n diff = (img1 - img2).sum()\n\n assert abs(diff) > 0", "def color_detection(self, img):\n\n # red\n low_red = np.array([0, 0, 160])\n high_red = np.array([130, 130, 255])\n red_threshold = cv2.inRange(img, low_red, high_red)\n\n # green\n low_green = np.array([0, 120, 0])\n high_green = np.array([90, 255, 90])\n green_threshold = cv2.inRange(img, low_green, high_green)\n\n # yellow\n low_yellow = np.array([0, 140, 140])\n high_yellow = np.array([150, 255, 255])\n yellow_threshold = cv2.inRange(img, low_yellow, high_yellow)\n\n count = np.sum(np.nonzero(red_threshold))\n if count == 0:\n print(\"Not red\")\n else:\n print(\"red\")\n return \"red\"\n\n count = np.sum(np.nonzero(green_threshold))\n if count == 0:\n print(\"Not green\")\n else:\n print(\"green\")\n return \"green\"\n\n count = np.sum(np.nonzero(yellow_threshold))\n if count == 0:\n print(\"Not yellow\")\n else:\n print(\"yellow\")\n return \"yellow\"", "def falso_color(img):\n rows,cols = img.shape\n img_red = np.copy(img)\n img_green = np.copy(img)\n img_blue = np.copy(img)\n img_false = np.zeros((rows, cols, 3), dtype=np.uint8)\n\n for i in range(0,rows):\n for j in range(0,cols):\n\n if (0 <= img[i, j] <= 43):\n img_red[i, j] = 255\n img_green[i, j] = img[i, j] * (255 / 43)\n img_blue[i, j] = 0\n\n elif(43 < img[i, j] <= 86):\n img_red[i, j] = (255 - (img[i, j] - 43) * (255 / 43))\n img_green[i, j] = 255\n img_blue[i,j] = 0\n\n elif(86 < img[i, j] <= 128):\n img_red[i, j] = 0\n img_green[i, j] = 255\n img_blue[i, j] = ((img[i, j] - 86) * (255 / 42))\n\n elif(128<img[i, j]<=171):\n img_red[i, j] = 0\n img_green[i, j] = ((171 - img[i, j]) * (255 / 43))\n img_blue[i, j] = 255\n\n elif(171 < img[i, j] <= 214):\n img_red[i, j] = (img[i, j] - 171) * (255 / 43)\n img_green[i, j] = 0\n img_blue[i, j] = 255\n\n elif(214 < img[i, j]):\n img_red[i, j] = 255\n img_green[i, j] = 0\n img_blue[i, j] = ((255 - img[i, j]) * (255 / 41))\n\n img_false[:, :, 0] = img_red\n img_false[:, :, 1] = img_green\n img_false[:, :, 2] = img_blue\n\n return img_false", "def has_images(self):\n return len(self.images) > 0", "def pixel_has_same_depth(self, x, y, z: float, 
threshold: float) -> bool:\n return abs(self.frame[int(y)][int(x)] * 1000 - z) < threshold", "def conditional_to_greyscale(self, image):\r\n bands = image.getbands()\r\n if len(bands) >= 3:\r\n # histogram for all bands concatenated\r\n hist = image.histogram()\r\n if len(hist) >= 768:\r\n hist1 = hist[0:256]\r\n hist2 = hist[256:512]\r\n hist3 = hist[512:768]\r\n # print \"length of histograms: %d %d %d\" % (len(hist1), len(hist2), len(hist3))\r\n if hist1 == hist2 == hist3:\r\n # print \"All histograms are the same!\"\r\n return image.convert('L')\r\n return image", "def bbox_3D(img):\n\tr = np.any(img, axis=(1, 2))\n\tc = np.any(img, axis=(0, 2))\n\tz = np.any(img, axis=(0, 1))\n\n\trmin, rmax = np.where(r)[0][[0, -1]]\n\tcmin, cmax = np.where(c)[0][[0, -1]]\n\tzmin, zmax = np.where(z)[0][[0, -1]]\n\n\treturn rmin, rmax, cmin, cmax, zmin, zmax", "def is_three_of_a_kind(hand):\n\tis_a_three_of_a_kind = False\n\ti = 0\n\twhile i < 13:\n\t\tif hand[i] == 3:\n\t\t\tis_a_three_of_a_kind = True\n\t\ti += 1 \n\t\t\n\thigh_card = 0\n\tj = 0\n\twhile j < 13 and is_a_three_of_a_kind == True:\n\t\tif hand[j] == 3 and j >= high_card:\n\t\t\thigh_card = j\n\t\tj += 1\n\tif is_a_three_of_a_kind:\n\t\treturn True, high_card\n\telse:\n\t\treturn False", "def is3_d(self):\n return self.container['is3_d']", "def image_comparison(self):\n for result in self.cards:\n if result.image_status:\n return True\n return False", "def convert_when_colour(colour, img):\n if len(colour) == 3:\n if len(img.shape) == 2:\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n elif img.shape[2] == 1:\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n return img", "def should_change_PhotometricInterpretation_to_RGB(ds: \"Dataset\") -> bool:\n return False", "def check_type(filename):\n try:\n im = Image.read(filename)\n except SanperaError:\n return False\n else:\n return im.original_format in [b'JPEG', b'PNG', b'GIF']", "def img_split(img):\n\tbands = img.shape[2]\n\tif bands is 1:\n\t\treturn \"Image already is 1D. 
Why would you split it?\"\n\n\tband1 = img[:, :, 0]\n\tband2 = img[:, :, 1]\n\tband3 = img[:, :, 2]\n\tif bands is 4:\n\t\tband4 = img[:, :, 4]\n\t\treturn(band1, band2, band3, band4)\n\treturn(band1, band2, band3)", "def detect_face(image):\n cascadePath = \"haarcascade_frontalface_default.xml\"\n faceCascade = cv2.CascadeClassifier(cascadePath)\n faces = faceCascade.detectMultiScale(image)\n if len(faces)>=1:#Should be == , not >=\n return True\n return False", "def numberOfChannelsInCube(img, returnFreqs=False, returnChannelWidth=False, \n verbose=False, header=None, faster=False):\n if (header is None):\n print \"imhead\", # the comma prevents the newline so that ...10...20 will be on same line\n if faster:\n header = imheadlist(img, omitBeam=True)\n else:\n header = imhead(img,mode='list')\n if (header is None):\n print \"imhead failed -- this may not be a CASA or FITS image cube.\"\n return\n nchan = 1\n for axis in range(3,5):\n if ('ctype'+str(axis) in header.keys()):\n if (header['ctype'+str(axis)] in ['Frequency','Velocity']):\n nchan = header['shape'][axis-1]\n break\n firstFreq = 0\n lastFreq = 0\n if ('ctype4' not in header.keys()):\n print \"There is no fourth axis in this image.\"\n elif (header['ctype4'].lower().find('freq') >= 0):\n crpix = header['crpix4']\n crval = header['crval4']\n cdelt = header['cdelt4']\n firstFreq = crval + (0-crpix)*cdelt\n lastFreq = crval + (nchan-1-crpix)*cdelt\n if (verbose):\n print \"Channel 0 = %.0f Hz\" % (firstFreq)\n print \"Channel %d = %.0f Hz\" % (nchan-1,lastFreq)\n elif (header['ctype3'].lower().find('freq') >= 0):\n crpix = header['crpix3']\n crval = header['crval3']\n cdelt = header['cdelt3']\n firstFreq = crval + (0-crpix)*cdelt\n lastFreq = crval + (nchan-1-crpix)*cdelt\n if (verbose):\n print \"Channel 0 = %.0f Hz\" % (firstFreq)\n print \"Channel %d = %.0f Hz\" % (nchan-1,lastFreq)\n else:\n casalogPost(\"Neither the third or fourth axis is frequency.\")\n if (returnFreqs):\n if (returnChannelWidth):\n return(nchan,firstFreq,lastFreq,cdelt)\n else:\n return(nchan,firstFreq,lastFreq)\n else:\n if (returnChannelWidth):\n return(nchan,cdelt)\n else:\n return(nchan)", "def test_make_3bit_errors(self):\r\n bitvecs = golay._make_3bit_errors()\r\n self.assertTrue(list([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0,\r\n 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]) in map(list, bitvecs))\r\n self.assertFalse(list([0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0,\r\n 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0]) in map(list, bitvecs))", "def equals(self, image: 'BaseImage') -> bool:\n assert isinstance(image, BaseImage)\n im1 = pygame.image.tostring(self._surface, 'RGBA')\n im2 = pygame.image.tostring(image._surface, 'RGBA')\n return im1 == im2", "def face_detector(img_path: str):\r\n img = cv2.imread(img_path)\r\n\r\n # if no image at that path, return False\r\n if img is None:\r\n return False\r\n\r\n # convert to grey\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\r\n # detect faces. 
If no face detected, it's empty and len(faces) will be 0\r\n faces = face_cascade.detectMultiScale(gray)\r\n return len(faces) > 0", "def isthmus1D(cube):\n \n return countComponents26(cube) >= 2;", "def num_channels(input_tensor):\n return input_tensor.get_shape().as_list()[-1]", "def has_data(self):\n if len(self.channels) > 0:\n return True\n return False", "def check_image_before_load(self,image_dims):\n\n if image_dims[0]*image_dims[1]*image_dims[2]*4 < self.check_available_memory():\n return True\n else:\n return False", "def im_detect(net, target_data,im_data, im_info, features_given=True):\n\n cls_prob, rois = net(target_data, im_data, im_info,\n features_given=features_given)\n scores = cls_prob.data.cpu().numpy()[0,:,:]\n zs = np.zeros((scores.size, 1))\n scores = np.concatenate((zs,scores),1)\n boxes = rois.data.cpu().numpy()[0,:, :]\n\n return scores, boxes", "def validate_image(path):\n problems = False\n # Rasterio env is required to make sure that the gdal bindings are setup correctly.\n with rasterio.Env():\n try:\n dataset = rasterio.open(path)\n except Exception as e:\n logging.error(\"Could not open dataset\", e)\n return False\n\n # Check the bands have sort of sensible values\n if dataset.count != args.bands:\n logging.error(f\"There is not the required number of bands. Expected {args.bands} found {dataset.count}\")\n problems = True\n\n if not data_validation.check_data(dataset):\n problems = True\n\n # Validate coordinate box doesn't cover the origin.\n # Also make sure that it has valid coordinates.\n if dataset.transform:\n top_left = dataset.transform * (0, 0)\n bottom_right = dataset.transform * (dataset.width, dataset.height)\n if np.sign(bottom_right[0]) != np.sign(top_left[0]) and np.sign(bottom_right[1]) != np.sign(top_left[1]):\n logging.error(f\"Data set appears to be over the origin of the coordinate space.\")\n problems = True\n else:\n logging.error(f\"Dataset transform is missing.\")\n problems = True\n return not problems # return true if the image is valid" ]
[ "0.72373766", "0.7074375", "0.67956823", "0.6699473", "0.66691566", "0.6518619", "0.65036654", "0.62500054", "0.62126744", "0.6188705", "0.61673236", "0.61294484", "0.61257964", "0.6089409", "0.594763", "0.59257436", "0.5916708", "0.5915381", "0.58001804", "0.5760735", "0.57465404", "0.5720184", "0.5707138", "0.5641538", "0.5616377", "0.56064075", "0.5595096", "0.5595096", "0.5594173", "0.5593368", "0.5572259", "0.55659056", "0.5559572", "0.55576086", "0.5541099", "0.55328524", "0.5521901", "0.5517286", "0.54931706", "0.54818773", "0.54795647", "0.54764485", "0.5473078", "0.5469509", "0.5467954", "0.5465145", "0.544698", "0.5431321", "0.54259855", "0.54113495", "0.5405406", "0.54026693", "0.53989583", "0.5395679", "0.5384201", "0.53724164", "0.53657657", "0.53552866", "0.53539604", "0.5347817", "0.5347468", "0.53413934", "0.5336926", "0.53041726", "0.5292", "0.5289654", "0.5289446", "0.5286033", "0.5267501", "0.5226458", "0.52218133", "0.52165663", "0.52103984", "0.51822054", "0.517682", "0.5152935", "0.5148545", "0.5142711", "0.5140702", "0.51406986", "0.5139755", "0.5134656", "0.5124755", "0.51243764", "0.5120254", "0.5119867", "0.51106673", "0.51061976", "0.50865287", "0.5084243", "0.5083169", "0.5075416", "0.50653493", "0.5059952", "0.50542486", "0.5053621", "0.50484675", "0.50454277", "0.50449705", "0.5031706" ]
0.74561703
0
Converts an array [..., channels] of RGB values to HSI color values (H in rad). RGB values are assumed to be normalized to (0, 1).
def rgb_to_hsi(image: np.ndarray) -> np.ndarray: if not is_rgb(image): raise ValueError("Input needs to be an array of RGB values") r = image[..., 0] g = image[..., 1] b = image[..., 2] out = np.zeros_like(image) # allequal = (img == img[:, :, 0, np.newaxis]).all(axis=-1) with np.errstate(invalid="ignore"): tmp = (2.0 * r - g - b) / 2.0 / np.sqrt((r - g) ** 2 + (r - b) * (g - b)) # if r==g==b then 0/0 theta = np.arccos(np.clip(tmp, -1.0, +1.0)) out[..., 0] = np.where(b <= g, theta, 2 * np.pi - theta) # H out[..., 2] = np.sum(image, axis=-1) / 3.0 # I out[..., 1] = 1 - np.amin(image, axis=-1) / out[..., 2] # S if r==g==b==0 then 0/0 np.nan_to_num(out[..., 0:2], copy=False) return out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rgb2hsl_img(rgb):\r\n \r\n def core(_rgb, _hsl):\r\n\r\n irgb = _rgb.astype(np.uint16)\r\n ir, ig, ib = irgb[:, :, 0], irgb[:, :, 1], irgb[:, :, 2]\r\n h, s, l = _hsl[:, :, 0], _hsl[:, :, 1], _hsl[:, :, 2]\r\n\r\n imin, imax = irgb.min(2), irgb.max(2)\r\n iadd, isub = imax + imin, imax - imin\r\n\r\n ltop = (iadd != 510) * (iadd > 255)\r\n lbot = (iadd != 0) * (ltop == False)\r\n\r\n l[:] = iadd.astype(np.float) / 510\r\n\r\n fsub = isub.astype(np.float)\r\n s[ltop] = fsub[ltop] / (510 - iadd[ltop])\r\n s[lbot] = fsub[lbot] / iadd[lbot]\r\n\r\n not_same = imax != imin\r\n is_b_max = not_same * (imax == ib)\r\n not_same_not_b_max = not_same * (is_b_max == False)\r\n is_g_max = not_same_not_b_max * (imax == ig)\r\n is_r_max = not_same_not_b_max * (is_g_max == False) * (imax == ir)\r\n\r\n h[is_r_max] = ((0. + ig[is_r_max] - ib[is_r_max]) / isub[is_r_max])\r\n h[is_g_max] = ((0. + ib[is_g_max] - ir[is_g_max]) / isub[is_g_max]) + 2\r\n h[is_b_max] = ((0. + ir[is_b_max] - ig[is_b_max]) / isub[is_b_max]) + 4\r\n h[h < 0] += 6\r\n h[:] /= 6\r\n\r\n hsl = np.zeros(rgb.shape, dtype=np.float)\r\n cpus = multiprocessing.cpu_count()\r\n length = int(math.ceil(float(hsl.shape[0]) / cpus))\r\n line = 0\r\n threads = []\r\n while line < hsl.shape[0]:\r\n line_next = line + length\r\n thread = threading.Thread(target=core, args=(rgb[line:line_next], hsl[line:line_next]))\r\n thread.start()\r\n threads.append(thread)\r\n line = line_next\r\n\r\n for thread in threads:\r\n thread.join()\r\n\r\n return hsl", "def hsv_convert(arr):\n \n # adapted from Arnar Flatberg\n # http://www.mail-archive.com/[email protected]/msg06147.html\n # it now handles NaN properly and mimics colorsys.rgb_to_hsv output\n\n import numpy as np\n\n #assert(arr.min()>=0 and arr.max()<=1)\n\n #arr = arr/255.\n arr = arr.astype(\"float32\")\n out = np.empty_like(arr)\n\n arr_max = arr.max(-1)\n delta = arr.ptp(-1)\n s = delta / arr_max\n \n s[delta==0] = 0\n\n # red is max\n idx = (arr[:,:,0] == arr_max) \n out[idx, 0] = (arr[idx, 1] - arr[idx, 2]) / delta[idx]\n\n # green is max\n idx = (arr[:,:,1] == arr_max) \n out[idx, 0] = 2. + (arr[idx, 2] - arr[idx, 0] ) / delta[idx]\n\n # blue is max\n idx = (arr[:,:,2] == arr_max) \n out[idx, 0] = 4. 
+ (arr[idx, 0] - arr[idx, 1] ) / delta[idx]\n\n out[:,:,0] = (out[:,:,0]/6.0) % 1.0\n out[:,:,1] = s\n out[:,:,2] = arr_max\n\n # rescale back to [0, 255]\n #out *= 255.\n\n # remove NaN\n out[np.isnan(out)] = 0\n\n return out", "def hsl2rgb_img(hsl):\r\n\r\n def core(_hsl, _frgb):\r\n\r\n h, s, l = _hsl[:, :, 0], _hsl[:, :, 1], _hsl[:, :, 2]\r\n fr, fg, fb = _frgb[:, :, 0], _frgb[:, :, 1], _frgb[:, :, 2]\r\n\r\n q = np.zeros(l.shape, dtype=np.float)\r\n\r\n lbot = l < 0.5\r\n q[lbot] = l[lbot] * (1 + s[lbot])\r\n\r\n ltop = lbot == False\r\n l_ltop, s_ltop = l[ltop], s[ltop]\r\n q[ltop] = (l_ltop + s_ltop) - (l_ltop * s_ltop)\r\n\r\n p = 2 * l - q\r\n q_sub_p = q - p\r\n\r\n is_s_zero = s == 0\r\n l_is_s_zero = l[is_s_zero]\r\n per_3 = 1./3\r\n per_6 = 1./6\r\n two_per_3 = 2./3\r\n\r\n def calc_channel(channel, t):\r\n\r\n t[t < 0] += 1\r\n t[t > 1] -= 1\r\n t_lt_per_6 = t < per_6\r\n t_lt_half = (t_lt_per_6 == False) * (t < 0.5)\r\n t_lt_two_per_3 = (t_lt_half == False) * (t < two_per_3)\r\n t_mul_6 = t * 6\r\n\r\n channel[:] = p.copy()\r\n channel[t_lt_two_per_3] = p[t_lt_two_per_3] + q_sub_p[t_lt_two_per_3] * (4 - t_mul_6[t_lt_two_per_3])\r\n channel[t_lt_half] = q[t_lt_half].copy()\r\n channel[t_lt_per_6] = p[t_lt_per_6] + q_sub_p[t_lt_per_6] * t_mul_6[t_lt_per_6]\r\n channel[is_s_zero] = l_is_s_zero.copy()\r\n\r\n calc_channel(fr, h + per_3)\r\n calc_channel(fg, h.copy())\r\n calc_channel(fb, h - per_3)\r\n\r\n frgb = np.zeros(hsl.shape, dtype=np.float)\r\n cpus = multiprocessing.cpu_count()\r\n length = int(math.ceil(float(hsl.shape[0]) / cpus))\r\n line = 0\r\n threads = []\r\n while line < hsl.shape[0]:\r\n line_next = line + length\r\n thread = threading.Thread(target=core, args=(hsl[line:line_next], frgb[line:line_next]))\r\n thread.start()\r\n threads.append(thread)\r\n line = line_next\r\n\r\n for thread in threads:\r\n thread.join()\r\n\r\n return (frgb*255).round().astype(np.uint8)", "def rgb_to_hsv(x):\n # separating channels\n R = x[:,:,0]\n G = x[:,:,1]\n B = x[:,:,2]\n \n \n # h, s, v = hue, saturation, value \n # initial arrays for h, s and v filled with 0.0\n # we take R array just as 2D sample for copying the shape\n H = np.full_like(R, 0.0, dtype=np.double)\n S = np.full_like(R, 0.0, dtype=np.double)\n V = np.full_like(R, 0.0, dtype=np.double)\n \n HSV = np.full_like(x, 0.0, dtype=np.double)\n \n # np.max/min and axis=2 creates a 2D matrix\n C_max = np.max(x, axis=2) # maximum of r, g, b \n C_min = np.min(x, axis=2) # minimum of r, g, b \n Diff = C_max - C_min # diff of cmax and cmin. 
\n \n # Formula:\n # https://www.geeksforgeeks.org/program-change-rgb-color-model-hsv-color-model/\n \n # if cmax and cmax are equal (R=G=B) then h = 0 \n H[np.isclose(C_max, R, 0.0001)] = 0 \n \n # if cmax equal r \n m = np.isclose(C_max, R, 0.0001)&(Diff!=0)\n H[m] = (60 * ((G[m] - B[m]) / Diff[m]) + 360) % 360\n \n\n # if cmax equal g \n m = np.isclose(C_max, G, 0.0001)&(Diff!=0)\n H[m] = (60 * ((B[m] - R[m]) / Diff[m]) + 120) % 360\n \n # if cmax equal b \n m = np.isclose(C_max, B, 0.0001)&(Diff!=0)\n H[m] = (60 * ((R[m] - G[m]) / Diff[m]) + 240) % 360\n \n # if cmax equal zero \n S[C_max == 0] = 0\n \n # else\n m = (C_max != 0)\n S[m] = (Diff[m] / C_max[m])\n \n # compute v \n V = C_max\n \n # building new 3D picture\n HSV[:,:,0] = H\n HSV[:,:,1] = S\n HSV[:,:,2] = V\n \n return HSV", "def _rgb_to_hsv(img):\n maxc = img.max(axis=-3)\n minc = img.min(axis=-3)\n\n is_equal = paddle.equal(maxc, minc)\n one_divisor = paddle.ones_like(maxc)\n c_delta = maxc - minc\n # s is 0 when maxc == minc, set the divisor to 1 to avoid zero divide.\n s = c_delta / paddle.where(is_equal, one_divisor, maxc)\n\n r, g, b = img.unbind(axis=-3)\n c_delta_divisor = paddle.where(is_equal, one_divisor, c_delta)\n # when maxc == minc, there is r == g == b, set the divisor to 1 to avoid zero divide.\n rc = (maxc - r) / c_delta_divisor\n gc = (maxc - g) / c_delta_divisor\n bc = (maxc - b) / c_delta_divisor\n\n hr = (maxc == r).astype(maxc.dtype) * (bc - gc)\n hg = ((maxc == g) & (maxc != r)).astype(maxc.dtype) * (rc - bc + 2.0)\n hb = ((maxc != r) & (maxc != g)).astype(maxc.dtype) * (gc - rc + 4.0)\n h = (hr + hg + hb) / 6.0 + 1.0\n h = h - h.trunc()\n return paddle.stack([h, s, maxc], axis=-3)", "def rgb_to_hsv(x):\n hsv = th.zeros(*x.size())\n c_min = x.min(0)\n c_max = x.max(0)\n\n delta = c_max[0] - c_min[0]\n\n # set H\n r_idx = c_max[1].eq(0)\n hsv[0][r_idx] = ((x[1][r_idx] - x[2][r_idx]) / delta[r_idx]) % 6\n g_idx = c_max[1].eq(1)\n hsv[0][g_idx] = 2 + ((x[2][g_idx] - x[0][g_idx]) / delta[g_idx])\n b_idx = c_max[1].eq(2)\n hsv[0][b_idx] = 4 + ((x[0][b_idx] - x[1][b_idx]) / delta[b_idx])\n hsv[0] = hsv[0].mul(60)\n\n # set S\n hsv[1] = delta / c_max[0]\n\n # set V - good\n hsv[2] = c_max[0]\n\n return hsv", "def rgbToHsv ( r, g = 0.0, b = 0.0 ):\n # Check if argument is list\n if isinstance(r, list):\n g = r[1]\n b = r[2]\n r = r[0]\n if isinstance( r, int ):\n r /= 255.0\n if isinstance( g, int ):\n g /= 255.0\n if isinstance( b, int ):\n b /= 255.0\n\n _max = max( r, g, b )\n _min = min( r, g, b )\n v = _max\n\n d = _max - _min\n s = 0.0 if max == 0.0 else d / _max\n\n if _max == _min:\n h = 0.0 # achromatic\n else:\n if _max == r:\n h = ( g - b ) / d + ( 6.0 if g < b else 0.0 )\n elif _max == g:\n h = ( b - r ) / d + 2.0\n elif _max == b:\n h = (r - g) / d + 4.0\n h /= 6.0\n\n # map top 360, 100, 100\n # h = int( round( h * 360 ) )\n # s = int( round( s * 100 ) )\n # v = int( round( v * 100 ) )\n\n return [ h, s, v ]", "def rgbToHsv ( r, g = 0.0, b = 0.0 ):\n\n # Check if argument is list\n if isinstance(r, list):\n g = r[1]\n b = r[2]\n r = r[0]\n\n _max = max( r, g, b )\n _min = min( r, g, b )\n v = _max\n d = _max - _min;\n s = 0.0 if _max == 0.0 else d / _max\n\n if _max == _min:\n h = 0.0 # achromatic\n else:\n if _max == r:\n h = (g - b) / d + ( 6.0 if g < b else 0.0 )\n elif _max == g:\n h = (b - r) / d + 2.0\n elif _max == b:\n h = (r - g) / d + 4.0\n h /= 6.0\n return [ h, s, v ]", "def rgb_to_hsv(rgb, dtype=numpy.float64):\n arr = _prepare_colorarray(rgb, dtype)\n out = 
numpy.empty_like(arr)\n\n # -- V channel\n out_v = arr.max(-1)\n\n # -- S channel\n delta = arr.ptp(-1)\n\n # Ignore warning for zero divided by zero\n old_settings = numpy.seterr(divide='ignore', invalid='ignore')\n try:\n out_s = delta / out_v\n out_s[delta == 0.] = 0.\n\n # -- H channel\n # Red is max.\n idx = (arr[:, 0] == out_v)\n out[idx, 0] = (arr[idx, 1] - arr[idx, 2]) / delta[idx]\n\n # Green is max\n idx = (arr[:, 1] == out_v)\n out[idx, 0] = 2. + (arr[idx, 2] - arr[idx, 0]) / delta[idx]\n\n # Blue is max.\n idx = (arr[:, 2] == out_v)\n out[idx, 0] = 4. + (arr[idx, 0] - arr[idx, 1]) / delta[idx]\n out_h = (out[:, 0] / 6.) % 1.\n out_h[delta == 0.] = 0.\n finally:\n numpy.seterr(**old_settings)\n\n # -- Output\n out[:, 0] = out_h\n out[:, 1] = out_s\n out[:, 2] = out_v\n\n # Remove NaNs\n out[numpy.isnan(out)] = 0\n return out", "def hslToRgb ( h, s = 0.0, l = 0.0, a = 1.0 ):\n\n # Check if argument is list\n if isinstance(h, list):\n s = h[1]\n l = h[2]\n h = h[0]\n\n if isinstance(h, int):\n h /= 360.0\n if isinstance(s, int):\n s /= 100.0\n if isinstance(l, int):\n l /= 100.0\n\n r = l\n g = l\n b = l\n v = l * ( 1.0 + s ) if l <= 0.5 else l + s - l * s\n if ( v > 0 ):\n m = l + l - v\n sv = ( v - m ) / v\n h *= 6.0\n sextant = int( math.floor( h ) )\n fract = h - sextant\n vsf = v * sv * fract\n mid1 = m + vsf\n mid2 = v - vsf\n\n # Switch sextant\n if sextant == 0:\n r = v\n g = mid1\n b = m\n elif sextant == 1:\n r = mid2\n g = v\n b = m\n elif sextant == 2:\n r = m\n g = v\n b = mid1\n elif sextant == 3:\n r = m\n g = mid2\n b = v\n elif sextant == 4:\n r = mid1\n g = m\n b = v\n elif sextant == 5:\n r = v\n g = m\n b = mid2\n\n return [ r, g, b ]", "def __RGB_to_Hue(var_R, var_G, var_B, var_min, var_max):\r\n\r\n if var_max == var_min:\r\n return 0.0\r\n elif var_max == var_R:\r\n return (60.0 * ((var_G - var_B) / (var_max - var_min)) + 360) % 360.0\r\n elif var_max == var_G:\r\n return 60.0 * ((var_B - var_R) / (var_max - var_min)) + 120\r\n elif var_max == var_B:\r\n return 60.0 * ((var_R - var_G) / (var_max - var_min)) + 240.0", "def rgb2hsl(rgb, h_prec=0, sl_prec=3):\n for value in rgb:\n if not 0 <= value <= 255:\n raise ValueError('One or more RGB values are outside [0, 255]')\n\n r, g, b = rgb[0] / 255, rgb[1] / 255, rgb[2] / 255\n\n c_max = max(r, g, b)\n c_min = min(r, g, b)\n delta = c_max - c_min\n\n # Hue\n if delta == 0:\n h = 0\n elif c_max == r:\n h = 60 * (((g - b) / delta) % 6)\n elif c_max == g:\n h = 60 * ((b - r) / delta + 2)\n else:\n h = 60 * ((r - g) / delta + 4)\n\n # Lightness\n l = (c_max + c_min) / 2\n\n # Saturation\n if delta == 0:\n s = 0\n else:\n s = delta / (1 - abs(2 * l - 1))\n\n return round(h, h_prec), round(s, sl_prec), round(l, sl_prec)", "def rgb_to_hues(rgb):\n hsv = filters.filter_rgb_to_hsv(rgb, display_np_info=False)\n h = filters.filter_hsv_to_h(hsv, display_np_info=False)\n return h", "def RGB_to_HSL(cobj, *args, **kwargs):\r\n \r\n var_R = cobj.rgb_r\r\n var_G = cobj.rgb_g\r\n var_B = cobj.rgb_b\r\n \r\n var_max = max(var_R, var_G, var_B)\r\n var_min = min(var_R, var_G, var_B)\r\n \r\n var_H = __RGB_to_Hue(var_R, var_G, var_B, var_min, var_max)\r\n var_L = 0.5 * (var_max + var_min)\r\n \r\n if var_max == var_min:\r\n var_S = 0\r\n elif var_L <= 0.5:\r\n var_S = (var_max - var_min) / (2.0 * var_L)\r\n else:\r\n var_S = (var_max - var_min) / (2.0 - (2.0 * var_L))\r\n \r\n return HSLColor(\r\n var_H, var_S, var_L)", "def rgbToHsl( r, g = 0.0, b = 0.0 ):\n\n # Check if argument is list\n if isinstance(r, list):\n g = r[1]\n b = 
r[2]\n r = r[0]\n\n _max = max( r, g, b )\n _min = min( r, g, b )\n l = (_max + _min) / 2.0\n\n if _max == _min:\n # achromatic\n h = 0.0\n s = 0.0\n else:\n d = _max - _min\n s = d / ( 2.0 - _max - _min ) if l > 0.5 else d / (_max + _min)\n\n if _max == r:\n h = ( g - b ) / d + ( 6.0 if g < b else 0.0 )\n elif _max == g:\n h = ( b - r ) / d + 2.0\n else: # max == b\n h = ( r - g ) / d + 4.0\n h /= 6.0\n return [ h, s, l ]", "def hsvToRgb ( h, s = 0.0, v = 0.0 ):\n # Check if first argument is list\n if isinstance(h, list):\n s = h[1]\n v = h[2]\n h = h[0]\n if isinstance( h, int ):\n h /= 360.0\n if isinstance( s, int ):\n s /= 100.0\n if isinstance( v, int ):\n v /= 100.0\n\n if v == 0.0:\n return [0, 0, 0]\n\n h = h * 6.0\n i = int( math.floor( h ) )\n\n f = h - i\n p = v * ( 1.0 - s )\n q = v * ( 1.0 - ( s * f ) )\n t = v * ( 1.0 - ( s * ( 1.0 - f ) ) )\n\n if i == 0:\n r = v\n g = t\n b = p\n elif i == 1:\n r = q\n g = v\n b = p\n elif i == 2:\n r = p\n g = v\n b = t\n elif i == 3:\n r = p\n g = q\n b = v\n elif i == 4:\n r = t\n g = p\n b = v\n elif i == 5:\n r = v\n g = p\n b = q\n # To return int\n # r = int( math.floor( r * 255 ) )\n # g = int( math.floor( g * 255 ) )\n # b = int( math.floor( b * 255 ) )\n\n return [ r, g, b ]", "def tohsv(self):\n\n return rgb_to_hsv(self.r * RGB_CHANNEL_SCALE, self.g * RGB_CHANNEL_SCALE, self.b * RGB_CHANNEL_SCALE)", "def RGBtoHSL( rgb ):\n # R' = R/255 (G' = G/255, B' = B/255)\n Rp = rgb[2]/255\n Gp = rgb[1]/255\n Bp = rgb[0]/255\n Cmax = max(Rp,Gp,Bp)\n Cmin = min(Rp,Gp,Bp)\n Delta = Cmax - Cmin\n if Delta == 0:\n Hue = 0\n elif Cmax == Rp:\n Hue = 60*(((Gp-Bp)/Delta)%6)\n elif Cmax == Gp:\n Hue = 60*((Bp-Rp)/Delta + 2)\n else:\n Hue = 60*((Rp-Gp)/Delta + 4)\n\n Lit = (Cmax+Cmin)/2\n\n if Delta == 0:\n Sat = 0\n else:\n Sat = Delta/(1-abs(2*Lit-1))\n #print(\"H:\",Hue,\"S:\",Sat,\"L:\",Lit)\n return (Hue,Sat,Lit)", "def convert_rgb_hsl(rcol, gcol, bcol):\n\n mxi = max(rcol, gcol, bcol)\n mni = min(rcol, gcol, bcol)\n\n lcol = (mxi+mni)/2\n d_f = mxi-mni\n if mxi == mni:\n hcol = 0\n elif mxi == rcol:\n hcol = (60 * ((gcol-bcol)/d_f) + 360) % 360\n elif mxi == gcol:\n hcol = (60 * ((bcol-rcol)/d_f) + 120) % 360\n elif mxi == bcol:\n hcol = (60 * ((rcol-gcol)/d_f) + 240) % 360\n if d_f == 0:\n scol = 0\n else:\n scol = d_f/(1-abs(2*lcol-1))\n\n return hcol, scol, lcol", "def Saturation(img):\r\n factor = 2 * np.random.rand()\r\n HSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\r\n H, S, V = cv2.split(HSV)\r\n S= S* np.float(factor)\r\n S = np.where( S>255, 255,S)\r\n S = np.where( S<0, 0, S)\r\n HSV[:,:,1] = np.uint8(S)\r\n BGR = cv2.cvtColor(HSV, cv2.COLOR_HSV2BGR)\r\n return BGR", "def _hls2rgb(self,h):\n h=h**self.exponent\n if(self.invert): h=1.0-h\n h=h*360.0\n h=Numeric.fmod(h,360.0)\n if(self.hls_hls):\n h=h/60.0\n else:\n if(h<120):\n h=h/120.0 # /* 0..1 Rot..(Orange)..Gelb */\n elif(h<180):\n h=h/60.0 - 1.0 # /* 1..2 Gelb..Gruen */\n elif(h<240):\n h=h/30.0 - 4.0 # /* 2..4 Gruen..Blaugruen..Blau*/\n else:\n h=h/60.0 # /* 4..6 Blau..Purpur..Rot */\n c=int(h)\n frac=h-c\n if (self.hls_l<=0.5):\n maxi=self.hls_l*(1.0+self.hls_s)\n else:\n maxi=self.hls_l+self.hls_s-self.hls_l*self.hls_s\n mini=2*self.hls_l-maxi;\n diff=maxi-mini;\n if(self.hls_s==0): # /* grau */\n return(1.0,1.0,1.0) \n else:\n if(c==0):\n return(maxi,mini+frac*diff,mini)\n elif(c==1):\n return(mini+(1.0-frac)*diff,maxi,mini)\n elif(c==2):\n return(mini,maxi,mini+frac*diff)\n elif(c==3):\n return(mini,mini+(1.0-frac)*diff,maxi)\n elif(c==4):\n return(mini+frac*diff,mini,maxi)\n 
else:\n return(maxi,mini,mini+(1.0-frac)*diff)", "def greyscale(c):\n return desaturate(c, 1)", "def rgb2hls(t):\n r,g,b = t\n r /= 255.0\n g /= 255.0\n b /= 255.0\n return rgb_to_hls(r,g,b)", "def rgb2hls(t):\n r,g,b = t\n r /= 255.0\n g /= 255.0\n b /= 255.0\n return rgb_to_hls(r,g,b)", "def rgb2hsv(t):\n r,g,b = t\n r /= 255.0\n g /= 255.0\n b /= 255.0\n return rgb_to_hsv(r,g,b)", "def hyper2rgb(img, bands):\n rgb = spectral.get_rgb(img, bands)\n rgb /= np.max(rgb)\n rgb = np.asarray(255 * rgb, dtype='uint8')\n return rgb", "def sRGBGrayscale(x):\n rellum=sRGBLuminance(x)\n return [rellum,rellum,rellum]", "def filterToHue( bmp, savefile = '' ):\n for h in range(bmp.height):\n for w in range(bmp.width):\n HSL = RGBtoHSL( bmp.pixels[h][w] )\n hue = int(255*HSL[0]//360) # convert to 0-255 range\n bmp.pixels[h][w] = (hue,hue,hue)\n if( savefile != '' ):\n bmp.save(savefile)\n return bmp", "def rgb_to_hsv(arr):\r\n arr = np.asarray(arr)\r\n\r\n # check length of the last dimension, should be _some_ sort of rgb\r\n if arr.shape[-1] != 3:\r\n raise ValueError(\"Last dimension of input array must be 3; \"\r\n \"shape {} was found.\".format(arr.shape))\r\n\r\n in_shape = arr.shape\r\n arr = np.array(\r\n arr, copy=False,\r\n dtype=np.promote_types(arr.dtype, np.float32), # Don't work on ints.\r\n ndmin=2, # In case input was 1D.\r\n )\r\n out = np.zeros_like(arr)\r\n arr_max = arr.max(-1)\r\n ipos = arr_max > 0\r\n delta = arr.ptp(-1)\r\n s = np.zeros_like(delta)\r\n s[ipos] = delta[ipos] / arr_max[ipos]\r\n ipos = delta > 0\r\n # red is max\r\n idx = (arr[..., 0] == arr_max) & ipos\r\n out[idx, 0] = (arr[idx, 1] - arr[idx, 2]) / delta[idx]\r\n # green is max\r\n idx = (arr[..., 1] == arr_max) & ipos\r\n out[idx, 0] = 2. + (arr[idx, 2] - arr[idx, 0]) / delta[idx]\r\n # blue is max\r\n idx = (arr[..., 2] == arr_max) & ipos\r\n out[idx, 0] = 4. 
+ (arr[idx, 0] - arr[idx, 1]) / delta[idx]\r\n\r\n out[..., 0] = (out[..., 0] / 6.0) % 1.0\r\n out[..., 1] = s\r\n out[..., 2] = arr_max\r\n out = out.reshape(in_shape)\r\n return out", "def hsv2rgb_float(h,s,v):\n\tif h<0.0: h=0.0\n\tif h>1.0: h=1.0\n\tif s<0.0: s=0.0\n\tif s>1.0: s=1.0\n\tif v<0.0: v=0.0\n\tif v>1.0: v=1.0\n\th = h*6.0\n\tif h<=3.0:\n\t\tif h<=1.0:\n\t\t\tr = 1.0;g=h;b=0.0\n\t\telif h<=2.0:\n\t\t\tr = 2.0-h;g=1.0;b=0.0\n\t\telse:\n\t\t\tr = 0.0;g=1.0;b=h-2.0\n\telse:\n\t\tif h<=4.0:\n\t\t\tr = 0.0;g=4.0-h;b=1.0\n\t\telif h<=5.0:\n\t\t\tr = h-4.0;g=0.0;b=1.0\n\t\telse:\n\t\t\tr = 1.0;g=0.0;b=6.0-h\n\tq = 1.0-s\n\tr = q+s*r\n\tg = q+s*g\n\tb = q+s*b\n\treturn (v*r,v*g,v*b)", "def pil_hue_histogram(h):\n np_hist = np_hsv_hue_histogram(h)\n pil_hist = util.np_to_pil(np_hist)\n return pil_hist", "def HSL_to_RGB(cobj, target_rgb, *args, **kwargs):\r\n \r\n H = cobj.hsl_h\r\n S = cobj.hsl_s\r\n L = cobj.hsl_l\r\n \r\n if L < 0.5:\r\n var_q = L * (1.0 + S)\r\n else:\r\n var_q = L + S - (L * S)\r\n \r\n var_p = 2.0 * L - var_q\r\n \r\n # H normalized to range [0,1]\r\n h_sub_k = (H / 360.0)\r\n \r\n t_sub_R = h_sub_k + (1.0 / 3.0)\r\n t_sub_G = h_sub_k\r\n t_sub_B = h_sub_k - (1.0 / 3.0)\r\n \r\n rgb_r = __Calc_HSL_to_RGB_Components(var_q, var_p, t_sub_R)\r\n rgb_g = __Calc_HSL_to_RGB_Components(var_q, var_p, t_sub_G)\r\n rgb_b = __Calc_HSL_to_RGB_Components(var_q, var_p, t_sub_B)\r\n\r\n # In the event that they define an HSV color and want to convert it to \r\n # a particular RGB space, let them override it here.\r\n if target_rgb is not None:\r\n rgb_type = target_rgb\r\n else:\r\n rgb_type = cobj.rgb_type\r\n \r\n return target_rgb(rgb_r, rgb_g, rgb_b)", "def sRGB_to_sRGB_linear(rgb):\n return np.where(rgb <= 0.03928, rgb / 12.92, np.power((rgb + 0.055) / 1.055, 2.4))", "def hsv(img):\n\tif img is None:\n\t\tprint \"Img is None\"\n\t\tsys.exit()\n\tif len(img.shape) > 2:\n\t\treturn cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n\treturn None", "def hsv_to_rgb(hsv):\r\n hsv = np.asarray(hsv)\r\n\r\n # check length of the last dimension, should be _some_ sort of rgb\r\n if hsv.shape[-1] != 3:\r\n raise ValueError(\"Last dimension of input array must be 3; \"\r\n \"shape {shp} was found.\".format(shp=hsv.shape))\r\n\r\n in_shape = hsv.shape\r\n hsv = np.array(\r\n hsv, copy=False,\r\n dtype=np.promote_types(hsv.dtype, np.float32), # Don't work on ints.\r\n ndmin=2, # In case input was 1D.\r\n )\r\n\r\n h = hsv[..., 0]\r\n s = hsv[..., 1]\r\n v = hsv[..., 2]\r\n\r\n r = np.empty_like(h)\r\n g = np.empty_like(h)\r\n b = np.empty_like(h)\r\n\r\n i = (h * 6.0).astype(int)\r\n f = (h * 6.0) - i\r\n p = v * (1.0 - s)\r\n q = v * (1.0 - s * f)\r\n t = v * (1.0 - s * (1.0 - f))\r\n\r\n idx = i % 6 == 0\r\n r[idx] = v[idx]\r\n g[idx] = t[idx]\r\n b[idx] = p[idx]\r\n\r\n idx = i == 1\r\n r[idx] = q[idx]\r\n g[idx] = v[idx]\r\n b[idx] = p[idx]\r\n\r\n idx = i == 2\r\n r[idx] = p[idx]\r\n g[idx] = v[idx]\r\n b[idx] = t[idx]\r\n\r\n idx = i == 3\r\n r[idx] = p[idx]\r\n g[idx] = q[idx]\r\n b[idx] = v[idx]\r\n\r\n idx = i == 4\r\n r[idx] = t[idx]\r\n g[idx] = p[idx]\r\n b[idx] = v[idx]\r\n\r\n idx = i == 5\r\n r[idx] = v[idx]\r\n g[idx] = p[idx]\r\n b[idx] = q[idx]\r\n\r\n idx = s == 0\r\n r[idx] = v[idx]\r\n g[idx] = v[idx]\r\n b[idx] = v[idx]\r\n\r\n rgb = np.stack([r, g, b], axis=-1)\r\n\r\n return rgb.reshape(in_shape)", "def rgb_to_hsv(r, g, b):\n\tmaxc = max(r, g, b)\n\tminc = min(r, g, b)\n\tv = maxc\n\tif minc == maxc:\n\t\treturn 0.0, 0.0, v\n\ts = (maxc-minc) / maxc\n\trc = (maxc-r) / 
(maxc-minc)\n\tgc = (maxc-g) / (maxc-minc)\n\tbc = (maxc-b) / (maxc-minc)\n\tif r == maxc:\n\t\th = bc-gc\n\telif g == maxc:\n\t\th = 2.0+rc-bc\n\telse:\n\t\th = 4.0+gc-rc\n\th = (h/6.0) % 1.0\n\treturn h, s, v", "def hsvHue(rgb):\n return rgbToHsv(rgb)[0]", "def to_image(array, hue=.62):\n\n # apply saturation function\n image_data = np.log(array + 1)\n\n # create colormap, change these values to adjust to look of your plot\n saturation_values = [[0, 0], [.75, .68], [.78, .87], [0, 1]]\n colors = [hsv_to_rgb([hue, x, y]) for x, y in saturation_values]\n cmap = LinearSegmentedColormap.from_list(\"my_colormap\", colors)\n\n # apply colormap\n out = cmap(image_data / image_data.max())\n\n # convert to 8-bit unsigned integer\n out = (out * 255).astype(np.uint8)\n return out", "def tohls(self):\n\n return rgb_to_hls(self.r * RGB_CHANNEL_SCALE, self.g * RGB_CHANNEL_SCALE, self.b * RGB_CHANNEL_SCALE)", "def HSVtoRGB(h, s, v):\n\n\tc = v * s\n\th2 = h / 60\n\tx = c * (1 - abs((h2 % 2) - 1))\n\n\tif h2 < 1:\n\t\tr, g, b = c, x, 0\n\telif h2 < 2:\n\t\tr, g, b = x, c, 0\n\telif h2 < 3:\n\t\tr, g, b = 0, c, x\n\telif h2 < 4:\n\t\tr, g, b = 0, x, c\n\telif h2 < 5:\n\t\tr, g, b = x, 0, c\n\telif h2 < 6:\n\t\tr, g, b = c, 0, x\n\t\n\tm = v - c\n\treturn r + m, g + m, b + m", "def hs_color(self):\n if self._rgb:\n return color_util.color_RGB_to_hs(*self._rgb)\n else:\n return None", "def as_hsv(self):\n return rgb_to_hsv(*self.normalise().as_tuple())", "def hsv_to_rgb(hsv):\n h = hsv[:, :, 0]\n s = hsv[:, :, 1]\n v = hsv[:, :, 2]\n\n r = np.empty_like(h)\n g = np.empty_like(h)\n b = np.empty_like(h)\n\n i = (h * 6.0).astype(np.int)\n f = (h * 6.0) - i\n p = v * (1.0 - s)\n q = v * (1.0 - s * f)\n t = v * (1.0 - s * (1.0 - f))\n\n idx = i % 6 == 0\n r[idx] = v[idx]\n g[idx] = t[idx]\n b[idx] = p[idx]\n\n idx = i == 1\n r[idx] = q[idx]\n g[idx] = v[idx]\n b[idx] = p[idx]\n\n idx = i == 2\n r[idx] = p[idx]\n g[idx] = v[idx]\n b[idx] = t[idx]\n\n idx = i == 3\n r[idx] = p[idx]\n g[idx] = q[idx]\n b[idx] = v[idx]\n\n idx = i == 4\n r[idx] = t[idx]\n g[idx] = p[idx]\n b[idx] = v[idx]\n\n idx = i == 5\n r[idx] = v[idx]\n g[idx] = p[idx]\n b[idx] = q[idx]\n\n idx = s == 0\n r[idx] = v[idx]\n g[idx] = v[idx]\n b[idx] = v[idx]\n\n rgb = np.empty_like(hsv)\n rgb[:, :, 0] = r\n rgb[:, :, 1] = g\n rgb[:, :, 2] = b\n return rgb", "def getHSV(self):\n\t\tself.colour = [self.getH(), self.getS(),1]\n\t\treturn self.colour", "def getHSV((r,g,b)):\n return rgb_to_hsv(r/255., g/255., b/255.)", "def convert_rgb_hsv(rcol, gcol, bcol):\n\n mxi = max(rcol, gcol, bcol)\n mni = min(rcol, gcol, bcol)\n\n d_f = mxi-mni\n if mxi == mni:\n hcol = 0\n elif mxi == rcol:\n hcol = (60 * ((gcol-bcol)/d_f) + 360) % 360\n elif mxi == gcol:\n hcol = (60 * ((bcol-rcol)/d_f) + 120) % 360\n elif mxi == bcol:\n hcol = (60 * ((rcol-gcol)/d_f) + 240) % 360\n if mxi == 0:\n scol = 0\n else:\n scol = d_f/mxi\n vcol = mxi\n return hcol, scol, vcol", "def _rgb2plot(self, data):\n\n mindata, maxdata = np.percentile(data[np.isfinite(data)], (0.01, 99))\n return np.clip((data - mindata) / (maxdata-mindata) * 255, 0, 255).astype(np.uint8)", "def hess(color, mag, binsize, **kw):\n\n # cbin = out[0]\n # mbin = out[1]\n # imshow(out[2])\n # yticks(range(0, len(mbin), 4), mbin[range(0,len(mbin),4)])\n # xticks(range(0, len(cbin), 4), cbin[range(0,len(cbin),4)])\n # ylim([ylim()[1], ylim()[0]])\n\n # 2009-02-08 23:01 IJC: Created, on a whim, for LMC data (of course)\n # 2009-02-21 15:45 IJC: Updated with cbin, mbin options\n\n from numpy import arange, 
zeros\n\n defaults = dict(mbin=None, cbin=None, verbose=False)\n\n for key in defaults:\n if (not kw.has_key(key)):\n kw[key] = defaults[key]\n\n if kw['mbin']==None:\n mbin = arange(mag.min(), mag.max(), binsize)\n else:\n mbin = array(kw['mbin']).copy()\n if kw['cbin']==None:\n cbin = arange(color.min(), color.max(), binsize)\n else:\n cbin = array(kw['cbin']).copy()\n\n hess = zeros((len(mbin), len(cbin)), float)\n for ii in range(len(cbin)):\n cindex = (color<(cbin[ii]+binsize/2)) * (color>(cbin[ii]-binsize/2)) \n for jj in range(len(mbin)):\n index = cindex * (mag<(mbin[jj]+binsize/2)) * (mag>(mbin[jj]-binsize/2)) \n hess[jj,ii] = index.sum()\n\n\n return (cbin, mbin, hess)", "def hsv(self):\n return colorsys.rgb_to_hsv(self.red, self.green, self.blue)", "def hls_scale(img):\n return cv2.cvtColor(img, cv2.COLOR_RGB2HLS)", "def hue_palette(hues, low=32, high=255, sat=1.0):\n if len(hues) == 1:\n step = 0\n else:\n step = (high - low) / (len(hues) - 1)\n return [colorsys.hsv_to_rgb(hues[i], sat, low + (i * step))\n for i in range(len(hues))]", "def RGB_to_HSV(cobj, *args, **kwargs):\r\n\r\n var_R = cobj.rgb_r\r\n var_G = cobj.rgb_g\r\n var_B = cobj.rgb_b\r\n \r\n var_max = max(var_R, var_G, var_B)\r\n var_min = min(var_R, var_G, var_B)\r\n \r\n var_H = __RGB_to_Hue(var_R, var_G, var_B, var_min, var_max)\r\n \r\n if var_max == 0:\r\n var_S = 0\r\n else:\r\n var_S = 1.0 - (var_min / var_max)\r\n \r\n var_V = var_max\r\n\r\n hsv_h = var_H\r\n hsv_s = var_S\r\n hsv_v = var_V\r\n\r\n return HSVColor(\r\n var_H, var_S, var_V)", "def _hsv_to_rgb(img):\n h, s, v = img.unbind(axis=-3)\n f = h * 6.0\n i = paddle.floor(f)\n f = f - i\n i = i.astype(paddle.int32) % 6\n\n p = paddle.clip(v * (1.0 - s), 0.0, 1.0)\n q = paddle.clip(v * (1.0 - s * f), 0.0, 1.0)\n t = paddle.clip(v * (1.0 - s * (1.0 - f)), 0.0, 1.0)\n\n mask = paddle.equal(\n i.unsqueeze(axis=-3),\n paddle.arange(6, dtype=i.dtype).reshape((-1, 1, 1)),\n ).astype(img.dtype)\n matrix = paddle.stack(\n [\n paddle.stack([v, q, p, p, t, v], axis=-3),\n paddle.stack([t, v, v, q, p, p], axis=-3),\n paddle.stack([p, p, t, v, v, q], axis=-3),\n ],\n axis=-4,\n )\n return paddle.einsum(\"...ijk, ...xijk -> ...xjk\", mask, matrix)", "def gray_heat(map):\n map = (map - 0.5) * 5.0 + 0.5\n H = map.shape[0]\n W = map.shape[1]\n out = np.zeros((H,W,3))\n for h in range(0,H):\n for w in range(0,W):\n # (240, )\n out[h,w,:] = colorsys.hls_to_rgb((1.0-map[h,w])*0.66667, 0.5, 1.0)\n return out", "def hsv_to_rgb(hsv, dtype=numpy.float64):\n arr = _prepare_colorarray(hsv, dtype)\n\n hi = numpy.floor(arr[:, 0] * 6)\n f = arr[:, 0] * 6 - hi\n p = arr[:, 2] * (1 - arr[:, 1])\n q = arr[:, 2] * (1 - f * arr[:, 1])\n t = arr[:, 2] * (1 - (1 - f) * arr[:, 1])\n v = arr[:, 2]\n\n hi = numpy.dstack([hi, hi, hi]).astype(numpy.uint8) % 6\n out = numpy.choose(hi, [numpy.dstack((v, t, p)),\n numpy.dstack((q, v, p)),\n numpy.dstack((p, v, t)),\n numpy.dstack((p, q, v)),\n numpy.dstack((t, p, v)),\n numpy.dstack((v, p, q))])\n\n return out[0]", "def rgb2hsv(red, green, blue):\n return colorsys.rgb_to_hsv(red, green, blue)", "def compute_new_hsv(im):\n eps = 1e-10\n r,g,b = np.array(cv2.split(im)) + eps\n traditional_hsv = cv2.cvtColor(im, cv2.COLOR_RGB2HSV)\n numerator = np.log(r) - np.log(g)\n denominator = np.log(r) + np.log(g) - 2*np.log(b) + eps\n new_hue = np.clip(np.round(numerator/denominator).astype(np.uint8), 0, 180)\n new_hsv = np.zeros_like(traditional_hsv).astype(np.uint8)\n new_hsv[:, :, 0] = new_hue\n new_hsv[:, :, 1] = traditional_hsv[:, :, 1]\n new_hsv[:, 
:, 2] = traditional_hsv[:, :, 2]\n return new_hsv", "def _hess(val, r):\n\n gval = grad(val, r,\n grad_outputs=torch.ones_like(val),\n create_graph=True)[0]\n\n hval = grad(gval, r,\n grad_outputs=torch.ones_like(gval))[0]\n\n return hval, gval", "def rgb_to_hls(rgb):\n return colorsys.rgb_to_hls(rgb[0] / 255, rgb[1] / 255, rgb[2] / 255)", "def rgb2hsv(rgb, h_prec=0, sv_prec=3):\n for value in rgb:\n if not 0 <= value <= 255:\n raise ValueError('One or more RGB values are outside [0, 255]')\n\n r, g, b = rgb[0] / 255, rgb[1] / 255, rgb[2] / 255\n\n c_max = max(r, g, b)\n c_min = min(r, g, b)\n delta = c_max - c_min\n\n # Hue\n if delta == 0:\n h = 0\n elif c_max == r:\n h = 60 * (((g - b) / delta) % 6)\n elif c_max == g:\n h = 60 * ((b - r) / delta + 2)\n else:\n h = 60 * ((r - g) / delta + 4)\n\n # Saturation\n if c_max == 0:\n s = 0\n else:\n s = delta / c_max\n\n # Value / Brightness\n v = c_max\n\n return round(h, h_prec), round(s, sv_prec), round(v, sv_prec)", "def color_from_value(self, value):\n \n return ImageColor.getrgb(\"hsl(%d,%d%%,%d%%)\" % (int( (1.0 - value) * 360 ), 80, 50))", "def bgr2hsv(BGR):\n\tcolor = np.uint8([[BGR]])\n\thsv_color = cv2.cvtColor(color, cv2.COLOR_BGR2HSV)\n\treturn hsv_color", "def _toRgbImage(self, fractal, colors, color_offset):\n hsv_img = np.array(\n [\n # Cycle through color wheel.\n (fractal * colors + color_offset) % 1,\n\n # Saturation = 1 where fractal values > 0,\n # Saturation = 0 otherwise.\n fractal.astype(dtype=bool).astype(dtype=float),\n\n # Invert colours\n 1 - fractal\n ]\n ).astype(dtype=float).T\n\n rgb_img = (mpl.colors.hsv_to_rgb(hsv_img) * 255).astype(dtype=np.uint8)\n return rgb_img", "def test_conversion_to_rgb_zero_div(self):\r\n\r\n lchuv = LCHuvColor(0.0, 0.0, 0.0)\r\n rgb = convert_color(lchuv, sRGBColor)\r\n self.assertColorMatch(rgb, sRGBColor(0.0, 0.0, 0.0))", "def hue(value):\n value = int(value)\n if value < 0 or value > 65535:\n raise ValueError('Hue is a value between 0 and 65535')\n return value", "def _toRgbImage(self, fractal, colors, color_offset):\n hsv_img = np.array(\n [\n # Cycle through color wheel.\n (fractal * colors + color_offset) % 1,\n\n # Saturation = fractal value.\n fractal,\n\n # Maximum value.\n np.ones(fractal.shape)\n ]\n ).astype(dtype=float).T\n\n rgb_img = (mpl.colors.hsv_to_rgb(hsv_img) * 255).astype(dtype=np.uint8)\n return rgb_img", "def updateFromHsl ( self ):\n rgb = Colz.hslToRgb( self.h, self.s, self.l )\n self.r = rgb[0]\n self.g = rgb[1]\n self.b = rgb[2]\n self.rgb = rgb\n self.rgba = [ rgb[0], rgb[1], rgb[2], self.a ]\n # Updates Hex\n self.hex = Colz.rgbToHex( rgb[0], rgb[1], rgb[2] )", "def HSV_to_RGB(cobj, target_rgb, *args, **kwargs):\r\n \r\n H = cobj.hsv_h\r\n S = cobj.hsv_s\r\n V = cobj.hsv_v\r\n \r\n h_floored = int(math.floor(H))\r\n h_sub_i = int(h_floored / 60) % 6\r\n var_f = (H / 60.0) - (h_floored // 60)\r\n var_p = V * (1.0 - S)\r\n var_q = V * (1.0 - var_f * S)\r\n var_t = V * (1.0 - (1.0 - var_f) * S)\r\n \r\n if h_sub_i == 0:\r\n rgb_r = V\r\n rgb_g = var_t\r\n rgb_b = var_p\r\n elif h_sub_i == 1:\r\n rgb_r = var_q\r\n rgb_g = V\r\n rgb_b = var_p\r\n elif h_sub_i == 2:\r\n rgb_r = var_p\r\n rgb_g = V\r\n rgb_b = var_t\r\n elif h_sub_i == 3:\r\n rgb_r = var_p\r\n rgb_g = var_q\r\n rgb_b = V\r\n elif h_sub_i == 4:\r\n rgb_r = var_t\r\n rgb_g = var_p\r\n rgb_b = V\r\n elif h_sub_i == 5:\r\n rgb_r = V\r\n rgb_g = var_p\r\n rgb_b = var_q\r\n else:\r\n raise ValueError(\"Unable to convert HSL->RGB due to value error.\")\r\n\r\n # In the event that they define an HSV 
color and want to convert it to \r\n # a particular RGB space, let them override it here.\r\n if target_rgb is not None:\r\n rgb_type = target_rgb\r\n else:\r\n rgb_type = cobj.rgb_type\r\n \r\n return target_rgb(rgb_r, rgb_g, rgb_b)", "def sRGB_linear_to_sRGB(rgb_linear):\n\n \"\"\"sRGB standard for gamma inverse correction.\"\"\"\n rgb = np.where(\n rgb_linear <= 0.00304,\n 12.92 * rgb_linear,\n 1.055 * np.power(rgb_linear, 1.0 / 2.4) - 0.055,\n )\n\n # clip intensity if needed (rgb values > 1.0) by scaling\n rgb_max = np.amax(rgb, axis=0) + 0.00001 # avoid division by zero\n intensity_cutoff = 1.0\n rgb = np.where(rgb_max > intensity_cutoff, rgb * intensity_cutoff / (rgb_max), rgb)\n\n return rgb", "def flow_to_rgb(flow):\n im1 = flow[:, :, 0]\n im2 = flow[:, :, 1]\n\n h, w = flow.shape[:2]\n\n # Use Hue, Saturation, Value colour model\n hsv = np.zeros((h, w, 3), dtype=np.float32)\n hsv[..., 1] = 1\n\n mag, ang = cv2.cartToPolar(im1, im2)\n hsv[..., 0] = ang * 180 / np.pi\n hsv[..., 2] = cv2.normalize(mag, None, 0, 1, cv2.NORM_MINMAX)\n\n return cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)", "def hsv_to_cvhsv(self, h, s, v):\r\n cv_h = int(179 * h / 360)\r\n cv_s = int(255 * s / 100)\r\n cv_v = int(255 * v / 100)\r\n colour = np.array([cv_h, cv_s, cv_v])\r\n return colour", "def hsvToHsl ( h, s = 0.0, v = 0.0 ):\n # Check if first argument is list\n if isinstance(h, list):\n s = h[1]\n v = h[2]\n h = h[0]\n return Colz.rgbToHsl( Colz.hsbToRgb( h, s, v ) )", "def preprocess_image(img):\r\n\r\n hsvImg = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\r\n\r\n hsvImg[...,1] = hsvImg[...,1]*1.75 #increase saturation by 175%\r\n\r\n image_f =cv2.cvtColor(hsvImg,cv2.COLOR_HSV2BGR)\r\n\r\n return image_f", "def colorize(self, deg):\n\n h, l, s = self.tohls()\n h = clamp(deg * HUE_SCALE, 0.0, 1.0)\n self.fromhls(h, l, s)", "def np_hsv_hue_histogram(h):\n figure = plt.figure()\n canvas = figure.canvas\n _, _, patches = plt.hist(h, bins=360)\n plt.title(\"HSV Hue Histogram, mean=%3.1f, std=%3.1f\" % (np.mean(h), np.std(h)))\n\n bin_num = 0\n for patch in patches:\n rgb_color = colorsys.hsv_to_rgb(bin_num / 360.0, 1, 1)\n patch.set_facecolor(rgb_color)\n bin_num += 1\n\n canvas.draw()\n w, h = canvas.get_width_height()\n np_hist = np.fromstring(canvas.get_renderer().tostring_rgb(), dtype=np.uint8).reshape(h, w, 3)\n plt.close(figure)\n util.np_info(np_hist)\n return np_hist", "def fromHSL(h, s, l):\n rgb = colorsys.hls_to_rgb(h,l,s)\n return IColor(rgb[0], rgb[1], rgb[2])", "def normalize(arr):\n arr = arr.astype('float')\n # Do not touch the alpha channel\n for i in range(1):\n minval = arr[...,i].min()\n maxval = arr[...,i].max()\n if minval != maxval:\n arr[...,i] -= minval\n arr[...,i] *= (255.0/(maxval-minval))\n return arr", "def applyHSV(img):\n\treturn applyColorMap(img, \"hsv\")", "def mel_to_hz(mels: torch.Tensor):\n return 700 * (10**(mels / 2595) - 1)", "def setHsla ( self, h, s = 0.0, l = 0.0, a = 1.0 ):\n #self.reset()\n\n # Check if argument is list\n if isinstance(h, list):\n s = h[1]\n l = h[2]\n if len(h) == 4: # Alpha in the list\n a = h[3]\n h = h[0]\n\n # If any component is int ( 0 - 255 ) convert to float ( 0 - 1)\n hsla = [ h, s, l, a ]\n for i in range(len(hsla)):\n if isinstance( hsla[i], int ):\n hsla[i] = hsla[i] / 360.0 if i == 0 else hsla[i] / 100.0\n\n self.h = hsla[0]\n self.s = hsla[1]\n self.l = hsla[2]\n self.a = hsla[3]\n self.hsl = [ hsla[0], hsla[1], hsla[2] ]\n self.hsla = [ hsla[0], hsla[1], hsla[2], hsla[3] ]\n\n self.updateFromHsl()", "def togray(self,value):\n 
(red,green,blue) = self.unpack_value(value)\n \n gray = []\n for i in xrange(1024):\n graypx = (0.299*float(red[i]) + 0.587*float(green[i]) +\n 0.114*float(blue[i]))/255.\n gray.append(graypx)\n \n return gray", "def unique_colors_rgb(n):\r\n hues = []\r\n # i is in the range 0, 1, ..., n - 1\r\n for i in range(1, n + 1):\r\n hues.append(360.0 / i)\r\n\r\n hs = []\r\n for hue in hues:\r\n h = math.floor(hue / 60) % 6\r\n hs.append(h)\r\n\r\n fs = []\r\n for hue in hues:\r\n f = hue / 60 - math.floor(hue / 60)\r\n fs.append(f)\r\n\r\n rgbcolors = []\r\n for h, f in zip(hs, fs):\r\n v = 1\r\n p = 0\r\n q = 1 - f\r\n t = f\r\n if h == 0:\r\n color = v, t, p\r\n elif h == 1:\r\n color = q, v, p\r\n elif h == 2:\r\n color = p, v, t\r\n elif h == 3:\r\n color = p, q, v\r\n elif h == 4:\r\n color = t, p, v\r\n elif h == 5:\r\n color = v, p, q\r\n rgbcolors.append(color)\r\n\r\n return rgbcolors", "def hls(self):\n return colorsys.rgb_to_hls(self.red, self.green, self.blue)", "def hs_color(self) -> tuple[float, float] | None:\n colour_json = self.tuya_device.status.get(self.dp_code_colour)\n if not colour_json:\n return None\n colour_data = json.loads(colour_json)\n s_range = self._tuya_hsv_s_range()\n return colour_data.get(\"h\", 0), self.remap(\n colour_data.get(\"s\", 0),\n s_range[0],\n s_range[1],\n HSV_HA_SATURATION_MIN,\n HSV_HA_SATURATION_MAX,\n )", "def _hz_to_semitones(self, hz):\n return np.log(hz / self._a440) / np.log(self._a)", "def updateFromRgb ( self ):\n hsl = self.rgbToHsl( self.r, self.g, self.b )\n self.h = hsl[0]\n self.s = hsl[1]\n self.l = hsl[2]\n self.hsl = hsl\n self.hsla = [ hsl[0], hsl[1], hsl[2], self.a ]", "def hsl(self):\n hue = ctypes.c_double(0.0)\n saturation = ctypes.c_double(0.0)\n lightness = ctypes.c_double(0.0)\n with self:\n library.PixelGetHSL(self.resource,\n ctypes.byref(hue),\n ctypes.byref(saturation),\n ctypes.byref(lightness))\n return (hue.value, saturation.value, lightness.value)", "def hsv_rep_color(probs, phis):\n h = (phis + np.pi) / 2 / np.pi\n s = np.ones_like(phis)\n v = probs\n\n hsv_image = np.stack([h, s, v], axis=-1)\n\n return hsv2rgb(hsv_image)", "def inverse_color_hex(hx):\n return inverse_color_rgb(hex_to_rgb(hx))", "def sinh(data):\n return _make.sinh(data)", "def hsl_to_rgb(hsl):\n try:\n h, s, l = hsl\n except TypeError:\n raise ValueError(hsl)\n try:\n h /= 360\n q = l * (1 + s) if l < 0.5 else l + s - l * s\n p = 2 * l - q\n except TypeError:\n raise ValueError(hsl)\n\n rgb = []\n for c in (h + 1 / 3, h, h - 1 / 3):\n if c < 0:\n c += 1\n elif c > 1:\n c -= 1\n\n if c < 1 / 6:\n c = p + (q - p) * 6 * c\n elif c < 0.5:\n c = q\n elif c < 2 / 3:\n c = p + (q - p) * 6 * (2 / 3 - c)\n else:\n c = p\n rgb.append(round(c * 255))\n\n return tuple(rgb)", "def normalise(self):\n return self.map_channels(lambda component: float(component) / 255.0)", "def convert_to_RGB_255(colors):\n return (colors[0]*255.0, colors[1]*255.0, colors[2]*255.0)", "def equalizeHist_color(img):\n image = np.empty(img.shape)\n for c in range(img.shape[2]):\n channel = img[:, :, c]\n channel = channel.astype(np.uint8)\n\n # CLAHE\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(2, 2))\n channel = clahe.apply(channel)\n\n # http://docs.opencv.org/3.1.0/d5/daf/tutorial_py_histogram_equalization.html\n channel = cv2.equalizeHist(channel)\n try:\n image[:, :, c] = channel\n except Exception as e:\n print(str(e))\n return image", "def hsv2rgb(t):\n r,g,b = hsv_to_rgb(*t)\n r *= 255\n g *= 255\n b *= 255\n return (int(r),int(g),int(b))", "def yiq2rgb(im_yiq):\n 
return multiply_by_left_matrix(np.linalg.inv(YIQ_MATRIX), im_yiq)", "def sRGBFromLabD50(lab):\n return xyzTosRGBD50(labToXYZ(lab, [0.9642957, 1, 0.8251046]))", "def convert_6_channels_to_rgb(channels):\n base = channels[0]\n # qual is the minimum of base quality and mapping quality at each position\n # 254 is the max value for quality scores because the SAM specification has\n # 255 reserved for unavailable values.\n qual = np.minimum(channels[1], channels[2])\n strand = channels[3]\n # alpha is <supports variant> * <base != reference>\n alpha = np.multiply(channels[4] / 254.0, channels[5] / 254.0)\n return np.multiply(np.stack([base, qual, strand]),\n alpha).astype(np.uint8).transpose([1, 2, 0])", "def hsl2rgb(hsl, prec=0):\n h, s, l = hsl[0] / 60, hsl[1], hsl[2]\n\n if not 0 <= h < 6:\n raise ValueError('The given H value is outside [0, 360)')\n elif not 0 <= s <= 1 or not 0 <= l <= 1:\n raise ValueError('The given S and/or L values are outside [0, 1]')\n\n c = (1 - abs(2 * l - 1)) * s\n m = l - c / 2\n\n if 0 <= h < 1:\n r, g, b = c, c * (1 - abs(h % 2 - 1)), 0\n elif 1 <= h < 2:\n r, g, b = c * (1 - abs(h % 2 - 1)), c, 0\n elif 2 <= h < 3:\n r, g, b = 0, c, c * (1 - abs(h % 2 - 1))\n elif 3 <= h < 4:\n r, g, b = 0, c * (1 - abs(h % 2 - 1)), c\n elif 4 <= h < 5:\n r, g, b = c * (1 - abs(h % 2 - 1)), 0, c\n else:\n r, g, b = c, 0, c * (1 - abs(h % 2 - 1))\n\n r, g, b = r + m, g + m, b + m\n\n return round(255 * r, prec), round(255 * g, prec), round(255 * b, prec)", "def HsvHistogram(self):\n if not self._hsvHistogram is 0:\n return self._hsvHistogram\n if not self._bicHistogram is 0:\n self._hsvHistogram = self._bicHistogram[:28] + self._bicHistogram[28:]\n return self._hsvHistogram\n hsvimg = self.HsvImage()\n #Note that in OpenCV hsv uses the ranges [0,179], [0,255] and [0,255] respectively\n histogram = numpy.zeros(28, dtype=numpy.float32)\n [width, height, depth] = hsvimg.shape\n for y in xrange(height):\n for x in xrange(width):\n histogram[self.HsvBin(x,y)] += 1\n \n histogram /= width*height\n \n sHistogram = numpy.zeros(28, dtype=numpy.float32)\n sHistogram[0] = 0.25 * histogram[20] + 0.5 * histogram[0] + 0.25 * histogram[1]\n sHistogram[20] = 0.5 * histogram[20] + 0.25 * histogram[0] + 0.25 * histogram[19]\n \n for i in xrange(1, 19):\n sHistogram[i] = 0.25 * histogram[i-1] + 0.5 * histogram[i] + 0.25 * histogram[i+1]\n \n self._hsvHistogram = sHistogram\n return sHistogram" ]
[ "0.6703506", "0.6296489", "0.6287106", "0.60505944", "0.59786993", "0.5978399", "0.59774935", "0.59652996", "0.5940715", "0.59046626", "0.58576584", "0.5831712", "0.58161163", "0.58112276", "0.5776856", "0.57461786", "0.57437605", "0.5730638", "0.5720245", "0.56746477", "0.5639751", "0.56256825", "0.56137604", "0.56137604", "0.55920374", "0.5565166", "0.5555471", "0.5552449", "0.55394083", "0.5534414", "0.5507941", "0.550009", "0.54960287", "0.5491378", "0.54867065", "0.548629", "0.54804796", "0.5470277", "0.5466227", "0.54493076", "0.5442034", "0.5441727", "0.5339469", "0.5338201", "0.53353715", "0.5333019", "0.5332985", "0.5332706", "0.53325623", "0.532952", "0.5329371", "0.532392", "0.5315917", "0.5302909", "0.52763903", "0.5271529", "0.5263153", "0.52420497", "0.5239855", "0.52345526", "0.5231351", "0.5207205", "0.51836234", "0.51745075", "0.51666754", "0.5161309", "0.51529855", "0.514886", "0.5141537", "0.5122807", "0.51171005", "0.51119965", "0.5095027", "0.5092464", "0.5083064", "0.50785315", "0.50778687", "0.5077431", "0.50677735", "0.50651425", "0.50597024", "0.50593424", "0.5053946", "0.5048047", "0.5045883", "0.50411373", "0.5033344", "0.5031116", "0.5013554", "0.5010312", "0.49967337", "0.49890718", "0.49872026", "0.4984333", "0.4984062", "0.4974013", "0.4964388", "0.49627635", "0.49584544", "0.4951921" ]
0.7039188
0
Converts an array [..., channels] of RGB values to Digital Y'CbCr (0-255). RGB values are assumed to be normalized to (0, 1). Don't forget to cast to uint8 for pillow.
def rgb_to_ycbcr(image: np.ndarray) -> np.ndarray: """ from RGB (0-1). """ if not is_rgb(image): raise ValueError("Input needs to be an array of RGB values") m = np.array( [ [+065.481, +128.553, +024.966], [-037.797, -074.203, +112.000], [+112.000, -093.786, -018.214], ] ) a = np.array([16, 128, 128]) return np.dot(image, m.T) + a
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rgb_to_ycbcr(image: torch.Tensor) -> torch.Tensor:\n r: torch.Tensor = image[..., 0, :, :]\n g: torch.Tensor = image[..., 1, :, :]\n b: torch.Tensor = image[..., 2, :, :]\n\n delta: float = 0.5\n y: torch.Tensor = 0.299 * r + 0.587 * g + 0.114 * b\n cb: torch.Tensor = (b - y) * 0.564 + delta\n cr: torch.Tensor = (r - y) * 0.713 + delta\n return torch.stack([y, cb, cr], -3)", "def ycbcr_to_rgb(image: torch.Tensor) -> torch.Tensor:\n y: torch.Tensor = image[..., 0, :, :]\n cb: torch.Tensor = image[..., 1, :, :]\n cr: torch.Tensor = image[..., 2, :, :]\n\n delta: float = 0.5\n cb_shifted: torch.Tensor = cb - delta\n cr_shifted: torch.Tensor = cr - delta\n\n r: torch.Tensor = y + 1.403 * cr_shifted\n g: torch.Tensor = y - 0.714 * cr_shifted - 0.344 * cb_shifted\n b: torch.Tensor = y + 1.773 * cb_shifted\n return torch.stack([r, g, b], -3)", "def yuv2rgb(im):\n ## conflicting definitions exist depending on whether you use the full range\n ## of YCbCr or clamp out to the valid range. see here\n ## http://www.equasys.de/colorconversion.html\n ## http://www.fourcc.org/fccyvrgb.php\n from numpy import dot, ndarray, array\n # if not im.dtype == 'uint8':\n # raise ImageUtilsError('yuv2rgb only implemented for uint8 arrays')\n\n ## better clip input to the valid range just to be on the safe side\n yuv = ndarray(im.shape) ## float64\n yuv[:, :, 0] = im[:, :, 0].clip(16, 235).astype(yuv.dtype) - 16\n yuv[:, :, 1:] = im[:, :, 1:].clip(16, 240).astype(yuv.dtype) - 128\n\n ## ITU-R BT.601 version (SDTV)\n A = array([[1., 0., 0.701],\n [1., -0.886 * 0.114 / 0.587, -0.701 * 0.299 / 0.587],\n [1., 0.886, 0.]])\n A[:, 0] *= 255. / 219.\n A[:, 1:] *= 255. / 112.\n\n ## ITU-R BT.709 version (HDTV)\n # A = array([[1.164, 0., 1.793],\n # [1.164, -0.213, -0.533],\n # [1.164, 2.112, 0.]])\n\n rgb = dot(yuv, A.T)\n return rgb.clip(0, 255).astype('uint8')", "def to_ycc(color):\n return rgb_to_ycc(*[x / 255.0 for x in color])", "def rgb_to_ycbcr(rgb):\n transform = np.matrix('.299, .587, .114; -.16874, -.33126, .5; .5, -.41869, -.08131')\n\n def apply_transform(x):\n return np.array(np.dot(transform, x))[0]\n\n return np.apply_along_axis(apply_transform, 2, rgb)", "def rgb_to_ycbcr(rgb_uint8):\n if rgb_uint8.dtype != numpy.uint8:\n raise TypeError('`rgb_uint8.dtype` is not equal to `numpy.uint8`.')\n \n # If the check below did not exist, `rgb_to_ycbcr` would\n # not crash if `rgb_uint8` is nD, n >= 4.\n if rgb_uint8.ndim != 3:\n raise ValueError('`rgb_uint8.ndim` is not equal to 3.')\n \n # If the check below did not exist, `rgb_to_ycbcr` would\n # not crash if `rgb_uint8.shape[2]` is larger than 4.\n if rgb_uint8.shape[2] != 3:\n raise ValueError('`rgb_uint8.shape[2]` is not equal to 3.')\n rgb_float64 = rgb_uint8.astype(numpy.float64)\n y_float64 = 0.299*rgb_float64[:, :, 0] \\\n + 0.587*rgb_float64[:, :, 1] \\\n + 0.114*rgb_float64[:, :, 2]\n cb_float64 = 128. \\\n - (0.299/1.772)*rgb_float64[:, :, 0] \\\n - (0.587/1.772)*rgb_float64[:, :, 1] \\\n + (0.886/1.772)*rgb_float64[:, :, 2]\n cr_float64 = 128. 
\\\n + (0.701/1.402)*rgb_float64[:, :, 0] \\\n - (0.587/1.402)*rgb_float64[:, :, 1] \\\n - (0.114/1.402)*rgb_float64[:, :, 2]\n ycbcr_float64 = numpy.stack((y_float64, cb_float64, cr_float64),\n axis=2)\n return cast_float_to_uint8(ycbcr_float64)", "def ycbcr_to_rgb(ycbcr):\n transform = np.matrix('.299, .587, .114; -.16874, -.33126, .5; .5, -.41869, -.08131')\n inverse = transform.getI()\n\n def apply_transform(ycbcr):\n return np.array(np.dot(inverse, ycbcr))[0]\n\n return np.apply_along_axis(apply_transform, 2, ycbcr)", "def rgb_to_ycbcr(img):\n\n T = np.array([\n [0.256788235294118, -0.148223529411765, 0.439215686274510],\n [0.504129411764706, -0.290992156862745, -0.367788235294118],\n [0.097905882352941, 0.439215686274510, -0.071427450980392],\n ], dtype=np.float64)\n\n O = np.array([16, 128, 128], dtype=np.float64)\n\n img = img.astype(np.float64)\n res = np.matmul(img, T) + O\n res = res.clip(0, 255).round().astype(np.uint8)\n\n return res", "def _rgb2y(self, im):\n if len(im.shape) < 3:\n return im\n return np.sum(im * [0.299, 0.587, 0.114], axis=2)", "def ycbcr_to_rgb(ycbcr_uint8):\n if ycbcr_uint8.dtype != numpy.uint8:\n raise TypeError('`ycbcr_uint8.dtype` is not equal to `numpy.uint8`.')\n \n # If the check below did not exist, `ycbcr_to_rgb` would\n # not crash if `ycbcr_uint8` is nD, n >= 4.\n if ycbcr_uint8.ndim != 3:\n raise ValueError('`ycbcr_uint8.ndim` is not equal to 3.')\n \n # If the check below did not exist, `ycbcr_to_rgb` would\n # not crash if `ycbcr_uint8.shape[2]` is larger than 4.\n if ycbcr_uint8.shape[2] != 3:\n raise ValueError('`ycbcr_uint8.shape[2]` is not equal to 3.')\n ycbcr_float64 = ycbcr_uint8.astype(numpy.float64)\n red_float64 = ycbcr_float64[:, :, 0] \\\n + 1.402*(ycbcr_float64[:, :, 2] - 128.)\n green_float64 = ycbcr_float64[:, :, 0] \\\n - (0.114*1.772*(ycbcr_float64[:, :, 1] - 128.)/0.587) \\\n - (0.299*1.402*(ycbcr_float64[:, :, 2] - 128.)/0.587)\n blue_float64 = ycbcr_float64[:, :, 0] \\\n + 1.772*(ycbcr_float64[:, :, 1] - 128.)\n rgb_float64 = numpy.stack((red_float64, green_float64, blue_float64),\n axis=2)\n return cast_float_to_uint8(rgb_float64)", "def generate_channels(path):\n # Abrir imagen y transformar a array\n image = Image.open(path)\n img_array = np.array(image)\n \n # Sacar RGB\n R = img_array[..., 0]\n G = img_array[..., 1]\n B = img_array[..., 2]\n \n return (R, G, B)", "def bgr_to_yuv_channels(matrix):\n yuv_matrix = cv2.cvtColor(matrix, cv2.COLOR_BGR2YUV)\n return cv2.split(yuv_matrix)", "def save_images(inputY, inputCbCr, size, image_path):\n def merge(images, size):\n h, w = images.shape[1], images.shape[2]\n img = np.zeros((h * size[0], w * size[1], 3))\n for idx, image in enumerate(images):\n i = idx % size[1]\n j = idx // size[1]\n img[j*h:j*h+h, i*w:i*w+w, :] = image\n return img\n\n inputY = inputY.astype('uint8')\n inputCbCr = inputCbCr.astype('uint8')\n output_concat = np.concatenate((inputY, inputCbCr), axis=3)\n\n assert len(output_concat) <= size[0] * size[1], \"number of images should be equal or less than size[0] * size[1] {}\".format(len(output_concat))\n\n new_output = merge(output_concat, size)\n\n new_output = new_output.astype('uint8')\n\n img = Image.fromarray(new_output, mode='YCbCr')\n img = img.convert('RGB')\n img.save(image_path)", "def rgb_to_bit(rgb_array):\r\n return [[round_color(elem) for elem in row] for row in rgb_array]", "def RGB2BGR(x):\n out = cv2.cvtColor(x, cv2.COLOR_RGB2BGR)\n return out", "def yuv_channels_to_bgr_image(y_channel, u_channel, v_channel):\n yuv_image = 
cv2.merge((y_channel.astype(np.float32), u_channel.astype(np.float32), v_channel.astype(np.float32)))\n bgr_image = cv2.cvtColor(yuv_image, cv2.COLOR_YUV2BGR)\n return bgr_image", "def bgr_to_ycrcb(matrix3d: np.ndarray) -> np.ndarray:\n return np.apply_along_axis(bgr_pixel_to_ycrcb, 2, matrix3d)", "def brc(x):\n N = len(x)\n y = np.zeros(N, dtype=complex)\n width = int(np.log2(N))\n for ii in np.arange(N):\n idx = '{:0{width}b}'.format(ii, width=width)\n y[ii] = x[int(idx[::-1],2)]#Reverse order of bits of integer ii\n return y", "def skin_detect_ycbcr(frame):\n Cr_min, Cr_max, Cb_min, Cb_max = 133, 150, 77, 127\n # Constants for finding range of skin color in YCrCb\n min_YCrCb = np.array([0,Cr_min,Cb_min], np.uint8)\n max_YCrCb = np.array([255,Cr_max,Cb_max], np.uint8)\n\n # Convert image to YCrCb\n imageYCrCb = cv2.cvtColor(frame, cv2.COLOR_BGR2YCR_CB)\n # Find region with skin tone in YCrCb image\n skinRegion = cv2.inRange(imageYCrCb, min_YCrCb, max_YCrCb) \n # Do contour detection on skin region\n _, contours, hierarchy = cv2.findContours(skinRegion, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n return imageYCrCb, contours, hierarchy", "def yuv_to_rgb(img_yuv):\n\n y = img_yuv[..., 0]\n u = img_yuv[..., 1]\n v = img_yuv[..., 2]\n\n r = y + 1.14 * v\n g = y - 0.396 * u - 0.581 * v\n b = y + 2.029 * u\n\n img_rgb = np.stack((r, g, b), axis=2)\n img_rgb = np.clip(img_rgb, 0, 1)\n return img_rgb", "def yuv2bgr(tens: Tensor) -> Tensor:\n if not _is_yuv_image(tens):\n raise ValueError(\n f\"Tensor of shape 3 expected. Found shape {len(tens.shape)}. \"\n \"This function converts an YUV Tensor to its BGR counterpart\"\n )\n\n img = cv.cvtColor(tens, YUV2BGR)\n return to_tensor(img, cspace=\"bgr\")", "def yuv_to_ycbcr(yuv, bit_depth=10):\n\n bit_multi = 2 ** (bit_depth - 8)\n y_coef = 219 * bit_multi\n y_offset = 16 * bit_multi\n cbcr_coef = 224 * bit_multi\n cbcr_offset = 128 * bit_multi\n\n ycbcr = yuv.copy()\n ycbcr[:, 0] = np.round(ycbcr[:, 0] * y_coef + y_offset)\n ycbcr[:, 1] = np.round(ycbcr[:, 1] * cbcr_coef + cbcr_offset)\n ycbcr[:, 2] = np.round(ycbcr[:, 2] * cbcr_coef + cbcr_offset)\n\n return ycbcr", "def convert_yuv_to_rgb(img_arr): \n rgb = cv2.cvtColor(img_arr, cv2.COLOR_YUV2BGR_I420)\n rgb = cv2.cvtColor(rgb, cv2.COLOR_BGR2RGB)\n return Image.fromarray(rgb)", "def yiq2rgb(imYIQ):\n trans = np.array([[1, 0.956, 0.62], [1, -0.272, -0.647], [1, -1.108, 1.705]])\n return np.dot(imYIQ, trans)", "def ycbcr_to_yuv(ycbcr, bit_depth=10):\n\n bit_multi = 2 ** (bit_depth - 8)\n y_coef = 219 * bit_multi\n y_offset = 16 * bit_multi\n cbcr_coef = 224 * bit_multi\n cbcr_offset = 128 * bit_multi\n\n ycbcr_tmp = ycbcr.copy()\n ycbcr_tmp[:, 0] = (ycbcr_tmp[:, 0] - y_offset) / y_coef\n ycbcr_tmp[:, 1] = (ycbcr_tmp[:, 1] - cbcr_offset) / cbcr_coef\n ycbcr_tmp[:, 2] = (ycbcr_tmp[:, 2] - cbcr_offset) / cbcr_coef\n\n return ycbcr_tmp", "def transformRGB2YIQ(imgRGB: np.ndarray) -> np.ndarray:\r\n YIQ_from_RGB = np.array([[0.299, 0.587, 0.114],\r\n [0.59590059, -0.27455667, -0.32134392],\r\n [0.21153661, -0.52273617, 0.31119955]])\r\n YIQImg = np.ndarray(imgRGB.shape)\r\n\r\n YIQImg[:, :, 0] = YIQ_from_RGB[0,0] * imgRGB[:, :, 0] + YIQ_from_RGB[0,1] * imgRGB[:, :, 1] + YIQ_from_RGB[0,2] * imgRGB[:, :, 2]\r\n YIQImg[:, :, 1] = YIQ_from_RGB[1,0] * imgRGB[:, :, 0] + YIQ_from_RGB[1,1] * imgRGB[:, :, 1] + YIQ_from_RGB[1,2] * imgRGB[:, :, 2]\r\n YIQImg[:, :, 2] = YIQ_from_RGB[2,0] * imgRGB[:, :, 0] + YIQ_from_RGB[2,1] * imgRGB[:, :, 1] + YIQ_from_RGB[2,2] * imgRGB[:, :, 2]\r\n\r\n return YIQImg", "def 
convert_6_channels_to_rgb(channels):\n base = channels[0]\n # qual is the minimum of base quality and mapping quality at each position\n # 254 is the max value for quality scores because the SAM specification has\n # 255 reserved for unavailable values.\n qual = np.minimum(channels[1], channels[2])\n strand = channels[3]\n # alpha is <supports variant> * <base != reference>\n alpha = np.multiply(channels[4] / 254.0, channels[5] / 254.0)\n return np.multiply(np.stack([base, qual, strand]),\n alpha).astype(np.uint8).transpose([1, 2, 0])", "def rgb2yuv(image):\n return cv2.cvtColor(image, cv2.COLOR_RGB2YUV)", "def rgb2yuv(r, g, b, mode='444'):\n r = 255 * r\n g = 255 * g\n b = 255 * b\n y = 00.257 * r + 0.504 * g + 0.098 * b + 16\n u = -0.148 * r - 0.291 * g + 0.439 * b + 128\n v = 00.439 * r - 0.368 * g - 0.071 * b + 128\n if mode == '420':\n y, u, v = YUV_change_mode(y, u, v, '444to420')\n return (y / 255), (u / 255), (v / 255)", "def yiq2rgb(imYIQ):\n return np.dot(imYIQ, np.linalg.inv(np.array(MATRIX).T))", "def yuv_bytes(self):\n r, g, b = self.rgb_bytes\n return (\n (( 66 * r + 129 * g + 25 * b + 128) >> 8) + 16,\n ((-38 * r - 73 * g + 112 * b + 128) >> 8) + 128,\n ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128,\n )", "def toimage(arr, high=255, low=0, cmin=None, cmax=None, pal=None,\n mode=None, channel_axis=None):\n #####CRIAR BATCHES COM POTÊNCIAS DE 2 PARA RESOLVER O PROBLEMA DE 450 SAMPLES DA\n data = np.asarray(arr)\n if np.iscomplexobj(data):\n raise ValueError(\"Cannot convert a complex-valued array.\")\n shape = list(data.shape)\n valid = len(shape) == 2 or ((len(shape) == 3) and\n ((3 in shape) or (4 in shape)))\n if not valid:\n raise ValueError(\"'arr' does not have a suitable array shape for \"\n \"any mode.\")\n if len(shape) == 2:\n shape = (shape[1], shape[0]) # columns show up first\n if mode == 'F':\n data32 = data.astype(np.float32)\n image = Image.frombytes(mode, shape, data32.tostring())\n return image\n if mode in [None, 'L', 'P']:\n bytedata = bytescale(data, high=high, low=low,\n cmin=cmin, cmax=cmax)\n image = Image.frombytes('L', shape, bytedata.tostring())\n if pal is not None:\n image.putpalette(np.asarray(pal, dtype=np.uint8).tostring())\n # Becomes a mode='P' automagically.\n elif mode == 'P': # default gray-scale\n pal = (np.arange(0, 256, 1, dtype=np.uint8)[:, np.newaxis] *\n np.ones((3,), dtype=np.uint8)[np.newaxis, :])\n image.putpalette(np.asarray(pal, dtype=np.uint8).tostring())\n return image\n if mode == '1': # high input gives threshold for 1\n bytedata = (data > high)\n image = Image.frombytes('1', shape, bytedata.tostring())\n return image\n if cmin is None:\n cmin = np.amin(np.ravel(data))\n if cmax is None:\n cmax = np.amax(np.ravel(data))\n data = (data*1.0 - cmin)*(high - low)/(cmax - cmin) + low\n if mode == 'I':\n data32 = data.astype(np.uint32)\n image = Image.frombytes(mode, shape, data32.tostring())\n else:\n raise ValueError(_errstr)\n return image\n\n # if here then 3-d array with a 3 or a 4 in the shape length.\n # Check for 3 in datacube shape --- 'RGB' or 'YCbCr'\n if channel_axis is None:\n if (3 in shape):\n ca = np.flatnonzero(np.asarray(shape) == 3)[0]\n else:\n ca = np.flatnonzero(np.asarray(shape) == 4)\n if len(ca):\n ca = ca[0]\n else:\n raise ValueError(\"Could not find channel dimension.\")\n else:\n ca = channel_axis\n\n numch = shape[ca]\n if numch not in [3, 4]:\n raise ValueError(\"Channel axis dimension is not valid.\")\n\n bytedata = bytescale(data, high=high, low=low, cmin=cmin, cmax=cmax)\n if ca == 2:\n strdata = 
bytedata.tostring()\n shape = (shape[1], shape[0])\n elif ca == 1:\n strdata = np.transpose(bytedata, (0, 2, 1)).tostring()\n shape = (shape[2], shape[0])\n elif ca == 0:\n strdata = np.transpose(bytedata, (1, 2, 0)).tostring()\n shape = (shape[2], shape[1])\n if mode is None:\n if numch == 3:\n mode = 'RGB'\n else:\n mode = 'RGBA'\n\n if mode not in ['RGB', 'RGBA', 'YCbCr', 'CMYK']:\n raise ValueError(_errstr)\n\n if mode in ['RGB', 'YCbCr']:\n if numch != 3:\n raise ValueError(\"Invalid array shape for mode.\")\n if mode in ['RGBA', 'CMYK']:\n if numch != 4:\n raise ValueError(\"Invalid array shape for mode.\")\n\n # Here we know data and mode is correct\n image = Image.frombytes(mode, shape, strdata)\n return image", "def convert_rgb_cmyk(rcol, gcol, bcol):\n if (rcol == 0) and (gcol == 0) and (bcol == 0):\n # black\n return 0, 0, 0, 1\n\n kcol = 1-max(rcol, gcol, bcol)\n ccol = (1-rcol-kcol)/(1-kcol)\n mcol = (1-gcol-kcol)/(1-kcol)\n ycol = (1-bcol-kcol)/(1-kcol)\n\n return ccol, mcol, ycol, kcol", "def transformYIQ2RGB(imgYIQ: np.ndarray) -> np.ndarray:\r\n yiq_from_rgb = np.array([[0.299, 0.587, 0.114],\r\n [0.59590059, -0.27455667, -0.32134392],\r\n [0.21153661, -0.52273617, 0.31119955]])\r\n rgb_from_yiq = np.linalg.inv(yiq_from_rgb)\r\n\r\n RGBImg = np.ndarray(imgYIQ.shape)\r\n\r\n RGBImg[:, :, 0] = rgb_from_yiq[0,0] * imgYIQ[:, :, 0] + rgb_from_yiq[0,1] * imgYIQ[:, :, 1] + rgb_from_yiq[0,2] * imgYIQ[:, :, 2]\r\n RGBImg[:, :, 1] = rgb_from_yiq[1,0] * imgYIQ[:, :, 0] + rgb_from_yiq[1,1] * imgYIQ[:, :, 1] + rgb_from_yiq[1,2] * imgYIQ[:, :, 2]\r\n RGBImg[:, :, 2] = rgb_from_yiq[2,0] * imgYIQ[:, :, 0] + rgb_from_yiq[2,1] * imgYIQ[:, :, 1] + rgb_from_yiq[2,2] * imgYIQ[:, :, 2]\r\n\r\n return RGBImg", "def _convert_to_yolo_img(self, img):\n\n img = img / 255.0\n h, w, c = img.shape\n img = img.transpose(2, 0, 1)\n outimg = make_image(w, h, c)\n img = img.reshape((w*h*c))\n data = c_array(c_float, img)\n outimg.data = data\n rgbgr_image(outimg)\n return outimg", "def decode_frame(self, buf):\n import numpy as np\n from cv2 import cvtColor\n\n w, h = self._resolution\n arr = np.fromstring(buf, 'uint8').reshape((h + h / 2, w))\n arr = cvtColor(arr, 93) # NV21 -> BGR\n return arr", "def yiq2rgb(im_yiq):\n return multiply_by_left_matrix(np.linalg.inv(YIQ_MATRIX), im_yiq)", "def yiq2rgb(imYIQ):\n return np.dot(imYIQ, np.linalg.inv(TRANSFORM).T.copy())", "def yiq2rgb(imYIQ):\n return __image_color_conversion(imYIQ, YIQ_TO_RGB_MATRIX)", "def rgb_to_yuv(img_rgb):\n\n r = img_rgb[..., 0]\n g = img_rgb[..., 1]\n b = img_rgb[..., 2]\n\n y = 0.299 * r + 0.587 * g + 0.114 * b\n u = 0.493 * (b - y)\n v = 0.877 * (r - y)\n\n img_yuv = np.stack((y, u, v), axis=2)\n return img_yuv", "def to_blue(array):\n result = np.zeros(array.shape)\n\n for row in range(result.shape[0]):\n for pixel in range(result.shape[1]):\n result[row][pixel][2] = array[row][pixel][2]\n\n return result", "def get_channels(hexcode):\n assert len(hexcode) in (7, 9)\n assert hexcode[0] == \"#\"\n rgb = hexcode[1:3], hexcode[3:5], hexcode[5:7], hexcode[7:]\n rgb = [int(x, 16) for x in rgb if x != \"\"]\n return np.array(rgb, dtype=np.uint8)", "def blue_channel(img):\n\n blue = np.zeros(img.shape,dtype=float)\n\n blue[:,:,0] = np.copy(img[:,:,0])\n\n return blue", "def convert_to_numpy(color_frame, aligned_depth_frame):\n depth_image = np.asanyarray(aligned_depth_frame.get_data())\n frame = np.asanyarray(color_frame.get_data())\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n return frame, depth_image", "def bitmap(arr, dc):\n 
wiringPy.digital_write(pin_DC, dc)\n wiringPy.digital_write_serial_array(0, struct.pack('B'*len(arr), *arr))", "def rgb2yiq(imRGB):\n return __image_color_conversion(imRGB, RGB_TO_YIQ_MATRIX)", "def rgb2Lab(rgbvalue):\r\n RGB2Lab_Matrix = np.array([[0.412453, 0.357580, 0.180423],\r\n [0.212671, 0.715160, 0.072169],\r\n [0.019334, 0.119193, 0.950227]])\r\n R = rgbvalue[0]\r\n G = rgbvalue[1]\r\n B = rgbvalue[2]\r\n gammaR = gamma(R / 255.0)\r\n gammaG = gamma(G / 255.0)\r\n gammaB = gamma(B / 255.0)\r\n RGBvalue = np.array([gammaR, gammaG, gammaB])\r\n RGBvalue = RGBvalue.reshape(3, 1)\r\n XYZvalue = np.dot(RGB2Lab_Matrix, RGBvalue)\r\n assert XYZvalue.shape == (3, 1)\r\n correction = np.array([[1.0 / 0.950456, 1.0, 1.0 / 1.088754]]).T\r\n assert correction.shape == (3, 1)\r\n XYZ = XYZvalue * correction\r\n assert XYZ.shape == (3, 1)\r\n YYn = ft(XYZ[1])\r\n XXn = ft(XYZ[0])\r\n ZZn = ft(XYZ[2])\r\n L = 116 * YYn - 16\r\n a = 500 * (XXn - YYn)\r\n b = 200 * (YYn - ZZn)\r\n return [int(L), int(a), int(b)]", "def greyScaleConversion(frame):\n return cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)", "def denormalize_frames(frames):\n new_frames = frames + 1\n new_frames *= (255 / 2)\n # noinspection PyUnresolvedReferences\n new_frames = new_frames.astype(np.uint8)\n\n return new_frames", "def rbg_to_bayer_bg(image: np.ndarray) -> np.ndarray:\n # there is only one channel but it still needs the third dimension, so that\n # the conversion to a cv::Mat in C++ is easier\n bayer_img = np.zeros((image.shape[0], image.shape[1], 1), dtype=np.uint8)\n\n # channel names, assuming input is RGB\n CHANNEL_RED = 0\n CHANNEL_GREEN = 1\n CHANNEL_BLUE = 2\n\n # channel map to get the following pattern (called \"BG\" in OpenCV):\n #\n # RG\n # GB\n #\n channel_map = {\n (0, 0): CHANNEL_RED,\n (1, 0): CHANNEL_GREEN,\n (0, 1): CHANNEL_GREEN,\n (1, 1): CHANNEL_BLUE,\n }\n\n for r in range(image.shape[0]):\n for c in range(image.shape[1]):\n channel = channel_map[(r % 2, c % 2)]\n bayer_img[r, c] = image[r, c, channel]\n\n return bayer_img", "def rgb2lab(r, g, b):\n r, g, b = r / 255.0, g / 255.0, b / 255.0\n\n # http://www.brucelindbloom.com/index.html?Math.html\n # Inverse sRGB Companding\n r = r / 12.92 if r <= 0.04045 else ((r + 0.055) / 1.055) ** 2.4\n g = g / 12.92 if g <= 0.04045 else ((g + 0.055) / 1.055) ** 2.4\n b = b / 12.92 if b <= 0.04045 else ((b + 0.055) / 1.055) ** 2.4\n\n # http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html\n # sRGB, D65\n x = r * 0.4124564 + g * 0.3575761 + b * 0.1804375\n y = r * 0.2126729 + g * 0.7151522 + b * 0.0721750\n z = r * 0.0193339 + g * 0.1191920 + b * 0.9503041\n\n # http://www.brucelindbloom.com/index.html?Eqn_XYZ_to_Lab.html\n kappa, epsilon = 903.3, 0.008856\n\n # http://brucelindbloom.com/index.html?Eqn_ChromAdapt.html\n # White point for D65\n xr, yr, zr = x / 0.95047, y / 1.00000, z / 1.08883\n\n fx = xr ** (1 / 3.0) if xr > epsilon else (kappa * xr + 16) / 116.0\n fy = yr ** (1 / 3.0) if yr > epsilon else (kappa * yr + 16) / 116.0\n fz = zr ** (1 / 3.0) if zr > epsilon else (kappa * zr + 16) / 116.0\n\n l = 166.0 * fy - 16.0\n a = 500.0 * (fx - fy)\n b = 200.0 * (fy - fz)\n\n return l, a, b", "def data_to_bytescale_rgb(data): # used to create the SOURCE PNGs (MRI, FA, MD)\n im = bytescale(data)\n w, h = im.shape\n ret = np.empty((w,h,3), dtype=np.uint8)\n ret[:,:,0] = im\n ret[:,:,1] = im\n ret[:,:,2] = im\n return ret", "def rgb2yiq(im_rgb):\n return multiply_by_left_matrix(YIQ_MATRIX, im_rgb)", "def get_color_codes(y):\n y = column_or_1d(y)\n\n # 
inliers are assigned blue\n c = np.full([len(y)], 'b', dtype=str)\n outliers_ind = np.where(y == 1)\n\n # outlier are assigned red\n c[outliers_ind] = 'r'\n\n return c", "def gray2rgb(rgb, y, im_yiq):\n if rgb: # if the original image was RGB, then convert back to RGB format\n im_yiq[:, :, 0] = y/(BITS - 1)\n y = np.clip(yiq2rgb(im_yiq), 0, 1)\n else:\n y = (y/(BITS - 1)).astype(np.float64)\n return y", "def rescale_image_0255(image):\n # scale image to from [0.0, 1.0] to [0, 255]\n image *= 255\n return image.astype(np.uint8)", "def get_d65_spectrum():\n\n filename = os.path.dirname(os.path.abspath(__file__))\\\n + os.path.normpath(\"/data/d65_spectrum.csv\")\n data = np.loadtxt(filename, delimiter=',', skiprows=1).T\n\n return np.uint16(data[0]), data[1]", "def convertData(img):\n dataset = []\n for i in img:\n dataset.append(format(ord(i), '08b'))\n return dataset", "def colorPaletteToRGB(image_data,color_table): \n color_table_array = numpy.array([ord(c) for c in color_table])\n n_colors = color_table_array.size / 3\n color_table_array = color_table_array.reshape((n_colors,3))\n channels = [color_table_array[image_data,i] for i in range(3)]\n return channels", "def rgb2gbc(rgb):\n c = (rgb[0] >> 3) + ((rgb[1] >> 3) << 5) + ((rgb[2] >> 3) << 10)\n return c", "def convert_color(img, conv='RGB2YCrCb'):\n if conv == 'RGB2YCrCb':\n return cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)\n if conv == 'BGR2YCrCb':\n return cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)\n if conv == 'RGB2LUV':\n return cv2.cvtColor(img, cv2.COLOR_RGB2LUV)", "def rgb2yiq(imRGB):\n return np.dot(imRGB, TRANSFORM.T.copy())", "def unpack_argb8(value):\n a = ((value >> 6) & 0x03) * 0x55\n r = ((value >> 4) & 0x03) * 0x55\n g = ((value >> 2) & 0x03) * 0x55\n b = ((value) & 0x03) * 0x55\n return (r, g, b, a)", "def rgb2gray(rgb):\n # this will translate a uint8 array into a float64 one\n grey = np.dot(rgb[..., :3], [0.299, 0.587, 0.114])\n # transform back if the input is a uint8 array\n if rgb.dtype.type is np.uint8:\n grey = round(grey).astype(np.uint8)\n return grey", "def sRGBToLabD50(rgb):\n return xyzToLab(xyzFromsRGBD50(rgb), [0.9642957, 1, 0.8251046])", "def convert_grayscale_to_rgb(x: np.ndarray) -> np.ndarray:\n return np.stack((x, ) * 3, axis=-1)", "def get_numpy_array(self):\r\n\r\n # This holds the obect's spectral data, and will be passed to\r\n # numpy.array() to create a numpy array (matrix) for the matrix math\r\n # that will be done during the conversion to XYZ.\r\n values = []\r\n\r\n # Use the required value list to build this dynamically. 
Default to\r\n # 0.0, since that ultimately won't affect the outcome due to the math\r\n # involved.\r\n for val in self.VALUES:\r\n values.append(getattr(self, val, 0.0))\r\n\r\n # Create and the actual numpy array/matrix from the spectral list.\r\n color_array = numpy.array([values])\r\n return color_array", "def rgb2yiq(imRGB):\n trans = np.array([[0.299, 0.587, 0.114], [0.596, -0.275, -0.321], [0.212, -0.523, 0.311]])\n return np.dot(imRGB, trans)", "def rgb2yiq(imRGB):\n return np.dot(imRGB, np.array(MATRIX).T)", "def check_rgb(image):\n im_yiq = []\n rgb = False\n y = image\n if len(image.shape) > 2 and image.shape[-1] == 3: # The image is RGB\n rgb = True\n im_yiq = rgb2yiq(image) # convert to YIQ format\n y = im_yiq[:, :, 0]\n return rgb, y, im_yiq", "def reduceColorRGB(channels,levels):\n data = (levels[2]*levels[1]*reduceColor(channels[0],levels[0])+\n levels[2]*reduceColor(channels[1],levels[1])+\n reduceColor(channels[2],levels[2])).astype(numpy.uint8)\n return data", "def yuv(self):\n r, g, b = self.rgb\n y = 0.299 * r + 0.587 * g + 0.114 * b\n return (\n y,\n 0.492 * (b - y),\n 0.877 * (r - y),\n )", "def one_2_uint8(one_arr):\n assert (one_arr.dtype == 'float' and np.max(one_arr <= 1.0)), \\\n 'improc.one_2_uint8() only accepts floats arrays from 0 to 1.'\n return (255*one_arr).astype('uint8')", "def bgr_to_rgb(ims):\n out = []\n for im in ims:\n out.append(im[:,:,::-1])\n return out", "def decode_image(file_location=\"images/encoded_sample.png\"):\n encoded_image = Image.open(file_location)\n red_channel = encoded_image.split()[0]\n\n\n x_size = encoded_image.size[0]\n y_size = encoded_image.size[1]\n\n\n decoded_image = Image.new(\"RGB\", encoded_image.size)\n pixels = decoded_image.load()\n for x in range(x_size):\n for y in range(y_size):\n red_pixel = red_channel.getpixel((x,y))\n binary = bin(red_pixel)\n\n lsb = int(binary[-1])\n if(lsb == 0):\n pixels[x,y] = (0,0,0)\n elif(lsb == 1):\n pixels[x,y] = (255,255,255)\n\n pass\n decoded_image.save(\"images/decoded_image.png\")", "def rgb_data(video_file, size, nframes=None):\r\n\r\n # Load video into numpy array\r\n w, h, buf = _raw_numpy_array(video_file, nframes=nframes)\r\n\r\n # Scale pixels between -1 and 1\r\n buf[0, :] = ((buf[0, :] / 255.0) * 2) - 1\r\n\r\n # Select center crop from the video\r\n return _crop_video(buf, (w, h), size)", "def lab_to_rgb(img):\n new_img = np.zeros((256, 256, 3))\n for i in range(len(img)):\n for j in range(len(img[i])):\n pix = img[i, j]\n new_img[i, j] = [(pix[0] + 1) * 50, (pix[1] + 1) / 2 * 255 - 128, (pix[2] + 1) / 2 * 255 - 128]\n new_img = color.lab2rgb(new_img) * 255\n new_img = new_img.astype('uint8')\n return new_img", "def convert_to_crf_format(y_tr, y_val):\n # convert data for CRF format\n crf_y_tr = [to_categorical(i, num_classes=2) for i in y_tr]\n crf_y_val = [to_categorical(i, num_classes=2) for i in y_val]\n\n crf_y_tr = np.array(crf_y_tr)\n crf_y_val = np.array(crf_y_val)\n return crf_y_tr, crf_y_val", "def convert_process_buffer(self):\r\n black_buffer = [cv2.resize(cv2.cvtColor(x, cv2.COLOR_RGB2GRAY), (84, 90))\r\n for x in self.process_buffer]\r\n black_buffer = [x[1:85, :, np.newaxis] for x in black_buffer]\r\n return np.concatenate(black_buffer, axis=2)", "def get_rgb_from_bits(self, bits):\n bits.shape = (self._dest_height,self._dest_width,4)\n self._rgb[0] = bits[:,:,2]\n self._rgb[1] = bits[:,:,1]\n self._rgb[2] = bits[:,:,0]\n return self._rgb", "def equalize_BGR_image(image):\n\n b, g, r = cv.split(image)\n b = equalize_image_channel(b)\n g = 
equalize_image_channel(g)\n r = equalize_image_channel(r)\n return cv.merge((b,g,r))", "def get_rgb(self, img, r, g, b):\r\n\r\n # Get specific bands of hyperspectral image\r\n red_channel = img[:, :, r]\r\n green_channel = img[:, :, g]\r\n blue_channel = img[:, :, b]\r\n\r\n img = np.stack((red_channel, green_channel, blue_channel), axis=2)\r\n img = img.astype('float32')\r\n return img", "def yuv2rgb(tens: Tensor) -> Tensor:\n if not _is_yuv_image(tens):\n raise ValueError(\n f\"Tensor of shape 3 expected. Found shape {len(tens.shape)}.\" \n \"This function converts a YUV Tensor to its RGB counterpart\"\n )\n\n img = cv.cvtColor(tens, YUV2RGB)\n return to_tensor(img, cspace=\"rgb\")", "def _rgb_to_binary(rgb: tuple) -> tuple:\n if len(rgb) != 3:\n raise ValueError(\"RGB must be a tuple with 3 values\")\n\n red, green, blue = tuple(map(int, rgb))\n\n r_binary = format(red, \"08b\")\n g_binary = format(green, \"08b\")\n b_binary = format(blue, \"08b\")\n\n return (r_binary, g_binary, b_binary)", "def srgb_to_rgb(srgb):\n\n ret = np.zeros_like(srgb)\n idx0 = srgb <= 0.04045\n idx1 = srgb > 0.04045\n ret[idx0] = srgb[idx0] / 12.92\n ret[idx1] = np.power((srgb[idx1] + 0.055) / 1.055, 2.4)\n return ret", "def gbc2rgb(c):\n #GBC format: 0bbbbbgggggrrrrr (b-blue, g-green, r-red)\n r = (c % (1 << 5)) << 3\n g = ((c / (1 << 5)) % (1 << 5)) << 3\n b = ((c / (1 << 10)) % (1 << 5)) << 3\n return (r,g,b)", "def create_composite_rgb_values(vk4_container, layer_list):\n log.debug(\"Entering create_composite_rgb_values()\")\n width = vk4_container.image_width\n height = vk4_container.image_height\n comp_rgb_array = np.zeros(width * height, dtype=np.uint32)\n\n for lay in layer_list:\n i = 0\n for rgb in lay:\n comp_rgb_array[i] = comp_rgb_array[i] + ((rgb[0] << 16) + (rgb[1] << 8) + (rgb[2]))\n i = i + 1\n log.debug(\"In create_composite_rgb_values()\\n\\tComposite RGB array:\\n\\t{}\" \\\n .format(comp_rgb_array))\n\n log.debug(\"Exiting create_composite_rgb_values()\")\n return comp_rgb_array", "def convert_image_np(inp):\n inp = inp.numpy().transpose((1, 2, 0))\n inp = (inp*255).astype(np.uint8)\n return inp", "def blue_channel(image: Image) -> Image:\r\n new_image = copy(image)\r\n for x, y, (r, g, b) in image:\r\n r, g, b = 0, 0, b\r\n blue = create_color(r, g, b)\r\n set_color(new_image, x, y, blue)\r\n return new_image", "def to_nibble_array(arr: ndarray) -> ndarray:\n arr = arr.ravel()\n return (arr[::2] + (arr[1::2] << 4)).astype(\"uint8\")", "def RGB_to_CMY(cobj, *args, **kwargs):\r\n \r\n cmy_c = 1.0 - cobj.rgb_r\r\n cmy_m = 1.0 - cobj.rgb_g\r\n cmy_y = 1.0 - cobj.rgb_b\r\n \r\n return CMYColor(cmy_c, cmy_m, cmy_y)", "def _colored_img_to_arr(image, verbose=False):\n height, width = image.size\n arr = np.array(image.getdata())\n arr = arr.reshape(3, height, width)\n r = arr[0]\n g = arr[1]\n b = arr[2]\n return r, g, b", "def example_lab_to_rgb():\r\n\r\n print(\"=== RGB Example: Lab->RGB ===\")\r\n # Instantiate an Lab color object with the given values.\r\n lab = LabColor(0.903, 16.296, -2.217)\r\n # Show a string representation.\r\n print(lab)\r\n # Convert to XYZ.\r\n rgb = convert_color(lab, sRGBColor)\r\n print(rgb)\r\n print(\"=== End Example ===\\n\")", "def get_color_in_rgb_decimal():\n\n # Grabbing custom colormap from matplotlib\n a = cm.get_cmap('cool', 32)\n b = cm.get_cmap('spring', 32)\n c = cm.get_cmap('autumn_r', 64)\n d = cm.get_cmap('bwr_r', 192)\n e = cm.get_cmap('Greens', 192)\n\n # Adding the colormaps into one stack to have a more comprehensive color spectrum \n newcolors 
= np.vstack((a(np.linspace(0, 1, 32)), \n b(np.linspace(0, 1, 32)), \n c(np.linspace(0, 1, 64)),\n d(np.linspace(0, 0.5, 192)),\n e(np.linspace(0, 1, 192)),\n ))\n return newcolors", "def decode_image(path):\n\n img = Image.open(path)\n image_width = img.width\n image_height = img.height\n pixels = np.array(img)\n\n print(pixels[0])\n bits = []\n\n for i in range(image_height):\n for j in range(image_width):\n bits.append(pixels[i][j][0] & ((1 << 1) - 1))\n\n bytes_l = [int(\"\".join(map(str, bits[i:i + 8])), 2) for i in range(0, len(bits), 8)]\n decoded_message = ''.join(map(chr, bytes_l))\n img.close()\n\n return decoded_message", "def compress(block):\n\n # Transform RGB to YCbCr\n yc_bl = np.zeros((8, 8, 3), dtype=np.int8)\n \n for i in range(8):\n for j in range(8):\n rgb_cmp = np.asmatrix(block[i][j])\n y,cb,cr = (np.array((rgb_cmp*yc_mat+yc_pad).astype(np.uint8))[0]-128).astype(np.int8)\n yc_bl[i][j] = np.array([y, cb, cr])\n \n # Switch YCbCr block to 3 block for each Y, Cb, Cr component and calculate DCT for them\n y_dct = sf.dct(yc_bl[:,:,0], norm='ortho')\n cb_dct = sf.dct(yc_bl[:,:,1], norm='ortho')\n cr_dct = sf.dct(yc_bl[:,:,2], norm='ortho')\n \n # From DCT data to quantization data\n y_quant = np.round(y_dct / quant_tbl).astype(np.int8)\n cb_quant = np.round(cb_dct / quant_tbl).astype(np.int8)\n cr_quant = np.round(cr_dct / quant_tbl)).astype(np.int8)\n \n # Convert 8x8 block to zigzag 1x64 block\n y_zz = zig_zag(y_quant)\n cb_zz = zig_zag(cb_quant)\n cr_zz = zig_zag(cr_quant)\n \n # Calc DC and AC, put together to list\n y_cmp, cb_cmp, cr_cmp = dc_and_ac_calc(y_zz, cb_zz, cr_zz)\n \n # Encode using entropy coding\n y_encode = encode(y_cmp)\n cb_encode = encode(cb_cmp)\n cr_encode = encode(cr_cmp)\n \n return [y_encode, cb_encode, cr_encode]", "def _rgb2plot(self, data):\n\n mindata, maxdata = np.percentile(data[np.isfinite(data)], (0.01, 99))\n return np.clip((data - mindata) / (maxdata-mindata) * 255, 0, 255).astype(np.uint8)", "def rgb2gray(data):\n grey = np.expand_dims(np.dot(data, [0.2990, 0.5870, 0.1140]), axis=3)\n return grey", "def decompress(input_file):\n\n with open(input_file, 'rb') as f:\n data=str(f.read())\n f.close()\n\n comp = []\n for i in range(0,len(data),2):\n bit = data[i:i+2]\n if bit == '00':\n comp.append(0.)\n elif bit == '10':\n comp.append(1.)\n elif bit == '01':\n comp.append(2.)\n elif bit == '11':\n comp.append(3.)\n im_mat = np.array(comp)\n return im_mat", "def processing_data(raw_data):\n data = np.frombuffer(raw_data, np.uint8)\n data = np.reshape(data, [data.shape[0]//1029, -1])\n data = data[:, 5:]\n data = np.reshape(data, [1, -1])\n data = 256 * data[0, 0::2] + data[0, 1::2]\n data = 10 * (data / 65535)\n data = np.reshape(data, [-1, 8]).T\n return data" ]
[ "0.65595174", "0.65373284", "0.65213215", "0.6495782", "0.64475703", "0.64128786", "0.6219854", "0.6115602", "0.5978177", "0.5957845", "0.59014165", "0.5828215", "0.5691829", "0.5660269", "0.5658129", "0.56305027", "0.5622264", "0.5549955", "0.55226725", "0.5519686", "0.55096513", "0.5500089", "0.549074", "0.54538333", "0.5449599", "0.5416644", "0.53697497", "0.53483725", "0.5328446", "0.53073555", "0.5303888", "0.53014266", "0.5267656", "0.5261026", "0.52596295", "0.522541", "0.52253747", "0.5213645", "0.5206023", "0.52016205", "0.5177284", "0.5173848", "0.51214856", "0.51128536", "0.508369", "0.5051437", "0.50445735", "0.50423336", "0.5011521", "0.5009259", "0.5001473", "0.50013727", "0.4998725", "0.4998045", "0.49974203", "0.49966455", "0.4987786", "0.49725106", "0.49571088", "0.49527344", "0.49362826", "0.49296033", "0.4926603", "0.49190572", "0.4912573", "0.49051842", "0.49050376", "0.48907954", "0.48852393", "0.48817566", "0.48767316", "0.48762476", "0.4872503", "0.48715737", "0.48558336", "0.48525906", "0.48515928", "0.4851547", "0.48435378", "0.4835947", "0.48346478", "0.48223484", "0.48091403", "0.48077622", "0.48065215", "0.48016492", "0.47996882", "0.47973093", "0.4795029", "0.47938573", "0.47897148", "0.47757423", "0.4755372", "0.47506994", "0.474476", "0.47398785", "0.47367212", "0.47344723", "0.47326446", "0.47289106" ]
0.6613111
0
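A minimal usage sketch for the rgb_to_ycbcr record above, showing the uint8 cast for Pillow that the query calls out. The is_rgb helper is a hypothetical stand-in (the record calls it but never defines it), and the 64x64 input shape and output filename are illustrative only.

import numpy as np
from PIL import Image

def is_rgb(image: np.ndarray) -> bool:
    # hypothetical stand-in for the undefined is_rgb helper: last axis must hold 3 channels
    return image.ndim >= 1 and image.shape[-1] == 3

rgb = np.random.rand(64, 64, 3)   # RGB normalized to (0, 1)
ycbcr = rgb_to_ycbcr(rgb)         # digital Y'CbCr: Y' in [16, 235], Cb/Cr in [16, 240]
img = Image.fromarray(ycbcr.astype(np.uint8), mode="YCbCr")  # cast to uint8 for Pillow
img.convert("RGB").save("ycbcr_roundtrip.png")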
Returns a triangular matrix with random values drawn uniformly between 0 and 1.
def random_triangular_matrix(size: int, lower: bool = True) -> np.ndarray:
    a = np.random.uniform(0, 1, (size, size))
    if lower:
        ind = np.triu_indices(size, 1)   # zero out everything strictly above the diagonal
    else:
        ind = np.tril_indices(size, -1)  # zero out everything strictly below the diagonal
    a[ind] = 0
    return a
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_rand_mat(dim=3):\n tmp = npr.uniform(-1, 1, (dim,dim))\n\n # make matrix symmetric\n for i in range(dim):\n for j in range(i+1, dim):\n tmp[i,j] = tmp[j,i]\n\n return tmp", "def random_matrix(rows, cols):\n return np.random.randn(rows, cols)", "def sample_matrix(dim, bound):\n return np.random.uniform(low=-bound, high=bound, size=(dim, dim))", "def irandmatrix(n, range = 10):\n A = mp.matrix(n, n)\n for i in xrange(n):\n for j in xrange(n):\n A[i,j]=int( (2 * mp.rand() - 1) * range)\n return A", "def generate_random_matrix(n):\n return [[random.randint(1, 50) for i in range(n)] for j in range(n)]", "def generate_matrix(size) -> np.ndarray:\n np.random.seed(1)\n return np.random.rand(size, size) - 0.5", "def generate_board(rows, cols):\n aux = np.zeros((rows, cols))\n for i in range(rows):\n for j in range(cols):\n if np.random.random() < 0.5:\n aux[i][j] = 1\n return aux", "def generate_matrix(rows, cols):\n matrix_random = np.random.rand(rows, cols)\n return matrix_random", "def random_transition_matrix(n: int) -> np.ndarray:\n\n x = np.abs(np.random.normal(size=(n, n)))\n rsum = x.sum(axis=1)\n return x / rsum[:, np.newaxis]", "def create_matrix(size):\n total_size = size * size\n rand_matrix = np.reshape(\n np.random.choice(\n [0, 1], int(total_size), p=[0.9, 0.1]\n ),\n (size, size)\n )\n return rand_matrix", "def generate_random_matrix(dim):\n\n A = np.complex128(np.random.random([dim, dim]))\n A_adjoint = A.conj().T\n\n P = A @ A_adjoint\n P += np.identity(len(P))\n\n P_inverse = np.linalg.inv(P)\n\n return P_inverse", "def rand(cls):\n q_vec = np.random.rand(4)\n q=Quat(q_vec)\n q.normalize()\n return q", "def uniform_weights(n):\n return np.ones((n, 1)) / n", "def matrix_generate(n):\n a = np.eye(n)\n max = 0\n for i in range(n):\n for j in range(n):\n a[i][j] = random.randint(0,50)\n a[j][i] = a[i][j]\n if a[i][j] > max:\n max = a[i][j]\n for i in range(n):\n a[i][i] = max * n + random.randint(20,40)\n return np.array(a)", "def WeightInitializer():\n return np.random.uniform(-1, 1)", "def _random_not_singular(N):\n data = np.zeros((1, 1))\n while np.linalg.det(data) == 0:\n data = np.random.random((N, N)) + \\\n 1j * np.random.random((N, N)) - (0.5 + 0.5j)\n return data", "def get_random_uniform(m,n):\n\n return 2*np.random.random(size=(m,n)) - 1", "def random_rotation_matrix():\n\n x = np.random.uniform(size=3)\n theta = x[0]*2*math.pi\n phi = x[1]*2*math.pi\n z = x[2]*2\n\n r = math.sqrt(z)\n vx = math.sin(phi)*r\n vy = math.cos(phi)*r\n vz = math.sqrt(2.0-z)\n\n st = math.sin(theta)\n ct = math.cos(theta)\n\n sx = vx*ct-vy*st\n sy = vx*st+vy*ct\n\n return np.array([[vx*sx-ct, vx*sy-st, vx*vz],\n [vy*sx+st, vy*sy-ct, vy*vz],\n [vz*sx,vz*sy,1.0-z]])", "def makeRandom(cls, m, n, min=0, max=1):\n Matrix.validate_dimensions(m, n)\n data = [[randrange(min, max) for j in range(n)] for i in range(m)]\n return RealMatrix(m, n, data)", "def generate_onehot_matrix(n: int = 1024, ndim: int = 8, random_seed: int = None) -> TYPE_ARRAY:\n to_vec = lambda x: [1 if i == x else 0 for i in range(ndim)]\n return numpy.array([to_vec(x) for x in _RNG.randint(0, ndim, n)]).astype(int)", "def generatePattern(numCols=100, minOnes=21, maxOnes=25):\n assert minOnes < maxOnes\n assert maxOnes < numCols\n\n nOnes = random.randint(minOnes, maxOnes)\n ind = random.sample(xrange(numCols), nOnes)\n x = numpy.zeros(numCols, dtype='float32')\n x[ind] = 1\n\n return x", "def init_matrix(x_dim = 10, y_dim = 10):\n ret = np.zeros((x_dim, y_dim))\n x_rand = np.random.randint(0, x_dim - 1)\n y_rand = 
np.random.randint(0, y_dim - 1)\n ret[x_rand, y_rand] = 1\n\n return(ret)", "def rand(self, x):\r\n return np.random.random(1)[0]", "def simple_genotype_matrix(n, p):\n genotypes = np.zeros(shape=(n, p))\n for item in range(0, p):\n genotypes[:, item] = np.random.binomial(1, np.random.uniform(0.1, 0.5, 1), n)\n\n return genotypes", "def random(cls):\n return cls(np.random.randn(3)).normalized()", "def get_rnd_simplex(dimension, random_state):\n t = random_state.uniform(0, 1, dimension - 1)\n t = np.append(t, [0, 1])\n t.sort()\n\n return np.array([(t[i + 1] - t[i]) for i in range(len(t) - 1)])", "def _make_random_matrix(self, n_components, n_features):\n #random_state = check_random_state(self.random_state)\n return _gaussian_random_matrix(\n n_components, n_features, random_state=self.random_state\n )", "def generate_s_matrix(number: int):\n matrix_zero = np.ones((number, number))\n matrix_zero[1:-1, 1:-1] = 0\n return matrix_zero", "def random_density_matrix(nqubits: int, dtype=np.complex128) -> np.ndarray:\n rho = random_numpy_hermitian(nqubits, dtype=dtype)\n # Normalize\n ids = np.arange(2 ** nqubits)\n rho[ids, ids] = rho[ids, ids] / np.trace(rho)\n return rho.astype(dtype)", "def random_table(self):\n rule_set = []\n for i in range(self.k ** (2 * self.r + 1) - 1):\n g = np.random.rand()\n if g > self.lambda_param:\n g = 0\n else:\n g = np.random.randint(1, self.k)\n rule_set.append(g)\n rule_set.append(0)\n return rule_set", "def form_triu_matrix(arr):\n n = int(np.ceil((np.sqrt(1 + 8 * len(arr)) - 1) * 0.5))\n M = np.zeros((n, n))\n c = 0\n for i in range(n):\n for j in range(n):\n if j >= i:\n if c < len(arr):\n M[i, j] = arr[c]\n c += 1\n else:\n break\n return M", "def identity_matrix(n):\n data = [[1 if c == r else 0 for c in range(n)] for r in range(n)]\n return Matrix(data)", "def rand(self):\n return np.random.rand(self.nx)", "def gen_maze(dim, p):\n maze = []\n for i in range(dim):\n maze.append([])\n for j in range(dim):\n if(random.uniform(0, 1) < p):\n maze[i].append(1)\n else:\n maze[i].append(0)\n\n maze[0][0] = 0\n maze[dim - 1][dim - 1] = 0\n return maze", "def bin_random_mat(m,n,p_0 = 0.5):\n\treturn np.array((np.random.randn(m,n) >= p_0), dtype = np.float)", "def make_result_matrix(T):\n result_matrix = []\n # Uniform sampled distribution\n distribution = np.random.choice([1, 0], T, p=[.1, .9])\n place_holder = np.random.randn(T)\n place_holder[distribution] = np.nan # Masking\n\n # This block is to un-flatten the 25 element matrix into a 5*5 matrix\n for j in range(T):\n temp = []\n for i in range(T):\n temp.append(place_holder[i])\n result_matrix.append(temp)\n\n result_matrix = np.array(result_matrix)\n\n return result_matrix", "def uniform_sample(x):\n return np.random.choice(x)", "def random_cluster_matrix(shape):\n N, k = shape\n A = np.zeros((N, k))\n for row in A: \n row[randint(0, k - 1)] = 1\n\n return A", "def _u_naught_simple(self):\n # Random is better to give different multipliers in the subgradient phase\n return np.random.rand(self.mrows)*1.", "def sample_from_triangular(service_dist):\n\n # Convert from strings.\n lower = _get_sec(service_dist[0], spd_factor)\n mode = _get_sec(service_dist[1], spd_factor)\n upper = _get_sec(service_dist[2], spd_factor)\n\n # Sample using Numpy triangular, and return.\n sample = int(np.random.triangular(lower, mode, upper))\n return sample", "def random_planted_matrix(d, n, replace='True'):\n all_idx = np.asarray(list(zip(*np.tril_indices(d,-1))))\n chosen_idx_positions = np.random.choice(len(all_idx), size=n, 
replace=replace)\n subspaces = all_idx[chosen_idx_positions]\n angles = 2*np.pi * (np.random.rand(len(subspaces)) - 0.5)\n U = np.eye(d)\n for s, alpha in zip(subspaces, angles):\n U = right_givens(math.cos(alpha), math.sin(alpha), U, s[0], s[1])\n return U", "def generarMatriz(n):\n\n dist = np.random.randint(MIN_DISTANCIA,MAX_DISTANCIA/2,size=(n,n))#Hasta 50 ya que se va a sumar con la transpuesta duplicando los valores\n dist = dist + dist.T#Para hacer la matriz simetrica\n np.fill_diagonal(dist, 0)#La distancia entre una ciudad y si misma es 0\n return dist", "def get_D100():\n m = 100\n random.seed(1111*m)\n A = random.randn(m, m) + 1j*random.randn(m, m)\n A = 0.5*(A + np.conj(A).T)\n A[np.tril_indices(m, -2)] = 0\n A[np.triu_indices(m, 2)] = 0\n return A", "def sample(self):\n u = np.asarray(np.random.uniform())\n return self.invert(u)", "def get_a_tensor():\n dev = random.choice(devices)\n shape = random.choice(((1), (2, 3), (4, 5, 6), (7, 8, 9, 10)))\n t = torch.rand(shape).to(dev)\n nonlocal expected\n expected += t.numel()\n return t", "def bernoulli_adjacency_matrix(E):\n A = np.random.binomial(1, E)\n A = np.tril(A) + np.tril(A, -1).T\n return A", "def rand_uni_val() -> float:\n return random.uniform(0, 1)", "def make_table(m, n):\n return np.array([[0] * n for _ in range(m)], dtype=float)", "def randomgrid(self, n):\n lam = np.random.random((n, 3))\n return self.normalize(lam)", "def gen_Ψ(self, n_dims):\n Ψ = np.diag(10 / np.random.sample(n_dims))\n\n return Ψ", "def get_random_discrete(m, n):\n\n return np.random.choice([-1.0,1.0], size=(m,n))", "def random_transpose(pianoroll):\n semitone = np.random.randint(-5, 6)\n if semitone > 0:\n pianoroll[:, semitone:, 1:] = pianoroll[:, :-semitone, 1:]\n pianoroll[:, :semitone, 1:] = 0\n elif semitone < 0:\n pianoroll[:, :semitone, 1:] = pianoroll[:, -semitone:, 1:]\n pianoroll[:, semitone:, 1:] = 0\n return pianoroll", "def identity_matrix(n):\n I = zeros_matrix(n, n)\n for i in range(n):\n I[i][i] = 1.0\n\n return I", "def identity_matrix(n):\n I = zeros_matrix(n, n)\n for i in range(n):\n I[i][i] = 1.0\n\n return I", "def triangular_number(n):\n return n*(n+1) / 2", "def get_B100():\n m = 100\n random.seed(1111*m)\n A = random.randn(m, m) + 1j*random.randn(m, m)\n A[np.tril_indices(m, -2)] = 0\n return A", "def random_matrix():\n # Initialize random angles\n theta1 = np.random.rand() * 360\n theta2 = np.random.rand() * 360\n theta3 = np.random.rand() * 360\n\n glMatrixMode(GL_MODELVIEW)\n glPushMatrix()\n glLoadIdentity()\n glPushMatrix()\n glRotatef(theta1, 1.0, 0.0, 0.0)\n glRotatef(theta2, 0.0, 1.0, 0.0)\n glRotatef(theta3, 0.0, 0.0, 1.0)\n matrix = glGetDoublev(GL_MODELVIEW_MATRIX)\n glPopMatrix()\n glPopMatrix()\n return matrix", "def identity(n):\r\n I = np.zeros((n, n))\r\n diag = np.ones(n)\r\n np.fill_diagonal(I, diag)\r\n return matrix(I)", "def random_vec(self, rand):\n return array([rand.uniform(*c) for c in self.constraints])", "def uniform_but_one_dataset(n, p):\n elements = []\n for i in range(n):\n elements.append((i, 1))\n elements.append((1, (n**(1.0 / p)) - 1))\n return elements", "def init_function(matrix_dimensions):\n\n return numpy.random.uniform(\n low=min_value, high=max_value, size=matrix_dimensions)", "def _make_random_matrix(self, n_components, n_features):", "def random_distribution():\n b = np.random.uniform(0.0, 1.0, size=[1, vocabulary_size])\n return b / np.sum(b, 1)[:, None]", "def random_board(n):\r\n \r\n return(np.random.randint(0,n-1, size = n))", "def _generate_uniform_planes(self):\n return 
np.random.randn(self.m, self.dim)", "def generate_positive_semi_definite_matrix(n_dim):\n cov = np.random.randn(n_dim, n_dim)\n return np.dot(cov, cov.T)", "def random_distribution():\n b = np.random.uniform(0.0, 1.0, size=[1, vocabulary_size])\n return b/np.sum(b, 1)[:,None]", "def random_distribution():\n b = np.random.uniform(0.0, 1.0, size=[1, vocabulary_size])\n return b/np.sum(b, 1)[:,None]", "def create_diagonal_image(n):\n alist = []\n for i in range(0, n):\n alist.append([])\n for j in range(0,n):\n if i ==j:\n alist[i].append(1)\n else:\n alist[i].append(0)\n\n ##########\n # img = create_zeroed_image(n)\n # for i in range(n):\n# for j in range(n):\n# if i == j:\n# img[i][j] = 1\n #######\n \n return alist", "def draw_random_u(d):\n mu = np.zeros(d)\n cov = np.eye(d)\n u = multivariate_normal.rvs(mean=mu, cov=cov)\n return u / np.linalg.norm(u)", "def generate_sparse(n, s):\n x = np.zeros(n)\n I = np.random.randint(0, n, s)\n x[I] = 1\n return x", "def mutate_matrix(matrix):\n L = len(matrix)\n r_i = random.randrange(L)\n r_j = random.randrange(4)\n r = random.gauss(0,1)\n return [[matrix[i][j]+r*(i==r_i)*(j==r_j)\n for j in range(4)] for i in range(L)]", "def random_init(self, shape):\n return np.random.randn(shape[0],shape[1])*0.01", "def rand_tri_gauss(n1=100, n2=100, n3=100, mu1=[1, 1],\n mu2=[-1, -1], mu3=[1, -1], sigma1=[0.1, 0.1],\n sigma2=[0.1, 0.1], sigma3=[0.1, 0.1]):\n ex1 = rand_gauss(n1, mu1, sigma1)\n ex2 = rand_gauss(n2, mu2, sigma2)\n ex3 = rand_gauss(n3, mu3, sigma3)\n res = np.vstack([np.hstack([ex1, 1. * np.ones((n1, 1))]),\n np.hstack([ex2, 2. * np.ones((n2, 1))]),\n np.hstack([ex3, 3. * np.ones((n3, 1))])])\n ind = np.arange(res.shape[0])\n np.random.shuffle(ind)\n return np.array(res[ind, :])", "def gen_vector(size):\n solution = []\n for i in range(size):\n rand_num = uniform(-size, size)\n solution.append(rand_num)\n return np.array(solution)", "def generate_sparse_randn(n, s):\n x = np.zeros(n)\n I = np.random.randint(0, n, s)\n x[I] = np.random.randn(s)\n return x", "def random_state(N, p):\n m = int(N * p)\n s = np.concatenate([np.ones(m), np.ones(N-m) * -1]).astype(np.int8)\n np.random.shuffle(s)\n return s", "def generate_diagonal(n, l):\n res = []\n arr = [1] * l\n l = l+1\n for diag in range(n):\n res = []\n for index in range(1, l):\n summed = sum(arr[:index]) # sum is really slow for large numbers\n res.append(summed)\n arr = res\n return (arr)", "def unique_random(table, ind_range, subset_size: int):\n np.random.seed(seed)\n # choose indices\n rand_ind = np.random.choice(\n np.arange(ind_range[0], ind_range[1]), subset_size, replace=False)\n # assist table\n return table[rand_ind]", "def rand(self):\n q = pinocchio.randomConfiguration(self.model)\n v = np.random.rand(self.model.nv) * 2 - 1\n return np.concatenate([q.flat, v])", "def random_normal():\n return inverse_normal_cdf(random.random())", "def random_normal():\r\n return inverse_normal_cdf(random.random())", "def random_rotation_matrix(strength=None, dtype=None):\n if strength is None:\n strength = 1.0\n\n if dtype is None:\n dtype = np.float32\n\n x = np.random.rand(3)\n theta = x[0] * 2 * np.pi * strength\n phi = x[1] * 2 * np.pi\n z = x[2] * strength\n\n r = np.sqrt(z)\n V = np.array([np.sin(phi) * r, np.cos(phi) * r, np.sqrt(2.0 - z)])\n\n st = np.sin(theta)\n ct = np.cos(theta)\n\n Rz = np.array([[ct, st, 0], [-st, ct, 0], [0, 0, 1]])\n\n rand_R = (np.outer(V, V) - np.eye(3)).dot(Rz)\n return rand_R.astype(dtype)", "def normal(n):\n m=np.zeros((n,n))\n for i,j in 
itertools.product(range(n), range(n)):\n m[i][j]=normalvariate(0,1)\n return m", "def completar_matrix(mtx, n, randomChar=True):\n for i in range(n):\n for e in range(n):\n if randomChar :\n if mtx[i][e] == \"\" : mtx[i][e] = random.choice(string.ascii_lowercase)\n else:\n if mtx[i][e] == \"\" : mtx[i][e] = \"*\"\n return mtx", "def initial_vector(self):\n\n return asarray([np.random.uniform(l, u) for l, u in self.bounds])", "def random():\n return constant(1)", "def rand(cls, m, n=None):\n if not n:\n n = m\n x = np.random.normal(size=n * m)\n x.resize(m, n)\n return cls(x)", "def _random_oriented_simplex(n):\n vertices = np.random.rand(n + 1, n)\n while orientation(vertices) < 0.0:\n vertices = np.random.rand(n + 1, n)\n return vertices", "def generate_diagonal_factors(n):\n\tfactors = np.array([1, -1]) # Initialize the diag terms with the diagonal of the Z Pauli matrix\n\tfor _ in range(n - 1): # Iterate n - 1 times\n\t\tfactors = np.hstack([factors, factors * -1]) # Append the same array multiplied by -1\n\treturn factors", "def randn(self, *args, **kwargs):\n # TODO: Put this in torch.cuda.randn\n return torch.empty(*args, **kwargs).normal_()", "def randu(*shape):\n # ATTENTION: whether you use randu or randn can make a difference.\n return 2*rand(*shape)-1", "def identity_matrix(self, n):\r\n IdM = self.zeros_matrix(n, n)\r\n for i in range(n):\r\n IdM[i][i] = 1.0\r\n \r\n return IdM", "def get_rand_combos_matrix(rows, cols, normal=False):\n np.random.seed(57)\n # TODO perhaps explore different types of random matrices?\n # randn was giving me conditioning problems\n if normal:\n C = np.random.normal(loc=0, scale=1, size=(rows, cols))\n return C\n size = max(rows, cols)\n C = ortho_group.rvs(size)\n return C[:rows, :cols]", "def random_grid(height, width):\n grid = create_grid(height, width)\n for r in range(1, height - 1):\n for c in range(1, width - 1):\n grid[r][c] = random.choice([0, 1])\n return grid", "def tril(m, k=0):\r\n return m * tri(m.shape[0], m.shape[1], k=k, dtype=m.dtype)", "def sample_bernoulli(self, probabilities):\n return tf.nn.relu(tf.sign(probabilities - tf.random.uniform(probabilities.shape)))", "def _generate_random_vector(size):\n return np.random.uniform(-0.1, 0.1, size)", "def triangleFunction(self):\n \n w = np.zeros((self.N))\n l = self.l\n for i in range(self.r.shape[0]):\n r = np.abs(self.r[i])\n if r <= l:\n tf = lambda r,l : 1 - r/l\n w[i] = tf(r,l)\n else:\n w[i] = 0\n self.w = w", "def _GenerateUniqueRandomInputTensor(self, shape):\n num_elements = 1\n for size in shape:\n num_elements *= size\n x = np.arange(num_elements, dtype=np.float32)\n self._PRNG.shuffle(x)\n return x.reshape(shape)" ]
[ "0.6796665", "0.6433331", "0.6395273", "0.63390714", "0.63257", "0.6278706", "0.61659557", "0.61443967", "0.60865873", "0.5927011", "0.5907153", "0.5863584", "0.5841046", "0.58366776", "0.57918566", "0.5788798", "0.5776318", "0.57745236", "0.57488704", "0.5734677", "0.56929296", "0.56653774", "0.56517637", "0.56503206", "0.5616845", "0.5604512", "0.5602481", "0.5602272", "0.55816734", "0.55757606", "0.5569142", "0.55642754", "0.55519044", "0.55455375", "0.5544021", "0.5541596", "0.5510012", "0.5507994", "0.55048966", "0.5499128", "0.5480714", "0.5475037", "0.54513514", "0.5449953", "0.5447862", "0.54425174", "0.54370356", "0.5427511", "0.5417289", "0.5417285", "0.54111683", "0.5410552", "0.54085034", "0.54085034", "0.54054284", "0.54024", "0.5392096", "0.5391099", "0.53883845", "0.5386788", "0.5380865", "0.5370243", "0.536657", "0.53562415", "0.5355132", "0.5353428", "0.53513914", "0.53513914", "0.534865", "0.5347406", "0.53386384", "0.5338058", "0.53322494", "0.5328125", "0.5320799", "0.5317774", "0.5312995", "0.53113246", "0.52987176", "0.52963", "0.5294812", "0.5294088", "0.5292437", "0.529043", "0.5275535", "0.5272966", "0.5270725", "0.52617717", "0.5251387", "0.524735", "0.52417755", "0.52411515", "0.5237439", "0.52355427", "0.5234345", "0.52297086", "0.52291185", "0.5223007", "0.5221892", "0.5221257" ]
0.74994147
0
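A quick sanity check for the random_triangular_matrix record above, confirming that the kept entries stay uniform in [0, 1) and that the opposite triangle is zeroed out; the 4x4 size is an arbitrary illustration.

import numpy as np

L = random_triangular_matrix(4, lower=True)
U = random_triangular_matrix(4, lower=False)

assert np.allclose(L, np.tril(L))   # strict upper triangle was zeroed
assert np.allclose(U, np.triu(U))   # strict lower triangle was zeroed
assert ((L >= 0) & (L < 1)).all()   # remaining entries drawn from uniform(0, 1)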
Performs a batched calculation of the `v^T A v` transform, a special case of the bilinear form `x^T A y`.
def batch_vTAv(A: np.ndarray, v: np.ndarray) -> np.ndarray:
    """
    Faster than
        Av = np.matmul(A, v[...,:,None])                       # [B, X, 1]
        return np.matmul(v[...,None,:], Av).squeeze((-2, -1))  # [B]
    """
    return np.einsum("...k,...kl,...l->...", v, A, v)
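A small numerical check of the einsum contraction above against the explicit matmul formulation quoted in its own docstring; the batch size B=4 and vector dimension X=3 are arbitrary illustrations.

import numpy as np

B, X = 4, 3
A = np.random.rand(B, X, X)
v = np.random.rand(B, X)

fast = np.einsum("...k,...kl,...l->...", v, A, v)        # [B]
Av = np.matmul(A, v[..., :, None])                       # [B, X, 1]
slow = np.matmul(v[..., None, :], Av).squeeze((-2, -1))  # [B]

assert np.allclose(fast, slow)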
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def f(t, x, n, v):\n total = 0\n for i in range(n+1):\n for j in range(n+1):\n for k in range(v):\n total = t[i][j] * x[i][j][k]", "def __call__(self, x, y):\n #- TODO: compare speed to solution at\n #- http://stackoverflow.com/questions/12729228/simple-efficient-bilinear-interpolation-of-images-in-numpy-and-python\n \n #- Find where we are in grid\n #- clip to 1 because we will use i and i-1\n #- clip to len(x)-1 to allow extrapolation beyond grid boundary\n ix = np.searchsorted(self.x, x).clip(1, len(self.x)-1)\n iy = np.searchsorted(self.y, y).clip(1, len(self.y)-1)\n \n #- Interpolation distances from points\n dx = (x - self.x[ix-1]) / (self.x[ix] - self.x[ix-1])\n dy = (y - self.y[iy-1]) / (self.y[iy] - self.y[iy-1])\n\n #- Interpolate, allowing x and/or y to be multi-dimensional\n #- NOTE: these are the slow steps, about equal time each\n \n #- Original code with what appears to be vestigial transposes\n # data1 = (self.data[ix-1,iy-1].T*(1-dx) + self.data[ix,iy-1].T*dx).T\n # data2 = (self.data[ix-1,iy].T*(1-dx) + self.data[ix,iy].T*dx).T\n # dataxy = (data1.T*(1-dy) + data2.T*dy).T\n\n #- Updated without transposes\n data1 = (self.data[ix-1,iy-1]*(1-dx) + self.data[ix,iy-1]*dx)\n data2 = (self.data[ix-1,iy]*(1-dx) + self.data[ix,iy]*dx)\n dataxy = (data1*(1-dy) + data2*dy)\n\n return dataxy", "def brute_multiply(x, y):\n \n n = x.shape[0]\n res = np.zeros(x.shape)\n \n for i in range(n):\n for j in range(n):\n for k in range(n):\n res[i, j] += x[i, k] * y[k, j]\n \n return res", "def __call__(self, x):\n return self._pre_scale * tf.matmul(x, self._weight) + self._bias", "def extforce (u, v):\r\n\r\n for i in range (height):\r\n for j in range (width):\r\n u[i,j], v[i,j] = np.stack((u[i,j], v[i,j])) + dt * extacc\r\n\r\n return u, v", "def apply(self,v):\n return np.tensordot(self._transform, v, axes=([1],[0])) \\\n + self._translation", "def _gv_bilinear(self, r, t):\n mv1t = torch.matmul(self.mv1.weight, t.T) # [k, b]\n mv2r = torch.matmul(self.mv2.weight, r.T) # [k, b]\n return (mv1t * mv2r + self.bv.weight).T # [b, k]", "def temporal_affine_forward(x, w, b):\n N, T, D = x.shape\n M = b.shape[0]\n out = x.reshape(N * T, D).dot(w).reshape(N, T, M) + b\n cache = x, w, b, out\n return out, cache", "def ulab_bilinear_interpolation():\n GRID_DATA[1::2, ::2] = SENSOR_DATA[:-1, :]\n GRID_DATA[1::2, ::2] += SENSOR_DATA[1:, :]\n GRID_DATA[1::2, ::2] /= 2\n GRID_DATA[::, 1::2] = GRID_DATA[::, :-1:2]\n GRID_DATA[::, 1::2] += GRID_DATA[::, 2::2]\n GRID_DATA[::, 1::2] /= 2", "def test_gemm_with_vector():\r\n X, Y, Z, a, b = XYZab()\r\n v = T.vector()\r\n\r\n def my_just_gemm(o):\r\n i = [X, Y, Z, a, b, v]\r\n ishapes = [(4, 3), (3, 5), (4, 5), (), (), (5, )]\r\n rval = just_gemm(i, o, ishapes=ishapes)\r\n\r\n my_just_gemm([v + T.dot(X, Y) * a + Z * b])\r\n my_just_gemm([v + a * T.dot(X, Y) + b * Z])\r\n my_just_gemm([v + b * Z + a * T.dot(X, Y)])\r\n my_just_gemm([v + T.dot(X, Y) * a - Z * b])\r\n my_just_gemm([v + a * T.dot(X, Y) - b * Z])\r\n my_just_gemm([v + b * Z - a * T.dot(X, Y)])\r\n\r\n #with N multiplications instead of just one\r\n my_just_gemm([v + (b * b) * Z * a + (a * a) * T.dot(X, Y) * b])\r\n my_just_gemm([v + Z + T.dot(X, Y)])\r\n my_just_gemm([v + Z * b + T.dot(X, Y)])\r\n my_just_gemm([v + Z + a * b * a * T.dot(X, Y)])\r\n my_just_gemm([v + (b * b) * Z * a - (a * a) * T.dot(X, Y) * b])\r\n my_just_gemm([Z - T.dot(X, Y) + v])\r\n my_just_gemm([Z * b - T.dot(X, Y) + v])\r\n my_just_gemm([Z - a * b * a * T.dot(X, Y) + v])", "def apply(self,i,x):\n #applies the ith map to 
the point x\n y = self.A[i,:,:] @ x + self.b[i,:]\n return y", "def advect (u, v):\r\n # NOTICE: memory usage might be too high, could optimize\r\n\r\n # Store the values from timestep n\r\n un = u\r\n vn = v\r\n\r\n for i in range (height):\r\n for j in range (width):\r\n oldpos = coord (i,j) - dt * np.stack((u[i,j], v[i,j]))\r\n u[i,j], v[i,j] = interpolate (un, vn, oldpos)\r\n\r\n\r\n # Return values for timestep n+1\r\n return u, v", "def forward(x, pi, A, B):\n # TODO: Write this function.\n #x = x[1]\n B_col = B[:, x[0]] # [N_z, 1]\n alpha = np.multiply(pi, B_col)\n ret = np.zeros((x.shape[0], pi.shape[0]))\n ret[0] = alpha\n for i in range(1, x.shape[0]):\n B_col = B[:, x[i]]\n sum_term = np.dot(A, alpha) #before: alpha, A\n alpha = np.multiply(B_col, sum_term) #before: sum_term before\n ret[i] = alpha\n return ret", "def call(self, x):\n return tf.tile(x, self._mult)", "def update_params(self, x_a, r_t, a_t):\n self.A_a[a_t] = self.A_a[a_t] + x_a[:, a_t].reshape(-1, 1).dot(x_a[:, a_t].reshape(-1, 1).T)\n self.A_a_inv[a_t] = inv(self.A_a[a_t])\n self.b_a[a_t] = self.b_a[a_t] + x_a[:, a_t].reshape(-1, 1) * r_t", "def skydiving_iterate(v, t, dt, X, Y):\n return (v + dt*X(t))/(1 + dt*Y(t)*abs(v))", "def two_bs2x4_transform_opt(t1, r1, t2, r2, input_state):\n size = len(input_state)\n out = np.zeros((size,) * 4, dtype=complex)\n\n def coef(k1, k2, k3, k4):\n return t1 ** k2 * (1j * r1) ** k1 * t2 ** k4 * (1j * r2) ** k3 / (factorial(k1) * factorial(k2) * factorial(k3) * factorial(k4))\n\n # index 'i' = (m,n,k,l)\n for i in np.ndindex(size, size, size, size):\n if i[2] <= i[0] and i[3] <= i[1] and i[0] + i[1] < size:\n out[i[2], i[0] - i[2], i[3], i[1] - i[3]] = coef(i[2], i[0] - i[2], i[3], i[1] - i[3]) * input_state[i[0], i[1]] * factorial(i[0]) * factorial(i[1])\n\n return out", "def inplace(block_size=20000):\n y = np.empty(len(x))\n for k in range(len(x) // block_size + 1):\n b, e = k * block_size, (k+1) * block_size\n y[b:e] = x[b:e]\n y[b:e] *= .25\n y[b:e] += .75\n y[b:e] *= x[b:e]\n y[b:e] -= 1.5\n y[b:e] *= x[b:e]\n y[b:e] -= 2\n\n return y", "def _apply_cost_to_vec(\n self, vec: jnp.ndarray, axis: int = 0, fn=None\n ) -> jnp.ndarray:\n vec = jnp.reshape(vec, self.grid_size)\n accum_vec = jnp.zeros_like(vec)\n indices = list(range(1, self.grid_dimension))\n for dimension, geom in enumerate(self.geometries):\n cost = geom.cost_matrix\n ind = indices.copy()\n ind.insert(dimension, 0)\n if axis == 0:\n cost = cost.T\n accum_vec += jnp.sum(\n jnp.tensordot(cost, vec, axes=([0], [dimension])),\n axis=indices,\n keepdims=True\n ).transpose(ind)\n return accum_vec.ravel()", "def transform(tvec1, rvec1, tvec2, rvec2):\n op = localToGlobal(np.squeeze(tvec2), np.squeeze(rvec2))\n tvec3 = []\n for tvec in tvec1:\n #tvec = tvec.squeeze()\n tvec3.append(np.matmul(op, tvec))\n tvec3 = np.array(tvec3)\n return tvec3", "def affine(params, x):\n return np.dot(params['w'], x) + params['b']", "def bilinearInterpolation(self,point,indexes,fieldTranspose):\n x,y = point\n x1,x2 = self.coordinates[0][indexes[0]],self.coordinates[0][indexes[1]]\n y1,y2 = self.coordinates[1][indexes[1]],self.coordinates[1][indexes[2]]\n f11 = fieldTranspose[indexes[0]]\n f21 = fieldTranspose[indexes[1]]\n f12 = fieldTranspose[indexes[2]]\n f22 = fieldTranspose[indexes[3]]\n return 1/((x2-x1)*(y2-y1))*(f11*(x2-x)*(y2-y)+f21*(x-x1)*(y2-y)+f12*(x2-x)*(y-y1)+f22*(x-x1)*(y-y1))", "def beta_A_isometric_monte_carlo(self, v, **kwargs):\r\n v = self.np_array(v)\r\n beta_A = np.zeros(v.shape)\r\n for i, v_i in enumerate(v):\r\n 
self.beta_E = lambda lambda_: self.beta_U_1(lambda_) + \\\r\n self.beta_A_0_abs_isometric(1, lambda_)\r\n\r\n def serial_fun(init_config, **kwargs):\r\n return self.beta_A_isometric_monte_carlo_serial(\r\n v_i, init_config, **kwargs\r\n )\r\n\r\n beta_A[i] = self.parallel_calculation(\r\n serial_fun,\r\n self.minimize_beta_U(v_i)[2][-self.M:, 0],\r\n **kwargs\r\n )\r\n return beta_A", "def create_A(matches, tilespecs, mesh, **kwargs):\n # let's assume translation halfsize\n dof_per_tile = 1\n dof_per_vertex = 1\n vertex_per_patch = 3\n nnz_per_row = 2*(dof_per_tile + vertex_per_patch * dof_per_vertex)\n nrows = sum([len(m['matches']['p'][0]) for m in matches])\n nd = nnz_per_row*nrows\n lens_dof_start = dof_per_tile*len(tilespecs)\n\n data = np.zeros(nd).astype('float64')\n b = np.zeros((nrows, 2)).astype('float64')\n indices = np.zeros(nd).astype('int64')\n indptr = np.zeros(nrows+1).astype('int64')\n indptr[1:] = np.arange(1, nrows+1)*nnz_per_row\n weights = np.ones(nrows).astype('float64')\n\n unique_ids = np.array(\n [t.tileId for t in tilespecs])\n\n # nothing fancy here, row-by-row\n offset = 0\n rows = 0\n\n for mi in range(len(matches)):\n m = matches[mi]\n pindex = np.argwhere(unique_ids == m['pId'])\n qindex = np.argwhere(unique_ids == m['qId'])\n\n npoint_pairs = len(m['matches']['q'][0])\n # get barycentric coordinates ready\n pcoords = np.transpose(\n np.vstack(\n (m['matches']['p'][0],\n m['matches']['p'][1])\n )).astype('float64')\n qcoords = np.transpose(\n np.vstack(\n (m['matches']['q'][0],\n m['matches']['q'][1])\n )).astype('float64')\n\n b[rows: (rows + pcoords.shape[0])] = qcoords - pcoords\n rows += pcoords.shape[0]\n pbary = compute_barycentrics(pcoords, mesh, **kwargs)\n qbary = compute_barycentrics(qcoords, mesh, **kwargs)\n\n mstep = np.arange(npoint_pairs) * nnz_per_row + offset\n\n data[mstep + 0] = 1.0\n data[mstep + 1] = -1.0\n data[mstep + 2] = pbary[0][:, 0]\n data[mstep + 3] = pbary[0][:, 1]\n data[mstep + 4] = pbary[0][:, 2]\n data[mstep + 5] = -qbary[0][:, 0]\n data[mstep + 6] = -qbary[0][:, 1]\n data[mstep + 7] = -qbary[0][:, 2]\n\n indices[mstep + 0] = pindex\n indices[mstep + 1] = qindex\n indices[mstep + 2] = (lens_dof_start +\n mesh.simplices[pbary[1][:]][:, 0])\n indices[mstep + 3] = (lens_dof_start +\n mesh.simplices[pbary[1][:]][:, 1])\n indices[mstep + 4] = (lens_dof_start +\n mesh.simplices[pbary[1][:]][:, 2])\n indices[mstep + 5] = (lens_dof_start +\n mesh.simplices[qbary[1][:]][:, 0])\n indices[mstep + 6] = (lens_dof_start +\n mesh.simplices[qbary[1][:]][:, 1])\n indices[mstep + 7] = (lens_dof_start +\n mesh.simplices[qbary[1][:]][:, 2])\n\n offset += npoint_pairs*nnz_per_row\n\n A = csr_matrix((data, indices, indptr), dtype='float64')\n\n wts = sparse.eye(weights.size, format='csr', dtype='float64')\n wts.data = weights\n return A, wts, b, lens_dof_start", "def linear_forward(A, W, b):\n \n ### START CODE HERE ### (≈ 1 line of code)\n Z = np.dot( W, A ) + b\n ### END CODE HERE ###\n \n assert(Z.shape == (W.shape[0], A.shape[1]))\n cache = (A, W, b)\n \n return Z, cache", "def apply_matrix(self, A):\n assert self.is_vector(), 'Can only apply matrices to vector-valued functions'\n C = np.matmul(A, self.coeffs[..., None])\n assert C.shape[-1] == 1 # this should have created a new singleton axis\n return BSplineFunc(self.kvs, np.squeeze(C, axis=-1))", "def two_bs2x4_transform(t1, r1, t2, r2, input_state):\n size = len(input_state)\n output_state = np.zeros((size,) * 4, dtype=complex)\n for m in range(size):\n for n in range(size):\n\n for k in 
range(m + 1):\n for l in range(n + 1):\n # channels indexes\n ind1 = k\n ind2 = m - k\n ind3 = l\n ind4 = n - l\n coeff = input_state[m, n] * t1**(m - k) * (1j*r1)**k * t2**(n - l) * (1j*r2)**l * factorial(m) * factorial(n) / (factorial(k) * factorial(m - k) * factorial(l) * factorial(n - l))\n output_state[ind1, ind2, ind3, ind4] = output_state[ind1, ind2, ind3, ind4] + coeff\n\n return output_state", "def _inner_product_a0(self, tangent_vec_a, tangent_vec_b, vertex_areas_bp):\n return self.a0 * gs.sum(\n vertex_areas_bp\n * gs.einsum(\"...bi,...bi->...b\", tangent_vec_a, tangent_vec_b),\n axis=-1,\n )", "def transform(fn):\n def _(vec, dt):\n return np.einsum(\n 'ji,i,ki,k...->j...',\n evecs, fn(evals, dt), evecs, vec, optimize=True)\n\n return _", "def vectorized_loops(self, data):\n\n # TODO: finish this.\n return np.add(np.multiply(data,data), data)", "def test_batch_vector_substitutions(\n free_alg, full_balance, simplify\n):\n\n dr = free_alg\n p = dr.names\n\n a = IndexedBase('a')\n x = IndexedBase('x')\n y = IndexedBase('y')\n i, j = p.i, p.j\n v = p.v\n v_dag = Vec('v', indices=(CR,))\n\n #\n # Spin flipping\n #\n\n orig1 = dr.sum((i, p.R), (j, p.R), a[i, j] * v[i, UP] * v[j, DOWN])\n defs1 = [\n dr.define(v[i, UP], v[i, DOWN]), dr.define(v[i, DOWN], v[i, UP])\n ]\n\n # Sequentially apply the definitions of the substitutions\n expected_sequential = dr.sum(\n (i, p.R), (j, p.R), a[i, j] * v[i, UP] * v[j, UP]\n )\n res = orig1.subst_all(\n defs1, simult_all=False, full_balance=full_balance, simplify=simplify\n )\n assert res == expected_sequential\n\n # Simultaneously apply the definitions of the substitutions\n expected_simutaneous = dr.sum(\n (i, p.R), (j, p.R), a[i, j] * v[i, DOWN] * v[j, UP]\n )\n res = orig1.subst_all(\n defs1, simult_all=True, full_balance=full_balance, simplify=simplify\n )\n assert res == expected_simutaneous\n\n #\n # In-place BCS transformation\n #\n\n orig2 = dr.einst(\n a[i, j] * v_dag[i, UP] * v[j, UP] +\n a[i, j] * v_dag[i, DOWN] * v[j, DOWN]\n )\n defs2 = [\n dr.define(v_dag[i, UP], x[i] * v_dag[i, UP] - y[i] * v[i, DOWN]),\n dr.define(v_dag[i, DOWN], x[i] * v_dag[i, DOWN] + y[i] * v[i, UP]),\n dr.define(v[i, UP], x[i] * v[i, UP] - y[i] * v_dag[i, DOWN]),\n dr.define(v[i, DOWN], x[i] * v[i, DOWN] + y[i] * v_dag[i, UP]),\n ]\n\n # Sequentially apply the definitions of the substitutions\n expected_sequential = orig2\n for def_ in defs2:\n expected_sequential = def_.act(expected_sequential)\n expected_sequential = expected_sequential.simplify()\n res = orig2.subst_all(\n defs2, simult_all=False, full_balance=full_balance, simplify=simplify\n ).simplify()\n assert res == expected_sequential\n\n # Simultaneously apply the definitions of the substitutions\n expected_simutaneous = dr.sum(\n (i, p.R), (j, p.R), a[i, j] * (\n (x[i] * v_dag[i, UP] - y[i] * v[i, DOWN])\n * (x[j] * v[j, UP] - y[j] * v_dag[j, DOWN])\n + (x[i] * v_dag[i, DOWN] + y[i] * v[i, UP])\n * (x[j] * v[j, DOWN] + y[j] * v_dag[j, UP])\n )\n ).simplify()\n res = orig2.subst_all(\n defs2, simult_all=True, full_balance=full_balance, simplify=simplify\n ).simplify()\n assert res == expected_simutaneous", "def interpolate(x1, x2, u, N):\n \n # finding the magnitude of each component\n a1 = np.matmul(x1, u)\n a2 = np.matmul(x2, u)\n\n ims = [np.matmul(u, t * a1 + (1 - t) * a2) \\\n for t in np.linspace(0, 1, N)]\n\n return np.stack(ims, 0)", "def predict_numba(\n coordinates, points, coeffs, result, greens_function\n): # pylint: disable=not-an-iterable\n east, north, upward = coordinates[:]\n 
point_east, point_north, point_upward = points[:]\n for i in prange(east.size):\n for j in range(point_east.size):\n result[i] += coeffs[j] * greens_function(\n east[i],\n north[i],\n upward[i],\n point_east[j],\n point_north[j],\n point_upward[j],\n )", "def inner_products(t_S, t_Var, t_XS, t_YS, t_XE, t_YE, t_XR, t_YR):\n\n # Note in this computation, we do the indices in this form:\n # b, i, j, t\n # batch, pixel, neuron, time step\n\n # indices: b, i1, j, t\n t_dX = (t_XS.dimshuffle('x', 0, 'x', 'x') -\n t_XE.dimshuffle('x', 'x', 0, 'x') -\n t_XR.dimshuffle(0, 'x', 'x', 1))\n t_dX.name = 'dX'\n # indices: b, i2, j, t\n t_dY = (t_YS.dimshuffle('x', 0, 'x', 'x') -\n t_YE.dimshuffle('x', 'x', 0, 'x') -\n t_YR.dimshuffle(0, 'x', 'x', 1))\n t_dY.name = 'dY'\n\n # Use outer product trick to dot product image with point filters\n t_PixRFCouplingX = T.exp(-0.5 * t_dX ** 2 /\n t_Var.dimshuffle('x', 0, 'x', 'x'))\n t_PixRFCouplingY = T.exp(-0.5 * t_dY ** 2 /\n t_Var.dimshuffle('x', 0, 'x', 'x'))\n t_PixRFCouplingX.name = 'PixRFCouplingX'\n t_PixRFCouplingY.name = 'PixRFCouplingY'\n\n # Matrix of inner products between the images and the retinal RFs\n # indices: b, j, t\n # Sum_i2 T(i2, i1) * T(b, i2, j, t) = T(b, i1, j, t)\n t_IpsY = T.sum(t_S.dimshuffle('x', 0, 1, 'x', 'x') *\n t_PixRFCouplingY.dimshuffle(0, 1, 'x', 2, 3),\n axis=1)\n # Sum_i1 T(b, i1, j, t) * T(b, i2, j, t) = T(b, j, t)\n t_Ips = T.sum(t_IpsY * t_PixRFCouplingX, axis=1)\n t_Ips.name = 'Ips'\n\n # For the gradient, we also prepare d Ips / dS\n # This is in the form b, i2, i1, j, t\n t_PixRFCoupling = (t_PixRFCouplingX.dimshuffle(0, 'x', 1, 2, 3) *\n t_PixRFCouplingY.dimshuffle(0, 1, 'x', 2, 3))\n\n return t_Ips, t_PixRFCoupling", "def _inner_product_a2(\n self, tangent_vec_a, tangent_vec_b, base_point, vertex_areas_bp\n ):\n laplacian_at_base_point = self._space.laplacian(base_point)\n return self.a2 * gs.sum(\n gs.einsum(\n \"...bi,...bi->...b\",\n laplacian_at_base_point(tangent_vec_a),\n laplacian_at_base_point(tangent_vec_b),\n )\n / vertex_areas_bp,\n axis=-1,\n )", "def cubic_evolve(self,nt=1):\n #loop through time steps\n for l in range(nt):\n # temporary array\n y_temp = np.zeros(self.y.shape[0])\n # loop through array\n for i in range(self.y.shape[0]):\n # idx left to departure point\n x_dep = self.x[i]-self.u[i]*self.dt\n j = int(np.floor(x_dep/self.dx))\n # alpha\n a = (self.x[i]-self.u[i]*self.dt - j*self.dx)/self.dx\n # calculate next time step\n f = lambda x: x % self.y.shape[0] if x >= self.y.shape[0] else x\n y_temp[i] = - a * (1-a)*(2-a)/6 * self.y[f(j-1)]\n y_temp[i] += (1-a**2)*(2-a)/2 * self.y[f(j)]\n y_temp[i] += a*(1+a)*(2-a)/2 * self.y[f(j+1)]\n y_temp[i] -= a*(1-a**2)/6 * self.y[f(j+2)]\n self.y = np.copy(y_temp)\n return self.y", "def __call__(self, t=1 / 2):\n return (t * self.vector)(self.p1)", "def _gv_linear(self, r, t):\n mv1t = torch.matmul(self.mv1.weight, t.T) # [k, b]\n mv2r = torch.matmul(self.mv2.weight, r.T) # [k, b]\n return (mv1t + mv2r + self.bv.weight).T # [b, k]", "def mul(Z,X,Y):", "def affine_forward(x,w,b):\n out=None\n N=x.shape[0]\n x_row=x.reshape(N,-1)\n out=np.dot(x_row,w)+b\n cache=(x,w,b)\n return out,cache", "def inner_product(self, tangent_vec_a, tangent_vec_b, base_point):\n inner_prod_mat = self.metric_matrix(base_point)\n aux = gs.einsum(\"...j,...jk->...k\", gs.conj(tangent_vec_a), inner_prod_mat)\n return gs.dot(aux, tangent_vec_b)", "def affine_forward(x, w, b):\n #raise NotImplementedError\n #######################################################################\n # 
#\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################\n out=np.dot(x,w)+b\n cache=(x,w,b)\n return(out, cache)", "def V(I, dT, a, b, c, d, e, f):\n x1 = I # I\n x2 = dT # dT\n m = (a * x1 ** 2 + b * x1 + c)\n b = (d * x1 ** 2 + e * x1 + f)\n return m * x2 + b", "def alternative_iterative_method(x0, n, gamma, b):\n # Parameters:\n MAX_ITER = 1000\n n2 = n**2\n\n # Creating NxN versions of vector for easier indexing during iteration\n b = b.copy().reshape(n, n)\n b_transposed = b.copy().T\n x0 = x0.copy().reshape(n, n)\n x0_transposed = x0.copy().T\n x1 = x0.copy()\n x1_transposed = x0_transposed.copy()\n\n # No need for M, N, only a smaller tridiagonal system:\n H = scipy.sparse.diags((-1, 2, -1), (-1, 0, 1), shape=(n, n), format=\"csr\")\n gammaI = scipy.sparse.diags((gamma,), (0,), shape=(n, n), format=\"csr\")\n M1 = gammaI + H # Corresponds to both (gI + M) & (gI + N) in equations\n M2 = gammaI - H # Corresponds to both (gI - M) & (gI - N) in equations\n\n # Preallocating RHS of equations\n RHS7 = np.zeros((n, n), dtype=np.float64)\n RHS8 = np.zeros((n, n), dtype=np.float64)\n\n k = 0\n while k < MAX_ITER:\n for i in range(n): # Loading RHS values for Equation (7):\n RHS7[:, i] = scipy.sparse.csr_matrix.dot(M2, x0_transposed[i]) + b_transposed[i]\n for i in range(n): # Solving N independent tridig mat systems related to Eq(7):\n x1[i] = scipy.sparse.linalg.spsolve(M1, RHS7[i])\n RHS8[i] = scipy.sparse.csr_matrix.dot(M2, x1[i]) + b[i] # Loading RHS values for Equation (8):\n for i in range(n): # Solving N independent tridig mat systems related to Eq(8):\n x1_transposed[i] = scipy.sparse.linalg.spsolve(M1, RHS8[:, i])\n\n k += 1\n if np.allclose(x1_transposed, x0_transposed, rtol=1e-8):\n break\n x0_transposed = x1_transposed.copy()\n\n res = x1_transposed.T.reshape(n2)\n return res, k", "def f(t, x, y):\n Iq, UC, U = x # time continous state vector\n dIq = (Uq(t) - R*Iq - UC - U) / L\n dUC = Iq / C\n dU = Iq / Cp\n return array([dIq, dUC, dU])", "def _apply_cost_xy(x, y, norm_x, norm_y, vec, cost_fn, cost_pow, fn=None):\n c = _cost(x, y, norm_x, norm_y, cost_fn, cost_pow)\n return jnp.dot(c, vec) if fn is None else jnp.dot(fn(c), vec)", "def matmul(x, y):\n return np.matmul(x, y)", "def convert_matmul(g, op, block):\n\n inputs = [g.get_node(op.input(\"X\")[0]), g.get_node(op.input(\"Y\")[0])]\n a_shape = infer_shape(inputs[0])\n b_shape = infer_shape(inputs[1])\n if op.has_attr(\"trans_x\"):\n # for matmul_v2\n trans_x = op.attr(\"trans_x\")\n trans_y = op.attr(\"trans_y\")\n else:\n # for matmul\n trans_x = op.attr(\"transpose_X\")\n trans_y = op.attr(\"transpose_Y\")\n if trans_x:\n perm = list(range(len(a_shape)))\n perm[-2] = len(a_shape) - 1\n perm[-1] = len(a_shape) - 2\n inputs[0] = _op.transpose(inputs[0], axes=perm)\n if trans_y:\n perm = list(range(len(b_shape)))\n perm[-2] = len(b_shape) - 1\n perm[-1] = len(b_shape) - 2\n inputs[1] = _op.transpose(inputs[1], axes=perm)\n\n # This implemention almost keeps same with ONNX\n # Need to check input shape as batch matmul must be supported.\n a_shape = shape_of(inputs[0], dtype=\"int32\")\n a_rank = infer_shape(a_shape)[0]\n b_shape = shape_of(inputs[1], dtype=\"int32\")\n b_rank = infer_shape(b_shape)[0]\n # When performing a batch matmul, we need to properly handle N-dim shapes.\n if a_rank > 2 or b_rank > 2:\n\n def flatten_to_nd(x, x_shape, nd=3):\n ndims = infer_shape(x_shape)[0]\n if ndims == nd:\n return x\n newshape = _op.concatenate(\n [\n _expr.const([-1], 
dtype=infer_type(x_shape).checked_type.dtype),\n _op.strided_slice(x_shape, [ndims - nd + 1], [ndims]),\n ],\n 0,\n )\n out = _op.reshape(x, fold_constant(newshape))\n return out\n\n b_type = infer_type(inputs[1])\n # Convert to dense if the second matrix is 2d and non-dynamic\n if b_rank == 2 and not _ty.is_dynamic(b_type.checked_type):\n a = flatten_to_nd(inputs[0], a_shape, 2)\n b = _op.transpose(inputs[1])\n output = _op.nn.dense(a, b)\n else:\n # Convert a and b into 3 dimensional tensors.\n a = flatten_to_nd(inputs[0], a_shape, 3)\n b = flatten_to_nd(inputs[1], b_shape, 3)\n # Transpose matrix dimensions of b.\n b = _op.transpose(b, [0, 2, 1])\n # Perform a batch matmul.\n output = _op.nn.batch_matmul(a, b)\n # Determine the output batch dimension.\n if a_rank > b_rank:\n out_batch = _op.strided_slice(a_shape, [0], [a_rank - 2])\n elif a_rank < b_rank:\n out_batch = _op.strided_slice(b_shape, [0], [b_rank - 2])\n # If its unclear how broadcasting should be applied, the output\n # shape is determined by choosing the maximum value from each input.\n else:\n out_batch = _op.concatenate(\n [\n _op.maximum(\n _op.strided_slice(a_shape, [i], [i + 1]),\n _op.strided_slice(b_shape, [i], [i + 1]),\n )\n for i in range(a_rank - 2)\n ],\n 0,\n )\n # Reshape output to original dimensions.\n final_shape = _op.concatenate(\n [\n out_batch,\n _op.strided_slice(\n a_shape, [infer_shape(a_shape)[0] - 2], [infer_shape(a_shape)[0] - 1]\n ),\n _op.strided_slice(\n b_shape, [infer_shape(b_shape)[0] - 1], [infer_shape(b_shape)[0]]\n ),\n ],\n 0,\n )\n out = _op.reshape(output, fold_constant(final_shape))\n else:\n if b_rank == 1:\n inputs[1] = _op.expand_dims(inputs[1], 1, 1)\n # Otherwise a simple dense op will get the job done.\n input_1_t = _op.transpose(inputs[1], axes=(1, 0))\n out = _op.nn.dense(inputs[0], input_1_t)\n if b_rank == 1:\n out = _op.squeeze(out, axis=[-1])\n if op.has_attr(\"alpha\"):\n alpha = op.attr(\"alpha\")\n if not np.isclose(alpha, 1.0):\n out = out * _expr.const(alpha).astype(\"float32\")\n g.add_node(op.output(\"Out\")[0], out)", "def bs2x2_transform(t, r, input_state):\n size = len(input_state)\n output_state = np.zeros((size*2 - 1, size*2 - 1), dtype=complex)\n\n for m in range(size):\n for n in range(size):\n # two sums up to m and n\n for k in range(m + 1):\n for l in range(n + 1):\n first_index = m - k + l # first channel index\n second_index = k + n - l # second channel index\n coeff = input_state[m, n] * (1j*r)**(k + l) * t**(m - k + n - l) * factorial(m) * factorial(n) / (factorial(k) * factorial(m - k) * factorial(l) * factorial(n - l))\n output_state[first_index, second_index] = output_state[first_index, second_index] + coeff\n\n return output_state", "def solve_pcaw(y, A_fun, AT_fun, lambda_l1, reshape_img_fun, head, invhead, mean, show_img_progress=False, alpha=0.2, max_iter=100, solver_tol=1e-6):\n\n\n obj_lss = np.zeros(max_iter)\n x_zs = np.zeros(max_iter)\n u_norms = np.zeros(max_iter)\n times = np.zeros(max_iter)\n\n ATy = AT_fun(y)\n x_shape = ATy.shape\n d = np.prod(x_shape)\n \n def vec(x):\n return tf.reshape(x, [-1])\n\n def A_cgs_fun(x):\n x = tf.reshape(x,x_shape)\n y = AT_fun(A_fun(x)) + alpha * x\n return vec(y)\n A_cgs = LinearOperator((d,d), matvec=A_cgs_fun, dtype='float')\n\n def compute_p_inv_A(b, z0):\n (z,info) = sp.sparse.linalg.cgs(A_cgs, vec(b), x0=vec(z0), tol=1e-3, maxiter=100)\n if info > 0:\n print('cgs convergence to tolerance not achieved')\n elif info <0:\n print('cgs gets illegal input or breakdown')\n z = tf.reshape(z, x_shape)\n 
return z\n\n\n def A_cgs_fun_init(x):\n x = tf.reshape(x, x_shape)\n y = AT_fun(A_fun(x))\n return vec(y)\n A_cgs_init = LinearOperator((d,d), matvec=A_cgs_fun_init, dtype='float')\n\n def compute_init(b, z0):\n (z,info) = sp.sparse.linalg.cgs(A_cgs_init, vec(b), x0=vec(z0), tol=1e-2)\n if info > 0:\n print('cgs convergence to tolerance not achieved')\n elif info <0:\n print('cgs gets illegal input or breakdown')\n z = tf.reshape(z,x_shape)\n return z\n\n # initialize z and u\n z = tf.reshape(mean,x_shape)\n u = np.zeros(x_shape)\n\n plot_normalozer = matplotlib.colors.Normalize(vmin=0.0, vmax=1.0, clip=True)\n\n\n start_time = timeit.default_timer()\n\n for iter in range(max_iter):\n\n # x-update\n net_input = z+u\n \n Wzu = head([net_input])\n q = tfp.math.soft_threshold(Wzu, lambda_l1/alpha)\n x = invhead(q)[0]\n\n # z-update\n b = ATy + alpha * (x - u)\n z = compute_p_inv_A(b, z)\n\n # u-update\n u += z - x;\n\n if show_img_progress:\n\n fig = plt.figure('current_sol')\n plt.gcf().clear()\n fig.canvas.set_window_title('iter %d' % iter)\n plt.subplot(1,3,1)\n plt.imshow(reshape_img_fun(np.clip(x, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('x')\n plt.subplot(1,3,2)\n plt.imshow(reshape_img_fun(np.clip(z, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('z')\n plt.subplot(1,3,3)\n plt.imshow(reshape_img_fun(np.clip(net_input, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('netin')\n plt.pause(0.00001)\n\n\n obj_ls = 0.5 * np.sum(np.square(y - A_fun(x)))\n x_z = np.sqrt(np.mean(np.square(x-z)))\n u_norm = np.sqrt(np.mean(np.square(u)))\n\n obj_lss[iter] = obj_ls\n x_zs[iter] = x_z\n u_norms[iter] = u_norm\n times[iter] = timeit.default_timer() - start_time\n\n if x_z < solver_tol:\n break\n\n infos = {'obj_lss': obj_lss, 'x_zs': x_zs, 'u_norms': u_norms,\n 'times': times, 'alpha':alpha, 'lambda_l1':lambda_l1,\n 'max_iter':max_iter, 'solver_tol':solver_tol}\n\n\n return (x, z, u, infos)", "def __call__(self, u, t):\n u, v = u # u is array of length 2 holding our [u, v]\n if self.damping == 'linear':\n b_term = self.b*v\n elif self.damping == 'quadratic':\n b_term = self.b*np.abs(v)*v\n else:\n b_term = 0\n return [v, (-b_term - self.s(u) + self.F(t))/self.m]", "def forward(self, x, y, t):\r\n # Compute shapes.\r\n batch_size = x.shape[0]\r\n n_in = x.shape[1]\r\n n_out = t.shape[1]\r\n\r\n # Compute the pairwise distances.\r\n # Shape: (batch, n_in, n_out).\r\n dists = compute_dists(x, t)\r\n\r\n # Compute the weights.\r\n # Shape: (batch, n_in, n_out, in_channels).\r\n wt = self.rbf(dists)\r\n\r\n # Perform the weighting.\r\n # Shape: (batch, n_in, n_out, in_channels).\r\n y_out = y.view(batch_size, n_in, -1, self.in_channels) * wt\r\n\r\n # Sum over the inputs.\r\n # Shape: (batch, n_out, in_channels).\r\n y_out = y_out.sum(1)\r\n\r\n # Apply the point-wise function.\r\n # Shape: (batch, n_out, out_channels).\r\n y_out = y_out.view(batch_size * n_out, self.in_channels)\r\n y_out = self.g(y_out)\r\n y_out = y_out.view(batch_size, n_out, self.out_channels)\r\n\r\n return y_out", "def multiply(traj, result_list):\n z=traj.x*traj.y\n result_list[traj.v_idx] = z", "def vectordot(a, b):\n return np.sum(a * b, 1)", "def cross(x, y, target=utils.CCE):\n utils.elemwise_shape_check(get_shape(y), get_shape(x))\n utils.elemwise_dtype_check(\n y.dtype, x.dtype,\n (utils.DtypeForDavinci.ALL_FLOAT) if product_is_mini() \\\n else (utils.DtypeForDavinci.FLOAT16,\n utils.DtypeForDavinci.FLOAT32,\n utils.DtypeForDavinci.INT32,\n 
utils.DtypeForDavinci.INT8, utils.DtypeForDavinci.UINT8))\n\n shape = get_shape(x)\n\n if shape[0] != 3:\n raise RuntimeError(\n \"The first axis of input must be 3, actual input is %d\" % shape[0])\n\n inp_dtype = x.dtype\n need_type_convert = inp_dtype in (\"int8\", \"uint8\")\n\n shape = get_shape(x)\n shp = shape[1:]\n\n if need_type_convert:\n x = cast(x, \"float16\", target=utils.CCE)\n y = cast(y, \"float16\", target=utils.CCE)\n\n a0b1 = tvm.compute(shp, lambda *i: x(0, *i) * y(1, *i), name=\"a0b1\")\n a0b2 = tvm.compute(shp, lambda *i: x(0, *i) * y(2, *i), name=\"a0b2\")\n a1b0 = tvm.compute(shp, lambda *i: x(1, *i) * y(0, *i), name=\"a1b0\")\n a1b2 = tvm.compute(shp, lambda *i: x(1, *i) * y(2, *i), name=\"a1b2\")\n a2b0 = tvm.compute(shp, lambda *i: x(2, *i) * y(0, *i), name=\"a2b0\")\n a2b1 = tvm.compute(shp, lambda *i: x(2, *i) * y(1, *i), name=\"a2b1\")\n\n res0 = tvm.compute(shp, lambda *i: a1b2(*i) - a2b1(*i), name=\"res0\")\n res1 = tvm.compute(shp, lambda *i: a2b0(*i) - a0b2(*i), name=\"res1\")\n res2 = tvm.compute(shp, lambda *i: a0b1(*i) - a1b0(*i), name=\"res2\")\n\n res = tvm.compute(\n shape,\n lambda *i:\n tvm.expr.Select(\n i[0] == 0,\n res0(*i[1:]),\n tvm.expr.Select(i[0] == 1, res1(*i[1:]), res2(*i[1:]))),\n name='res')\n\n if need_type_convert:\n res = cast(res, inp_dtype, target=utils.CCE)\n\n return res", "def _evaluate(self, x, y):\n x_pos, y_pos = self.find_sector(x, y)\n alpha, beta = self.find_coords(x, y, x_pos, y_pos)\n\n # Calculate the function at each point using bilinear interpolation\n f = (\n (1 - alpha) * (1 - beta) * self.f_values[x_pos, y_pos]\n + (1 - alpha) * beta * self.f_values[x_pos, y_pos + 1]\n + alpha * (1 - beta) * self.f_values[x_pos + 1, y_pos]\n + alpha * beta * self.f_values[x_pos + 1, y_pos + 1]\n )\n return f", "def V_fit(x, a, b, c, d, e, f):\n x1 = x[0] # I\n x2 = x[1] # dT\n m = (a * x1 ** 2 + b * x1 + c)\n b = (d * x1 ** 2 + e * x1 + f)\n return m * x2 + b", "def fastCompose(self,coefficients,t=1,N=None):\n xa,xb,ya,yb=coefficients\n l=max(len(xa),len(xb),len(ya),len(yb))\n g=[]\n xs,ys=0,0\n if N is None: N=l\n wo=2*math.pi/N\n for n in range(N):\n xs+=xa[n]*math.cos(n*wo*t)+xb[n]*math.sin(n*wo*t)\n ys+=ya[n]*math.cos(n*wo*t)+yb[n]*math.sin(n*wo*t)\n #xs+=xa[n]*math.cos(n*wo*t)\n #ys+=yb[n]*math.sin(n*wo*t)\n\n g.append((xs,ys))\n return g", "def linear_forward_calculation(A, W, b):\n # Your code here\n # print(W.shape, A.shape, b.shape)\n Z=np.dot(W,A)+b\n\n return Z\n # raise NotImplementedError", "def Jv(t,y,v):\n return A@v", "def affine_forward(x, w, b):\n N = x.shape[0]\n\n # reshape input into rows\n output = x.reshape([N, -1]).dot(w) + b\n cache = (x, w, b)\n\n return output, cache", "def trilinear_interp(img, indices): \n input_array = np.array(img.get_data())\n indices = np.array(indices)\n\n x_indices = indices[:,0]\n y_indices = indices[:,1]\n z_indices = indices[:,2]\n\n # get lower bounds\n x0 = x_indices.astype(np.integer)\n y0 = y_indices.astype(np.integer)\n z0 = z_indices.astype(np.integer)\n\n # get upper bounds0000\n x1 = x0 + 1\n y1 = y0 + 1\n z1 = z0 + 1\n\n # #Check if xyz1 is beyond array boundary:\n x1[np.where(x1==input_array.shape[0])] = x0.max()\n y1[np.where(y1==input_array.shape[1])] = y0.max()\n z1[np.where(z1==input_array.shape[2])] = z0.max()\n\n x = x_indices - x0\n y = y_indices - y0\n z = z_indices - z0\n\n kx = 1 - x\n ky = 1 - y\n kz = 1 - z\n\n #output = input_array[x0,y0,z0]\n #print output\n output = (input_array[x0,y0,z0]*kx*ky*kz +\n input_array[x1,y0,z0]*x*ky*kz +\n 
input_array[x0,y1,z0]*kx*y*kz +\n input_array[x0,y0,z1]*kx*ky*z +\n input_array[x1,y0,z1]*x*ky*z +\n input_array[x0,y1,z1]*kx*y*z +\n input_array[x1,y1,z0]*x*y*kz +\n input_array[x1,y1,z1]*x*y*z)\n\n return output", "def _eval(self, v):\n Kv = np.zeros(self.K.output_size)\n self.K.forward(v.ravel(), Kv)\n return super(least_squares, self)._eval(Kv - self.offset)", "def vectorMultiply(v, f):\n return [x * f for x in v]", "def affine_forward(x, W, b):\r\n x2d = np.reshape(x, (x.shape[0], -1)) # convert 4D input matrix to 2D \r\n out = np.dot(x2d, W) + b # linear transformation\r\n cache = (x, W, b) # keep for backward step (stay with us)\r\n return out, cache", "def vec_apply_cost(self,\n arr: jnp.ndarray,\n axis: bool = 0,\n fn=None) -> jnp.ndarray:\n x, y = (self.x, self.y) if axis == 0 else (self.y, self.x)\n nx, ny = self._norm_x, self._norm_y\n nx, ny = (nx, ny) if axis == 0 else (ny, nx)\n\n applied_cost = jnp.dot(nx, arr).reshape(1, -1)\n applied_cost += ny.reshape(-1, 1) * jnp.sum(arr, axis=0).reshape(1, -1)\n applied_cost += -2.0 * jnp.dot(y, jnp.dot(x.T, arr))\n return fn(applied_cost) if fn else applied_cost", "def temp_update(self):\n a_w = self.k / self.dx\n a_e = self.k / self.dx\n a_n = self.k / self.dy\n a_s = self.k / self.dy\n a_p = a_w + a_e + a_n + a_s + self.rho * self.cp * self.dx / self.dt\n for i, j in ti.ndrange((1, self.nx - 1), (1, self.ny - 1)):\n self.T[i,\n j] = (a_w * self.T[i - 1, j] + a_e * self.T[i + 1, j] +\n a_s * self.T[i, j - 1] + a_n * self.T[i, j + 1]) / a_p", "def run(self, x):\n T = len(x)\n self.x = x\n self.s = np.zeros((T, self.hidden_size))\n for t in xrange(T):\n self.s[t] = np.dot(self.nodes.u, x[t])\n if self.en_bias: self.s[t] += self.nodes.b\n self.s[t] = self.acfun.compute(np.clip(self.s[t], -50, 50))\n return self.s", "def lfiltic_vec(b, a, y, x=None):\n N = np.size(a) - 1\n M = np.size(b) - 1\n K = max(M, N)\n y = np.asarray(y)\n batch_size = y.shape[0]\n\n if y.dtype.kind in 'bui':\n # ensure calculations are floating point\n y = y.astype(np.float64)\n zi = np.zeros((batch_size, K), y.dtype)\n if x is None:\n x = np.zeros((batch_size, M), y.dtype)\n else:\n x = np.asarray(x)\n L = np.shape(x)[1]\n if L < M:\n x = np.r_[x, np.zeros((batch_size, M - L))]\n L = np.shape(y)[1]\n if L < N:\n y = np.r_[y, np.zeros((batch_size, N - L))]\n\n for m in range(M):\n zi[:, m] = np.sum(b[m + 1:] * x[:, :M - m], axis=1)\n\n for m in range(N):\n zi[:, m] -= np.sum(a[m + 1:] * y[:, :N - m], axis=1)\n\n return zi", "def _lin_solve(b, x, x0, a, c, iterations, n):\n c_recip = 1 / c\n for k in range(0, iterations):\n for m in range(1, n - 1):\n for j in range(1, n - 1):\n for i in range(1, n - 1):\n x[index_of(i, j, m, n)] = (x0[index_of(i, j, m, n)] + a * (x[index_of(i + 1, j, m, n)]\n + x[index_of(i - 1, j, m, n)]\n + x[index_of(i, j + 1, m, n)]\n + x[index_of(i, j - 1, m, n)]\n + x[index_of(i, j, m + 1, n)]\n + x[index_of(i, j, m - 1, n)]\n )) * c_recip\n _set_bounds(b, x, n)", "def inner_product(self, tangent_vec_a, tangent_vec_b, base_point):\n to_squeeze = False\n if tangent_vec_a.ndim == 2 and tangent_vec_b.ndim == 2:\n to_squeeze = True\n if tangent_vec_a.ndim == 2:\n tangent_vec_a = gs.expand_dims(tangent_vec_a, axis=0)\n if tangent_vec_b.ndim == 2:\n tangent_vec_b = gs.expand_dims(tangent_vec_b, axis=0)\n\n point_a = base_point + tangent_vec_a\n point_b = base_point + tangent_vec_b\n inner_prod = gs.zeros((gs.maximum(len(tangent_vec_a), len(tangent_vec_b)), 1))\n if self.a0 > 0 or self.a2 > 0:\n vertex_areas_bp = 
self._space.vertex_areas(base_point)\n if self.a0 > 0:\n inner_prod += self._inner_product_a0(\n tangent_vec_a, tangent_vec_b, vertex_areas_bp=vertex_areas_bp\n )\n if self.a2 > 0:\n inner_prod += self._inner_product_a2(\n tangent_vec_a,\n tangent_vec_b,\n base_point=base_point,\n vertex_areas_bp=vertex_areas_bp,\n )\n if self.a1 > 0 or self.b1 > 0 or self.c1 > 0 or self.b1 > 0:\n one_forms_bp = self._space.surface_one_forms(base_point)\n surface_metrics_bp = self._space._surface_metric_matrices_from_one_forms(\n one_forms_bp\n )\n normals_bp = self._space.normals(base_point)\n areas_bp = gs.sqrt(gs.linalg.det(surface_metrics_bp))\n\n if self.c1 > 0:\n inner_prod += self._inner_product_c1(\n point_a, point_b, normals_bp, areas_bp\n )\n if self.d1 > 0 or self.b1 > 0 or self.a1 > 0:\n ginv_bp = gs.linalg.inv(surface_metrics_bp)\n one_forms_a = self._space.surface_one_forms(point_a)\n one_forms_b = self._space.surface_one_forms(point_b)\n if self.d1 > 0:\n inner_prod += self._inner_product_d1(\n one_forms_a,\n one_forms_b,\n one_forms_bp,\n areas_bp=areas_bp,\n inv_surface_metrics_bp=ginv_bp,\n )\n\n if self.b1 > 0 or self.a1 > 0:\n dga = (\n gs.matmul(\n one_forms_a, gs.transpose(one_forms_a, axes=(0, 1, 3, 2))\n )\n - surface_metrics_bp\n )\n dgb = (\n gs.matmul(\n one_forms_b, gs.transpose(one_forms_b, axes=(0, 1, 3, 2))\n )\n - surface_metrics_bp\n )\n ginvdga = gs.matmul(ginv_bp, dga)\n ginvdgb = gs.matmul(ginv_bp, dgb)\n inner_prod += self._inner_product_a1(ginvdga, ginvdgb, areas_bp)\n inner_prod += self._inner_product_b1(ginvdga, ginvdgb, areas_bp)\n return gs.squeeze(inner_prod, axis=0) if to_squeeze else inner_prod", "def bezier_surface(A):\r\n n, m, z = A.shape\r\n n, m = n-1, m-1\r\n res = 10\r\n B = np.zeros((res,res,3))\r\n\r\n u = np.linspace(0,1,res)\r\n v = np.linspace(0,1,res)\r\n for i in xrange(res):\r\n for j in xrange(res):\r\n B[i,j,:] = deCasteljua2(A,n,m,u[i],v[j]) \r\n\r\n return B", "def current_update():\n # Compute the multiplier coefficient:\n ci = dt / (L * dx)\n for k in range(0, nx-1):\n I[k] = I[k] - (ci * (V[k + 1] - V[k]))", "def scalar_vector_mult(alpha, v):\n return [alpha*x for x in v]", "def process_batch(self, X, y):\n # normalize to [-1.0, 1.0]\n X = X / 127.5 - 1.0\n\n for i in range(X.shape[0]):\n # scaling and bias for contrast and brightness augmentation\n scale = 1.0 + 0.1 * np.random.randn()\n bias = 0.0 + 0.1 * np.random.randn()\n X[i] = np.clip(scale*X[i] + bias, -1.0, 1.0)\n\n # transformations for geometric augmentations\n angle = 6.0 * np.random.randn()\n zoom = 1 + 0.1 * np.random.randn()\n translation = 2.0 * np.random.randn()\n shear = 0.1 * np.random.randn()\n\n trafo = skimage.transform.AffineTransform(\n translation = translation,\n rotation = np.deg2rad(angle),\n scale = (zoom, zoom),\n shear = shear)\n centered_trafo = (self.postshift + (trafo + self.preshift))\n X[i] = skimage.transform.warp(X[i], centered_trafo, mode = \"edge\", order = 1)\n return X, y", "def forward(self, x):\n out = self.pre_processing(x)\n out = self.a3(out)\n out = self.b3(out)\n out = self.maxpool(out)\n out = self.a4(out)\n out = self.b4(out)\n out = self.c4(out)\n out = self.d4(out)\n out = self.e4(out)\n out = self.maxpool(out)\n out = self.a5(out)\n out = self.b5(out)\n out = self.avgpool(out)\n out = out.view(out.size(0), -1) # reshape the output tensor\n out = self.linear(out)\n\n return out", "def get_batch(self, x, y, t):\n t = (torch.ones(x.shape[0]).int() * t)\n\n if len(self.M) > 0:\n MEM = self.M\n order = np.arange(len(MEM))\n 
np.random.shuffle(order)\n index = order[:min(x.shape[0], len(MEM))]\n\n x = x.cpu()\n y = y.cpu()\n\n for k, idx in enumerate(index):\n ox, oy, ot = MEM[idx]\n x = torch.cat((x, ox.unsqueeze(0)), 0)\n y = torch.cat((y, oy.unsqueeze(0)), 0)\n t = torch.cat((t, ot.unsqueeze(0)), 0)\n\n # handle gpus if specified\n if self.cuda:\n x = x.cuda()\n y = y.cuda()\n t = t.cuda()\n\n return x, y, t", "def _dot_product_attention_inner_relative(x, y, z, transpose):\n batch_size, heads, length, _ = x.size()\n\n # xy_matmul is [batch_size, heads, length, length or depth]\n xy_matmul = torch.matmul(x, y if not transpose else y.transpose(-2, -1))\n # x_t is [length, batch_size, heads, length or depth]\n x_t = x.permute(2, 0, 1, 3)\n # x_t_r is [length, batch_size * heads, length or depth]\n x_t_r = x_t.view(length, batch_size * heads, -1)\n # x_tz_matmul is [length, batch_size * heads, length or depth]\n x_tz_matmul = torch.matmul(x_t_r, z if not transpose else z.transpose(-2, -1))\n # x_tz_matmul_r is [length, batch_size, heads, length or depth]\n x_tz_matmul_r = x_tz_matmul.view(length, batch_size, heads, -1)\n # x_tz_matmul_r_t is [batch_size, heads, length, length or depth]\n x_tz_matmul_r_t = x_tz_matmul_r.permute(1, 2, 0, 3)\n\n return xy_matmul + x_tz_matmul_r_t", "def __call__(self, T, X):\n dims = (T.shape[0], 1, 1)\n\n a = X[:, 0:1].reshape(dims)\n e = X[:, 1:2].reshape(dims)\n i = X[:, 2:3].reshape(dims)\n w = X[:, 4:5].reshape(dims)\n f = X[:, 5:6].reshape(dims)\n\n sf = np.sin(f)\n cf = np.cos(f)\n st = np.sin(f + w)\n ct = np.cos(f + w)\n si = np.sin(i)\n ci = np.cos(i)\n p = a * (1. - e**2)\n r = p / (1. + e*cf)\n h = (self.mu * p)**.5\n zero = np.zeros(dims)\n\n adot = np.concatenate((e*sf, p/r, zero), axis=2) * 2*a**2/h\n edot = np.concatenate((p*sf, (p+r)*cf + r*e, zero), axis=2) / h\n idot = np.concatenate((zero, zero, r*ct/h), axis=2)\n Wdot = np.concatenate((zero, zero, r*st/h/si), axis=2)\n wdot = np.concatenate((-p*cf/e, (p+r)*sf/e, -r*st*ci/si), axis=2) / h\n fdot = np.concatenate((p*cf, -(p+r)*sf, zero), axis=2) / h / e\n\n return np.concatenate((adot, edot, idot, Wdot, wdot, fdot), axis=1)", "def vecteur_image(T,x,y):\n a,b,c,d = T[0][0],T[0][1],T[1][0],T[1][1]\n xx = a*x + b*y\n yy = c*x + d*y\n return xx,yy", "def regularize_bwd(X, y, mu0, mu1, v1, nz, K, verbose=False):\n \n if verbose: sss=0#print '\\ncompute bath between mu=%.4f and mu=%.4f'%(mu0, mu1)\n \n n, m = X.shape\n X_nz = np.atleast_2d(X[:, nz])\n b = np.dot(X.T, y)\n G = np.dot(X.T, X)\n \n nbr = 0\n mu = mu0\n trans_type = -1\n trans_sign = 0\n trans_ind = -1\n if verbose: nbr=0#print 'initial active features =', nz\n \n while mu > mu1:\n \n # find the breakpoints where coefficients become zero\n b_nz = b[nz]\n Kv1 = np.dot(K, v1)\n Kb_nz = np.dot(K, b_nz)\n mu_0 = Kb_nz / Kv1\n \n # find the breakpoints where new coefficients become active\n z = np.setdiff1d(np.arange(m), nz)\n X_z = np.atleast_2d(X[:, z])\n b_z = b[z]\n M = G[np.ix_(z, nz)]\n MKb_nz = np.dot(M, Kb_nz)\n MKv1 = np.dot(M, Kv1)\n mu_1 = (b_z - MKb_nz) / (1 - MKv1)\n mu_m1 = (b_z - MKb_nz) / (-1 - MKv1)\n \n if trans_type > 0: mu_0[-1] = mu1\n mu_0[mu_0 >= mu] = mu1\n if len(mu_0) > 0: \n mu_0_argmax = mu_0.argmax()\n mu_0_max = mu_0[mu_0_argmax][0]\n else:\n mu_0_max = mu1\n if trans_type == 0:\n if trans_sign == 1: mu_1[np.where(z == trans_ind)[0]] = mu1 - 1\n else: mu_m1[np.where(z == trans_ind)[0]] = mu1 - 1\n mu_1[mu_1 >= mu] = mu1\n if len(mu_1) > 0: \n mu_1_argmax = mu_1.argmax()\n mu_1_max = mu_1[mu_1_argmax][0]\n else:\n mu_1_max = mu1\n 
mu_m1[mu_m1 >= mu] = mu1\n if len(mu_m1) > 0: \n mu_m1_argmax = mu_m1.argmax()\n mu_m1_max = mu_m1[mu_m1_argmax][0]\n else:\n mu_m1_max = mu1\n \n # compute the breakpoint\n mu_br_all = np.array([mu_0_max, mu_1_max, mu_m1_max])\n trans_type = mu_br_all.argmax()\n mu_br = mu_br_all[trans_type]\n \n if mu_br > mu1:\n \n nbr += 1\n mu = mu_br\n \n if trans_type == 0: # an element of theta(t) goes to zero\n trans_ind = nz[mu_0_argmax]\n trans_sign = v1[mu_0_argmax]\n if verbose: sss=0#print 'transition point :: mu = %.4f :: feature %d is inactive'%(mu, trans_ind)\n nzind = range(len(nz))\n rr=np.where(nz==trans_ind)[0][0]\n #print 'longa:',len(nz),len(nzind),len(v1)\n #print 'c:',nz.index(trans_ind)\n nzind=np.delete(nzind,rr)#nzind=np.delete(nzind,np.where(nzind==nz.index(trans_ind)))#nzind.remove(nz.index(trans_ind))\n v1 = v1[nzind]\n nz=np.delete(nz,rr)#nz=np.delete(nz,np.where(nz==trans_ind))#nz.remove(trans_ind)\n #print 'longa2:',len(nz),len(nzind),len(v1)\n X_nz = X[:, nz]\n K = invupdatered(K, mu_0_argmax)\n else: # new active element\n if trans_type == 1: # it is positive\n trans_ind = z[mu_1_argmax]\n if verbose: sss=0#print 'transition point :: mu = %.4f :: feature %d is positive'%(mu, trans_ind)\n nz=np.append(nz,trans_ind)#nz.append(trans_ind)\n v1 = np.vstack([v1, 1])\n else: # it is negative\n trans_ind = z[mu_m1_argmax]\n if verbose: sss=0#print 'transition point :: mu = %.4f :: feature %d is negative'%(mu, trans_ind)\n nz=np.append(nz,trans_ind)#nz.append(trans_ind)\n v1 = np.vstack([v1, -1])\n X_new = np.atleast_2d(X[:, trans_ind]).T\n K = invupdateapp(K, np.dot(X_nz.T, X_new), np.dot(X_new.T, X_nz), \n np.dot(X_new.T, X_new))\n X_nz = X[:, nz]\n \n else: # compute solution at mu1\n \n if verbose: sss=0#print 'compute solution at mu =', mu1\n theta_nz = Kb_nz - mu1*Kv1\n mu = mu1\n \n return theta_nz, nz, K, nbr", "def take_EM_step(X, pi, A, B):\n # TODO: Write this function.\n pi_prime = np.zeros(pi.shape[0])\n A_prime = np.zeros((A.shape[0], A.shape[1]))\n B_prime = np.zeros((B.shape[0], B.shape[1]))\n for i in range(X.shape[0]):\n alpha = forward(X[i], pi, A, B)\n beta = backward(X[i], pi, A, B)\n p_xn = np.sum(alpha[X.shape[1]-1])\n\n #pi update\n alpha_0 = alpha[0] # alpha_0, i\n beta_0 = beta[0]\n pi_update = np.multiply(alpha_0, beta_0) / p_xn\n pi_prime += pi_update\n\n #a update\n for i_val in range(A.shape[0]):\n for j_val in range(A.shape[1]):\n tem = 0\n for t_val in range(X.shape[1]-1):\n tem += alpha[t_val][i_val] * A[i_val][j_val] * B[j_val][X[i][t_val+1]] * beta[t_val+1][j_val]\n tem = tem / p_xn\n A_prime[i_val][j_val] += tem\n\n #b update\n for k in range(B.shape[1]):\n b_sum = np.zeros(2)\n for p in range(X.shape[1]):\n if X[i][p] == k:\n alpha_t_i = alpha[p]\n beta_t_i = beta[p]\n b_sum += np.multiply(alpha_t_i, beta_t_i)\n b_sum = b_sum / p_xn\n B_prime[:, k] += b_sum\n\n #normalization\n pi_prime = pi_prime/pi_prime.sum()\n for e in range(A_prime.shape[0]):\n A_prime[e] = A_prime[e]/A_prime[e].sum()\n for d in range(B_prime.shape[0]):\n B_prime[d] = B_prime[d]/B_prime[d].sum()\n return (pi_prime, A_prime, B_prime)", "def cg(A, b, x=None):\n n = len(b)\n if not x:\n x = np.ones([n,1])\n r = np.dot(A, x) - b\n p = - r\n # r_k_norm = np.dot(r, r)\n r_k_norm = np.linalg.norm ( r )*np.linalg.norm ( r )\n for i in range(2*n):\n Ap = np.dot(A, p)\n alpha = r_k_norm / p.T@Ap\n try:\n x += alpha * p\n except:\n pass\n r += alpha * Ap\n r_kplus1_norm = np.linalg.norm ( r )*np.linalg.norm ( r )\n beta = r_kplus1_norm / r_k_norm\n r_k_norm = r_kplus1_norm\n if 
r_kplus1_norm < 1e-5:\n break\n p = beta * p - r\n return x", "def f_raw(x, a, b):\n return a * x + b", "def affine(a, u, v):\n\n return _add(_multiply(a, u, 257), v)", "def state_eq(t, x, u, tu):\n # u = u[0, :]\n u_interp = np.interp(t, tu, u[0, :])\n # print(f'u: {u}')\n return np.vstack((x[1], -x[1] + u_interp))", "def affine_forward(x, w, b):\n out = None\n x_shape = x.shape\n x_reshaped = x.reshape(x_shape[0], np.prod(x_shape[1:]))\n out = np.dot(x_reshaped, w) + b\n cache = (x, w, b)\n return out, cache", "def bilinear_sampler(img, x, y):\n # prepare useful params\n B = tf.shape(img)[0]\n H = tf.shape(img)[1]\n W = tf.shape(img)[2]\n C = tf.shape(img)[3]\n\n max_y = tf.cast(H - 1, 'int32')\n max_x = tf.cast(W - 1, 'int32')\n zero = tf.zeros([], dtype='int32')\n\n # cast indices as float32 (for rescaling)\n x = tf.cast(x, 'float32')\n y = tf.cast(y, 'float32')\n\n # rescale x and y to [0, W/H]\n x = 0.5 * ((x + 1.0) * tf.cast(W, 'float32'))\n y = 0.5 * ((y + 1.0) * tf.cast(H, 'float32'))\n\n # grab 4 nearest corner points for each (x_i, y_i)\n # i.e. we need a rectangle around the point of interest\n x0 = tf.cast(tf.floor(x), 'int32')\n x1 = x0 + 1\n y0 = tf.cast(tf.floor(y), 'int32')\n y1 = y0 + 1\n\n # clip to range [0, H/W] to not violate img boundaries\n x0 = tf.clip_by_value(x0, zero, max_x)\n x1 = tf.clip_by_value(x1, zero, max_x)\n y0 = tf.clip_by_value(y0, zero, max_y)\n y1 = tf.clip_by_value(y1, zero, max_y)\n\n # get pixel value at corner coords\n Ia = get_pixel_value(img, x0, y0)\n Ib = get_pixel_value(img, x0, y1)\n Ic = get_pixel_value(img, x1, y0)\n Id = get_pixel_value(img, x1, y1)\n\n # recast as float for delta calculation\n x0 = tf.cast(x0, 'float32')\n x1 = tf.cast(x1, 'float32')\n y0 = tf.cast(y0, 'float32')\n y1 = tf.cast(y1, 'float32')\n\n # calculate deltas\n wa = (x1 - x) * (y1 - y)\n wb = (x1 - x) * (y - y0)\n wc = (x - x0) * (y1 - y)\n wd = (x - x0) * (y - y0)\n\n # add dimension for addition\n wa = tf.expand_dims(wa, axis=3)\n wb = tf.expand_dims(wb, axis=3)\n wc = tf.expand_dims(wc, axis=3)\n wd = tf.expand_dims(wd, axis=3)\n\n # compute output\n out = tf.add_n([wa * Ia, wb * Ib, wc * Ic, wd * Id])\n\n\n return out", "def batch_outer_product(a, b):\n a, b = normalize_and_check_ndim([a, b], 2)\n # This is a batchwise version of the matrix multiplication approach\n # used for outer_product(), see explanation there.\n return a[:, :, np.newaxis] * b[:, np.newaxis, :]", "def compose_transform(T1, T2):\n aux_vec = np.array([0, 0, 1]).reshape(1, 3)\n\n T1 = np.concatenate((T1, aux_vec), axis=0)\n T2 = np.concatenate((T2, aux_vec), axis=0)\n\n T1_inv = np.linalg.inv(T1)\n T = T1_inv@T2\n\n return T[0:2]", "def k__bq__lLambda____(*tplVctAg): #`Λ(v0,v1,v2,..) 
edge product\n def getTensor(tplIndex, *tplVctAg):\n kryAt = sf.krry(tplVctAg[tplIndex[0]])\n for i in range(1, len(tplIndex)):\n kryAt = kryAt ^ sf.krry(tplVctAg[tplIndex[i]])\n return kryAt\n\n n = len(tplVctAg)\n \n # confirm that all elements in tplVctAg are same length vector\n lenAt = len(tplVctAg[0])\n for k in range(n):\n assert lenAt == len(tplVctAg[k])\n\n krryAt = sf.kzrs(*([len(tplVctAg[0])]*n))\n for index, sign in sf.permutate(range(n),1):\n krryAt = krryAt + sign * getTensor(index, *tplVctAg)\n\n return krryAt", "def interpolate (u, v, x):\r\n if x[0] < 0:\r\n x[0] = 0\r\n if x[0] > width-2:\r\n x[0] = width-2\r\n if x[1] < 0:\r\n x[1] = 0\r\n if x[1] > height-2:\r\n x[1] = height-2\r\n \r\n xi = int(x[1])\r\n xj = int(x[0])\r\n\r\n ax = (x[0] - xj) / h\r\n ay = (x[1] - xi) / h\r\n\r\n # Bilinear interpolation in 2D\r\n uij = (1-ax)*(1-ay)*u[xi,xj] + (1-ax)*ay*u[xi+1,xj] + ax*(1-ay)*u[xi,xj+1] + ax*ay*u[xi+1,xj+1]\r\n vij = (1-ax)*(1-ay)*v[xi,xj] + (1-ax)*ay*v[xi+1,xj] + ax*(1-ay)*v[xi,xj+1] + ax*ay*v[xi+1,xj+1]\r\n\r\n return uij, vij", "def forward(self, x):\n n, c, t, v = x.size()\n x1 = x.view(n, c * t, v)\n y = None\n for i in range(self.num_subset):\n A1 = self.PA[i]\n z = self.conv_d[i](torch.matmul(x1, A1).view(n, c, t, v))\n y = z + y if y is not None else z\n A2 = self.cen(x)\n z2 = torch.matmul(x1, A2).view(n, c, t, v)\n z2 = self.conv_cen(z2)\n y += self.lamb * z2\n y = self.bn(y)\n y += self.down(x)\n y = self.relu(y)\n y = self.attention(y)\n return y", "def prediction2d(self, X, t):\n self.A = self.createA(t)\n X = self.A.dot(X)\n return X", "def forward_substitution(l, b):\n y = np.zeros(b.shape[0])\n y[0] = b[0] / l[0, 0]\n for i in range(1, b.shape[0]):\n _sum = np.sum(l[i, :i] * y[:i])\n y[i] = (b[i] - _sum) / l[i, i]\n return y", "def f(X, params):\r\n x_i, x_i1, y_i, y_i1, y_ip, y_ip1 = params\r\n A = (x_i1 - X) / (x_i1 - x_i)\r\n B = (X - x_i) / (x_i1 - x_i)\r\n C = (1./6) * (A**3 - A) * (x_i1-x_i)**2 \r\n D = (1./6) * (B**3 - B) * (x_i1-x_i)**2\r\n return A*y_i + B*y_i1 + C*y_ip + D*y_ip1", "def f(self, x: np.array) -> np.array:\n return self.a * x**2 + self.b * x + self.c", "def trans_to_coordinates(T, pts):\n p = []\n for i in range(len(pts)):\n \n p_b = [pts[i][0], pts[i][1], pts[i][2], 1]\n p_a = np.matmul(T, p_b).tolist()\n p.append(p_a[0:3])\n\n return p", "def trans_to_coordinates(T, pts):\n p = []\n for i in range(len(pts)):\n \n p_b = [pts[i][0], pts[i][1], pts[i][2], 1]\n p_a = np.matmul(T, p_b).tolist()\n p.append(p_a[0:3])\n\n return p", "def _proj(u,v):\n return (np.einsum('i...,i...->...',u,v)/np.einsum('i...,i...->...',u,u))*u" ]
[ "0.59027725", "0.58240545", "0.57738847", "0.5771491", "0.56887174", "0.5658218", "0.56540334", "0.56478906", "0.5588295", "0.55453885", "0.5531856", "0.55062973", "0.548991", "0.5472191", "0.54176724", "0.541392", "0.5401743", "0.5387512", "0.53842276", "0.53838414", "0.53745127", "0.5367906", "0.5361873", "0.53534406", "0.5347286", "0.5338383", "0.5333888", "0.5319858", "0.53193945", "0.5316163", "0.5309", "0.53076404", "0.53014904", "0.53003144", "0.52879775", "0.5286607", "0.5240997", "0.52069676", "0.520357", "0.51858145", "0.51820576", "0.5178266", "0.5166226", "0.5161592", "0.51557606", "0.51407754", "0.5140632", "0.51358414", "0.5135756", "0.5135309", "0.5133662", "0.51320857", "0.5131872", "0.5125785", "0.51216996", "0.5113302", "0.51106447", "0.51060855", "0.5103371", "0.5099249", "0.50957537", "0.50943875", "0.507997", "0.5078571", "0.50734466", "0.50733554", "0.5060056", "0.5056312", "0.5053306", "0.5052806", "0.5050871", "0.504978", "0.5043587", "0.50376326", "0.50320625", "0.5029667", "0.50295883", "0.5025888", "0.5023104", "0.5019403", "0.50147015", "0.5011881", "0.50064164", "0.50061464", "0.49874008", "0.49829483", "0.497642", "0.497638", "0.49762216", "0.49752173", "0.4973927", "0.497181", "0.49708557", "0.49706775", "0.4968442", "0.49684325", "0.49659577", "0.49625823", "0.49625823", "0.49603912" ]
0.6895596
0
Performs a batched inner product over the last dimension. Replacement for deprecated `from numpy.core.umath_tests import inner1d`.
def batch_inner(a: np.ndarray, b: np.ndarray, verify: bool = True) -> np.ndarray:
    if verify and a.shape != b.shape:
        raise ValueError("All dimensions have to be equal")

    if a.shape[-1] == 0:
        return np.empty_like(a)

    return np.einsum("...i,...i->...", a, b)  # faster than np.sum(a * b, axis=-1)
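A minimal usage sketch of the einsum-based batched inner product above; the array shapes and values are illustrative assumptions.

import numpy as np

# hypothetical batch of shape (4, 3) vectors of length 5; reduce over the last axis
a = np.random.rand(4, 3, 5)
b = np.random.rand(4, 3, 5)
dots = np.einsum("...i,...i->...", a, b)           # result shape (4, 3)
assert np.allclose(dots, np.sum(a * b, axis=-1))   # agrees with the naive per-vector dot product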
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def batch_outer_product(a, b):\n a, b = normalize_and_check_ndim([a, b], 2)\n # This is a batchwise version of the matrix multiplication approach\n # used for outer_product(), see explanation there.\n return a[:, :, np.newaxis] * b[:, np.newaxis, :]", "def outer_product(input_sets, axis=0):\n out = cartesian_product(input_sets)\n return np.prod(out, axis=axis)\n\n # try:\n # from pyapprox.cython.utilities import outer_product_pyx\n # # fused type does not work for np.in32, np.float32, np.int64\n # # so envoke cython cast\n # if np.issubdtype(input_sets[0][0], np.signedinteger):\n # return outer_product_pyx(input_sets, 1)\n # if np.issubdtype(input_sets[0][0], np.floating):\n # return outer_product_pyx(input_sets, 1.)\n # else:\n # return outer_product_pyx(input_sets, input_sets[0][0])\n # except ImportError:\n # print('outer_product extension failed')\n\n # num_elems = 1\n # num_sets = len(input_sets)\n # sizes = np.empty((num_sets), dtype=int)\n # for ii in range(num_sets):\n # sizes[ii] = len(input_sets[ii])\n # num_elems *= sizes[ii]\n\n # # try:\n # # from pyapprox.weave import c_outer_product\n # # return c_outer_product(input_sets)\n # # except:\n # # print ('outer_product extension failed')\n\n # result = np.empty((num_elems), dtype=type(input_sets[0][0]))\n # for ii in range(num_elems):\n # result[ii] = 1.0\n # multi_index = ind2sub(sizes, ii, num_elems)\n # for jj in range(num_sets):\n # result[ii] *= input_sets[jj][multi_index[jj]]\n\n # return result", "def batch_outer(a: np.ndarray, b: np.ndarray, verify: bool = True) -> np.ndarray:\n\n if verify and a.shape[:-1] != b.shape[:-1]:\n raise ValueError(\"All except the last dimension have to be equal\")\n\n return np.einsum(\"...i,...j->...ij\", a, b) # slightly faster than np.multiply(a[...,:,None], b[...,None,:])", "def outer_product(x):\n return keras.backend.batch_dot(\n x[0]\n , x[1]\n , axes=[1,1]\n ) / x[0].get_shape().as_list()[1]", "def outer_product(a, b, batch=False):\n if batch:\n return batch_outer_product(a, b)\n a, b = normalize_and_check_ndim([a, b], 1)\n # The outer product is equivalent to matrix multiplication a * b\n # where the vector a is interpreted as a column matrix and the\n # vector b as a row matrix. The following reshaping and\n # multiplication accomplishes this.\n return a[:, np.newaxis] * b[np.newaxis, :]", "def inner_product_batch(pattern_stack_one, pattern_num_one, pattern_stack_two, pattern_num_two):\n\n \"\"\"\n Notice that the two stacks can be different. So we can not deduce the lower triangular pattern from the \n other half.\n \"\"\"\n holder = np.zeros((pattern_num_one, pattern_num_two))\n for l in range(pattern_num_one):\n for m in range(pattern_num_two):\n holder[l, m] = np.sum(np.multiply(pattern_stack_one[l], pattern_stack_two[m]))\n\n return holder", "def inner_product(alpha, F, beta):\n return np.dot(alpha, np.dot(F, beta))", "def batch_outer_sum(a, b):\n a, b = normalize_and_check_ndim([a, b], 2)\n # Due to broadcasting, this sum works analogously to batch matrix\n # multiplication. 
See also comments in batch_outer_product().\n return a[:, :, np.newaxis] + b[:, np.newaxis, :]", "def tensor_outer_product(tensor1, tensor2):\n shape_1 = tensor1.shape\n shape_2 = tensor2.shape\n s1 = len(shape_1)\n s2 = len(shape_2)\n \n shape_1 = shape_1 + (1, )*s2\n shape_2 = (1, )*s1 + shape_2\n return np.reshape(tensor1, shape_1) * np.reshape(tensor2, shape_2)", "def outer_product(x,y):\n\n return x[:,0]*y[:,1] -x[:,1]*y[:,0]", "def interior_tensor_product(mx, dim_a, dim_b, e=None):\n assert _np.shape(mx) == (dim_a * dim_b, dim_a * dim_b), \"Dimensions do not agree with matrix size\"\n assert _np.shape(e)[0] == _np.shape(e)[1], \"e should be a square matrix\"\n basis_a = matrix_units(dim_a)\n basis_b = matrix_units(dim_b)\n return sum((_np.trace(_np.dot(mx, _np.kron(unit_a, unit_b).T)) * multikron([unit_a, e, unit_b])\n for unit_a in basis_a for unit_b in basis_b))", "def _outer(a, b):\n a_flat = torch.reshape(a, [-1])\n b_flat = torch.reshape(b, [-1])\n a_mul = torch.unsqueeze(a_flat, dim=-1)\n b_mul = torch.unsqueeze(b_flat, dim=0)\n return a_mul * b_mul", "def inner_product(state_1, state_2):\n return numpy.dot(state_1.conjugate(), state_2)", "def inner_prod(x, y):\n z = torch.zeros(2, dtype=torch.double, device=x.device)\n\n if len(list(x.size())) == 2 and len(list(y.size())) == 2:\n z[0] = torch.dot(x[0], y[0]) - torch.dot(-x[1], y[1])\n z[1] = torch.dot(x[0], y[1]) + torch.dot(-x[1], y[0])\n\n if len(list(x.size())) == 1 and len(list(y.size())) == 1:\n z[0] = (x[0] * y[0]) - (-x[1] * y[1])\n z[1] = (x[0] * y[1]) + (-x[1] * y[0])\n\n return z", "def outer(a, b):\n m, n = a.size, b.size\n\n if m * n < 2**14:\n return mul_dense(a.reshape(m, 1), b.reshape(1, n))\n\n out = np.empty((m, n), dtype=common_type(a, b))\n _outer_par(a.ravel(), b.ravel(), out, m, n)\n\n return out", "def _mul(*args):\n\treturn functools.reduce(numpy.dot, args)", "def outer_product(A, B): \n print(A)\n print(B)\n A_rows = len(A)\n A_columns = len(A[0])\n\n B_rows = len(B)\n B_columns = len(B[0])\n \n if A_columns == 1 and B_rows == 1:\n \n outer_product = []\n\n # multi-line list comprehension for outer product\n [outer_product.append([A[i][0] * B[0][j] for j in range(B_columns)]) \n for i in range(A_rows)]\n\n return outer_product\n\n else:\n print(\"dimensions of vector do not match.\")", "def outer(a, b=None):\n if b is None:\n b = a\n size_a = tuple(a.size()) + (b.size()[-1],)\n size_b = tuple(b.size()) + (a.size()[-1],)\n a = a.unsqueeze(dim=-1).expand(*size_a)\n b = b.unsqueeze(dim=-2).expand(*size_b)\n return a, b", "def outer(x, y):\r\n if x.ndim != 1:\r\n x = x.flatten()\r\n if y.ndim != 1:\r\n y = y.flatten()\r\n return dot(\r\n x.dimshuffle(0, 'x'),\r\n y.dimshuffle('x', 0))", "def matmul(self, x, work_buffer):\n\n x = asarray(x)\n space = work_buffer.flat\n\n if (x.ndim == 0):\n ValueError(\n \"matmul: Input operand 1 does not have enough dimensions \"\n \"(has 0, gufunc core with signature (n?,k),(k,m?)->(n?,m?) 
\"\n \"requires 1\"\n )\n\n if x.ndim == 1 and self.ndim == 1:\n # Dot product\n if x.shape[0] == self._size:\n if self.fragmented:\n k = self._capacity - self._begin # fragmentation index\n\n out = matmul(self[self._begin:], x[:k]).view(ndarray)\n out += matmul(self[:self._end], x[k:])\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n out = matmul(part, x).view(ndarray)\n\n return(out)\n else:\n raise ValueError(\n \"matmul: Input operand 1 has a mismatch in its core \"\n \"dimension 0, with gufunc signature (n?,k),(k,m?)->(n?,m?)\"\n \" (size {n} is different from {m})\".format(\n n=self._size,\n m=x.shape[0]\n )\n )\n elif self.ndim == 1 and x.ndim > 1:\n if self._size == x.shape[-2]:\n out_shape = *x.shape[:-2], x.shape[-1]\n out = empty(out_shape)\n out2 = space[:reduce(operator.mul, out_shape)].reshape(\n out_shape\n )\n\n if self.fragmented:\n k = self._capacity - self._begin # fragmentation index\n\n matmul(self[self._begin:], x[..., :k, :], out)\n out += matmul(self[:self._end], x[..., k:, :], out2)\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n matmul(part, x, out).view(ndarray)\n\n return(out)\n else:\n raise ValueError(\n \"matmul: Input operand 1 has a mismatch in its core \"\n \"dimension 0, with gufunc signature (n?,k),(k,m?)->(n?,m?)\"\n \" (size {n} is different from {m})\".format(\n n=self.shape[-2],\n m=x.shape[0]\n )\n )\n elif self.ndim == 2:\n if (self.shape[-1] == x.shape[-2]):\n out = empty(\n (*x.shape[:-2], self.shape[-1], x.shape[-2])\n )\n\n if self.fragmented:\n k = self._capacity - self._begin # fragmentation index\n\n matmul(self[self._begin:], x, out[..., :k, :])\n matmul(self[:self._end], x, out[..., k:, :])\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n matmul(part, x, out)\n\n return(out.view(ndarray))\n\n else:\n raise ValueError(\n (\n \"matmul: Input operand 1 has a mismatch in its core \"\n \"dimension 0, with gufunc signature (n?,k),(k,m?)->\"\n \"(n?,m?) 
(size {n} is different from {m})\"\n ).format(\n n=self.shape[-1],\n m=x.shape[-2]\n )\n )\n else:\n if (self.shape[-1] == x.shape[-2]):\n self_shape = (self._size, *self.shape[1:-2])\n\n starexpr = tuple(\n zip_longest(self_shape, x.shape[:-2], fillvalue=1)\n )\n if star_can_broadcast(starexpr):\n broadcast_shape = tuple(\n starmap(\n lambda a, b: max(a, b),\n starexpr\n )\n )\n\n out = empty(\n (*broadcast_shape, self.shape[-2], x.shape[-1])\n )\n\n if self.fragmented:\n k = self._capacity - self._begin # fragmentation index\n\n if x.ndim > 2:\n matmul(self[self._begin:], x[:k], out[:k])\n matmul(self[:self._end], x[k:], out[k:])\n else:\n matmul(self[self._begin:], x, out[:k])\n matmul(self[:self._end], x, out[k:])\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n matmul(part, x, out)\n\n return(out.view(ndarray))\n else:\n raise ValueError(\n (\n \"operands could not be broadcast together with\"\n \"remapped shapes [original->remapped]: \"\n \"{shape_b}->({shape_bn}, newaxis,newaxis) \"\n \"{shape_a}->({shape_an}, newaxis,newaxis) \"\n \"and requested shape ({n},{m})\"\n ).format(\n shape_a=self_shape,\n shape_b=x.shape,\n shape_an=self.shape[:-2].__str__()[:-1],\n shape_bn=x.shape[:-2].__str__()[:-1],\n n=self.shape[-1],\n m=x.shape[-2]\n )\n )\n else:\n raise ValueError(\n (\n \"matmul: Input operand 1 has a mismatch in its core \"\n \"dimension 0, with gufunc signature (n?,k),(k,m?)->\"\n \"(n?,m?) (size {n} is different from {m})\"\n ).format(\n n=self.shape[-1],\n m=x.shape[-2]\n )\n )", "def outer_sum(a, b, batch=False):\n if batch:\n return batch_outer_sum(a, b)\n # TODO: naming. Surely this has to be called something sensible?\n a, b = normalize_and_check_ndim([a, b], 1)\n # Due to broadcasting, this sum works analogously to matrix\n # multiplication. 
See also comments in outer_product().\n return a[:, np.newaxis] + b[np.newaxis, :]", "def outer_prod(x, y):\n if len(list(x.size())) != 2 or len(list(y.size())) != 2:\n raise ValueError(\"An input is not of the right dimension.\")\n\n z = torch.zeros(2, x.size()[1], y.size()[1], dtype=torch.double, device=x.device)\n z[0] = torch.ger(x[0], y[0]) - torch.ger(x[1], -y[1])\n z[1] = torch.ger(x[0], -y[1]) + torch.ger(x[1], y[0])\n\n return z", "def test_multidimensional_operation(self):\n # start with something (1, 2, 3)\n data = np.array([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]])\n\n # split 1st dim (2, 2, 3)\n coefficients = np.ones((1, 2)) / 2\n expected = np.array(\n [[[0.0, 0.5, 1.0], [1.5, 2.0, 2.5]], [[0.0, 0.5, 1.0], [1.5, 2.0, 2.5]]]\n )\n actual = Adaptor.convert_with_coefficients(data, coefficients, 0)\n np.testing.assert_allclose(actual, expected)\n\n # sum 3rd dim (2, 2, 1)\n coefficients = np.ones((3, 1))\n expected = np.array([[[1.5], [6.0]], [[1.5], [6.0]]])\n actual = Adaptor.convert_with_coefficients(actual, coefficients, 2)\n np.testing.assert_allclose(actual, expected)", "def _flatten_batch(self, matrix_tups):\n out_vecs = []\n for t in matrix_tups:\n for v in t:\n new_shape = (v.shape[0],)\n if len(v.shape) > 1:\n new_shape = new_shape + (np.prod(v.shape[1:]),)\n out_vecs.append(v.reshape(new_shape))\n return jnp.concatenate(out_vecs, axis=1)", "def inner_products(t_S, t_Var, t_XS, t_YS, t_XE, t_YE, t_XR, t_YR):\n\n # Note in this computation, we do the indices in this form:\n # b, i, j, t\n # batch, pixel, neuron, time step\n\n # indices: b, i1, j, t\n t_dX = (t_XS.dimshuffle('x', 0, 'x', 'x') -\n t_XE.dimshuffle('x', 'x', 0, 'x') -\n t_XR.dimshuffle(0, 'x', 'x', 1))\n t_dX.name = 'dX'\n # indices: b, i2, j, t\n t_dY = (t_YS.dimshuffle('x', 0, 'x', 'x') -\n t_YE.dimshuffle('x', 'x', 0, 'x') -\n t_YR.dimshuffle(0, 'x', 'x', 1))\n t_dY.name = 'dY'\n\n # Use outer product trick to dot product image with point filters\n t_PixRFCouplingX = T.exp(-0.5 * t_dX ** 2 /\n t_Var.dimshuffle('x', 0, 'x', 'x'))\n t_PixRFCouplingY = T.exp(-0.5 * t_dY ** 2 /\n t_Var.dimshuffle('x', 0, 'x', 'x'))\n t_PixRFCouplingX.name = 'PixRFCouplingX'\n t_PixRFCouplingY.name = 'PixRFCouplingY'\n\n # Matrix of inner products between the images and the retinal RFs\n # indices: b, j, t\n # Sum_i2 T(i2, i1) * T(b, i2, j, t) = T(b, i1, j, t)\n t_IpsY = T.sum(t_S.dimshuffle('x', 0, 1, 'x', 'x') *\n t_PixRFCouplingY.dimshuffle(0, 1, 'x', 2, 3),\n axis=1)\n # Sum_i1 T(b, i1, j, t) * T(b, i2, j, t) = T(b, j, t)\n t_Ips = T.sum(t_IpsY * t_PixRFCouplingX, axis=1)\n t_Ips.name = 'Ips'\n\n # For the gradient, we also prepare d Ips / dS\n # This is in the form b, i2, i1, j, t\n t_PixRFCoupling = (t_PixRFCouplingX.dimshuffle(0, 'x', 1, 2, 3) *\n t_PixRFCouplingY.dimshuffle(0, 1, 'x', 2, 3))\n\n return t_Ips, t_PixRFCoupling", "def _dot_product_attention_inner_relative(x, y, z, transpose):\n batch_size, heads, length, _ = x.size()\n\n # xy_matmul is [batch_size, heads, length, length or depth]\n xy_matmul = torch.matmul(x, y if not transpose else y.transpose(-2, -1))\n # x_t is [length, batch_size, heads, length or depth]\n x_t = x.permute(2, 0, 1, 3)\n # x_t_r is [length, batch_size * heads, length or depth]\n x_t_r = x_t.view(length, batch_size * heads, -1)\n # x_tz_matmul is [length, batch_size * heads, length or depth]\n x_tz_matmul = torch.matmul(x_t_r, z if not transpose else z.transpose(-2, -1))\n # x_tz_matmul_r is [length, batch_size, heads, length or depth]\n x_tz_matmul_r = x_tz_matmul.view(length, batch_size, 
heads, -1)\n # x_tz_matmul_r_t is [batch_size, heads, length, length or depth]\n x_tz_matmul_r_t = x_tz_matmul_r.permute(1, 2, 0, 3)\n\n return xy_matmul + x_tz_matmul_r_t", "def calc_batch_dot_product_3d2d(Tbs, zc, use_gpu):\n\n if (use_gpu):\n return _calc_batch_dot_product_3d2d_gpu(Tbs, zc)\n\n #Get array dims to reshape model array to 2d\n nz = zc.shape[0]\n nrows = Tbs[0].shape[0]\n model = (Tbs@zc[:, :, None]).reshape((nz, nrows))\n return model", "def _inner_product_d1(\n self, one_forms_a, one_forms_b, one_forms_bp, areas_bp, inv_surface_metrics_bp\n ):\n one_forms_bp_t = gs.transpose(one_forms_bp, (0, 2, 1))\n\n one_forms_a_t = gs.transpose(one_forms_a, (0, 1, 3, 2))\n xa = one_forms_a_t - one_forms_bp_t\n\n xa_0 = gs.matmul(\n gs.matmul(one_forms_bp_t, inv_surface_metrics_bp),\n gs.matmul(gs.transpose(xa, (0, 1, 3, 2)), one_forms_bp_t)\n - gs.matmul(one_forms_bp, xa),\n )\n\n one_forms_b_t = gs.transpose(one_forms_b, (0, 1, 3, 2))\n xb = one_forms_b_t - one_forms_bp_t\n xb_0 = gs.matmul(\n gs.matmul(one_forms_bp_t, inv_surface_metrics_bp),\n gs.matmul(gs.transpose(xb, (0, 1, 3, 2)), one_forms_bp_t)\n - gs.matmul(one_forms_bp, xb),\n )\n\n return self.d1 * gs.sum(\n gs.einsum(\n \"...bii->...b\",\n gs.matmul(\n xa_0,\n gs.matmul(\n inv_surface_metrics_bp, gs.transpose(xb_0, axes=(0, 1, 3, 2))\n ),\n ),\n )\n * areas_bp\n )", "def prod(self, axis=None, keepdims=False, dtype=None, out=None):\n return np.multiply.reduce(\n self, out=out, axis=axis, keepdims=keepdims, dtype=dtype\n )", "def two_dim(a: cython.double[:,:]):\n a[0,0] *= 3\n return a[0,0], a[0,1], a.ndim", "def cartesian_product(input_sets, elem_size=1):\n import itertools\n out = []\n # ::-1 reverse order to be backwards compatiable with old\n # function below\n for r in itertools.product(*input_sets[::-1]):\n out.append(r)\n out = np.asarray(out).T[::-1, :]\n return out\n\n # try:\n # from pyapprox.cython.utilities import cartesian_product_pyx\n # # # fused type does not work for np.in32, np.float32, np.int64\n # # # so envoke cython cast\n # # if np.issubdtype(input_sets[0][0],np.signedinteger):\n # # return cartesian_product_pyx(input_sets,1,elem_size)\n # # if np.issubdtype(input_sets[0][0],np.floating):\n # # return cartesian_product_pyx(input_sets,1.,elem_size)\n # # else:\n # # return cartesian_product_pyx(\n # # input_sets,input_sets[0][0],elem_size)\n # # always convert to float then cast back\n # cast_input_sets = [np.asarray(s, dtype=float) for s in input_sets]\n # out = cartesian_product_pyx(cast_input_sets, 1., elem_size)\n # out = np.asarray(out, dtype=input_sets[0].dtype)\n # return out\n # except:\n # print('cartesian_product extension failed')\n\n # num_elems = 1\n # num_sets = len(input_sets)\n # sizes = np.empty((num_sets), dtype=int)\n # for ii in range(num_sets):\n # sizes[ii] = input_sets[ii].shape[0]/elem_size\n # num_elems *= sizes[ii]\n # # try:\n # # from pyapprox.weave import c_cartesian_product\n # # # note c_cartesian_product takes_num_elems as last arg and cython\n # # # takes elem_size\n # # return c_cartesian_product(input_sets, elem_size, sizes, num_elems)\n # # except:\n # # print ('cartesian_product extension failed')\n\n # result = np.empty(\n # (num_sets*elem_size, num_elems), dtype=type(input_sets[0][0]))\n # for ii in range(num_elems):\n # multi_index = ind2sub(sizes, ii, num_elems)\n # for jj in range(num_sets):\n # for kk in range(elem_size):\n # result[jj*elem_size+kk, ii] =\\\n # input_sets[jj][multi_index[jj]*elem_size+kk]\n # return result", "def 
batch_dot_product_sparse(spectra, tdata, nz, use_gpu):\n\n if (use_gpu):\n #Use GPU to do dot products in batch\n return _batch_dot_product_sparse_gpu(spectra, tdata)\n\n #Need to find shape of output array of batch dot product\n nrows = 0\n nbasis = None\n for key in tdata:\n nrows += tdata[key].shape[1]\n if (nbasis is None):\n nbasis = tdata[key].shape[2]\n\n #Create empty array rather than stacking a list - faster\n Tbs = np.empty((nz, nrows, nbasis))\n #Loop over all templates\n for i in range(nz):\n irow = 0\n for s in spectra:\n key = s.wavehash\n curr_tb = s.Rcsr.dot(tdata[key][i,:,:])\n #Copy this dot product result into the Tbs array\n Tbs[i, irow:irow+curr_tb.shape[0],:] = curr_tb\n irow += curr_tb.shape[0]\n return Tbs", "def complex_mul1d(a, b):\n op = partial(torch.einsum, \"bix,iox->box\")\n return torch.stack([\n op(a[..., 0], b[..., 0]) - op(a[..., 1], b[..., 1]),\n op(a[..., 1], b[..., 0]) + op(a[..., 0], b[..., 1])\n ],\n dim=-1)", "def asummult (array1,array2,dimension=None,keepdims=0):\r\n if dimension == None:\r\n array1 = N.ravel(array1)\r\n array2 = N.ravel(array2)\r\n dimension = 0\r\n return asum(array1*array2,dimension,keepdims)", "def complex_mul2d(a, b):\n op = partial(torch.einsum, \"bixy,ioxy->boxy\")\n return torch.stack([\n op(a[..., 0], b[..., 0]) - op(a[..., 1], b[..., 1]),\n op(a[..., 1], b[..., 0]) + op(a[..., 0], b[..., 1])\n ],\n dim=-1)", "def einsum(subscripts, *operands):\n raise NotImplementedError", "def _generate_mult_process(X, mat, inits):\n M = np.empty_like(X, dtype=float)\n M[..., 0] = inits[X[..., 0]]\n M[..., 1:] = mat[X[..., :-1], X[..., 1:]]\n np.cumprod(M, axis=-1, out=M)\n return M", "def _inner_product_b1(self, ginvdga, ginvdgb, areas_bp):\n return self.b1 * gs.sum(\n gs.einsum(\"...bii->...b\", ginvdga)\n * gs.einsum(\"...bii->...b\", ginvdgb)\n * areas_bp,\n axis=-1,\n )", "def batch_vTAv(A: np.ndarray, v: np.ndarray) -> np.ndarray:\n\n \"\"\" Faster than\n Av = np.matmul(A, v[...,:,None]) # [B, X, 1]\n return np.matmul(v[...,None,:], Av).squeeze((-2, -1)) # [B]\n \"\"\"\n\n return np.einsum(\"...k,...kl,...l->...\", v, A, v)", "def inner_product(pattern_one, pattern_two):\n\n return np.sum(np.multiply(pattern_one, pattern_two))", "def matrix_dot(*args):\r\n rval = args[0]\r\n for a in args[1:]:\r\n rval = theano.tensor.dot(rval, a)\r\n return rval", "def forward(self, emb_inputs: torch.Tensor) -> torch.Tensor:\n # indexing data for outer product\n p = emb_inputs[:, self.rowidx] # shape = (B, NC2, E)\n q = emb_inputs[:, self.colidx] # shape = (B, NC2, E)\n\n # apply kernel on outer product\n if self.kernel_type == \"mat\":\n # unsqueeze p to (B, 1, NC2, E), \n # then multiply kernel and return shape = (B, E, NC2, E)\n kp = p.unsqueeze(1) * self.kernel\n \n # aggregate last dimension of kp and return shape = (B, E, NC2)\n # then tranpose to shape = (B, NC2, E)\n kp = kp.sum(dim=-1).transpose(1, 2)\n\n # multiply q to kp and return shape = (B, NC2, E)\n # then aggregate outputs with last dimension to shape (B, NC2)\n outputs = (kp * q).sum(dim=-1)\n else:\n # multiply q and kernel to p and return shape = (B, NC2, E)\n # then aggregate outputs with last dimension to shape (B, NC2)\n outputs = (p * q * self.kernel.unsqueeze(0)).sum(dim=-1)\n \n # reshape outputs to (B, 1, NC2)\n return outputs.unsqueeze(1)", "def rmatmul(self, x, work_buffer) -> ndarray:\n x = asarray(x)\n space = asarray(work_buffer).flat\n\n if (x.ndim == 0):\n ValueError(\n \"matmul: Input operand 1 does not have enough dimensions \"\n \"(has 0, gufunc core with 
signature (n?,k),(k,m?)->(n?,m?) \"\n \"requires 1\"\n )\n\n if x.ndim == 1 and self.ndim == 1:\n # Dot product\n if x.shape[0] == self._size:\n if self.fragmented:\n k = self._capacity - self._begin # fragmentation index\n\n out = matmul(x[:k], self[self._begin:]).view(ndarray)\n out += matmul(x[k:], self[:self._end]).view(ndarray)\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n out = matmul(x, part)\n\n return(out.view(ndarray))\n else:\n raise ValueError(\n \"matmul: Input operand 1 has a mismatch in its core \"\n \"dimension 0, with gufunc signature (n?,k),(k,m?)->(n?,m?)\"\n \" (size {n} is different from {m})\".format(\n n=self._size,\n m=x.shape[0]\n )\n )\n elif x.ndim == 1 and self.ndim > 1:\n if x.shape[0] == self.shape[-2]:\n if self.ndim == 2:\n out = empty(self.shape[-1])\n out2 = space[:self.shape[-1]]\n\n if self.fragmented:\n k = self._capacity - self._begin # fragmentation index\n\n matmul(x[:k], self[self._begin:], out)\n out += matmul(x[k:], self[:self._end], out2)\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n matmul(x, part, out)\n\n return(out)\n else:\n out = empty(\n (self._size, *self.shape[1:-2], self.shape[-1])\n )\n\n if self.fragmented:\n k = self._capacity - self._begin # fragmentation index\n\n matmul(x, self[self._begin:], out[:k])\n matmul(x, self[:self._end], out[k:])\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n matmul(x, part, out)\n\n return(out)\n else:\n raise ValueError(\n \"matmul: Input operand 1 has a mismatch in its core \"\n \"dimension 0, with gufunc signature (n?,k),(k,m?)->(n?,m?)\"\n \" (size {n} is different from {m})\".format(\n n=self.shape[-2],\n m=x.shape[0]\n )\n )\n elif x.ndim > 1 and self.ndim == 1:\n if x.shape[-1] == self.shape[0]:\n out = empty(x.shape[:-1])\n out2 = space[:reduce(operator.mul, x.shape[:-1])].reshape(\n x.shape[:-1]\n )\n if self.fragmented:\n k = self._capacity - self._begin # fragmentation index\n\n matmul(x[..., :, :k], self[self._begin:], out)\n out += matmul(x[..., :, k:], self[:self._end], out2)\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n matmul(x, part, out)\n\n return(out)\n else:\n raise ValueError(\n \"matmul: Input operand 1 has a mismatch in its core \"\n \"dimension 0, with gufunc signature (n?,k),(k,m?)->(n?,m?)\"\n \" (size {n} is different from {m})\".format(\n n=self.shape[-2],\n m=x.shape[0]\n )\n )\n elif self.ndim == 2:\n if (x.shape[-1] == self.shape[-2]):\n out_shape = (*x.shape[:-1], self.shape[-2])\n out = empty(out_shape)\n out2 = space[:reduce(operator.mul, out_shape)].reshape(\n out_shape\n )\n\n if self.fragmented:\n k = self._capacity - self._begin # fragmentation index\n\n matmul(x[..., :, :k], self[self._begin:], out)\n out += matmul(x[..., :, k:], self[:self._end], out2)\n\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n matmul(x, part, out)\n\n return(out.view(ndarray))\n\n else:\n raise ValueError(\n (\n \"matmul: Input operand 1 has a mismatch in its core \"\n \"dimension 0, with gufunc signature (n?,k),(k,m?)->\"\n \"(n?,m?) 
(size {n} is different from {m})\"\n ).format(\n n=self.shape[-1],\n m=x.shape[-2]\n )\n )\n else:\n if (x.shape[-1] == self.shape[-2]):\n self_shape = (self._size, *self.shape[1:-2])\n\n starexpr = tuple(\n zip_longest(self_shape, x.shape[:-2], fillvalue=1)\n )\n if star_can_broadcast(starexpr):\n broadcast_shape = tuple(\n starmap(\n lambda a, b: max(a, b),\n starexpr\n )\n )\n\n out = empty(\n (*broadcast_shape, x.shape[-2], self.shape[-1])\n )\n\n if self.fragmented:\n k = self._capacity - self._begin # fragmentation index\n\n if x.ndim > 2:\n matmul(x[:k], self[self._begin:], out[:k])\n matmul(x[k:], self[:self._end], out[k:])\n else:\n matmul(x, self[self._begin:], out[:k])\n matmul(x, self[:self._end], out[k:])\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n matmul(x, part, out)\n\n return(out.view(ndarray))\n else:\n raise ValueError(\n (\n \"operands could not be broadcast together with\"\n \"remapped shapes [original->remapped]: \"\n \"{shape_b}->({shape_bn}, newaxis,newaxis) \"\n \"{shape_a}->({shape_an}, newaxis,newaxis) \"\n \"and requested shape ({n},{m})\"\n ).format(\n shape_a=self_shape,\n shape_b=x.shape,\n shape_an=self.shape[:-2].__str__()[:-1],\n shape_bn=x.shape[:-2].__str__()[:-1],\n n=self.shape[-1],\n m=x.shape[-2]\n )\n )\n else:\n raise ValueError(\n (\n \"matmul: Input operand 1 has a mismatch in its core \"\n \"dimension 0, with gufunc signature (n?,k),(k,m?)->\"\n \"(n?,m?) (size {n} is different from {m})\"\n ).format(\n n=self.shape[-1],\n m=x.shape[-2]\n )\n )", "def test_cross_multiply_shape():\n array_1 = np.ones((1, 3))\n array_out = utils.cross_multiply_array(array_1, axis=1)\n assert (1, 3, 3) == array_out.shape", "def multi_dot(arrays, out=None):\n\n n = len(arrays)\n\n if n < 2:\n checker_throw_value_error(\"multi_dot\", \"arrays\", n, \">1\")\n\n result = arrays[0]\n for id in range(1, n):\n result = dpnp.dot(result, arrays[id])\n\n return result", "def kronecker_prod(x, y):\n if len(list(x.size())) != 3 or len(list(y.size())) != 3:\n raise ValueError(\"An input is not of the right dimension.\")\n\n z = torch.zeros(\n 2,\n x.size()[1] * y.size()[1],\n x.size()[2] * y.size()[2],\n dtype=torch.double,\n device=x.device,\n )\n\n row_count = 0\n\n for i in range(x.size()[1]):\n for k in range(y.size()[1]):\n column_count = 0\n for j in range(x.size()[2]):\n for l in range(y.size()[2]):\n\n z[0][row_count][column_count] = (x[0][i][j] * y[0][k][l]) - (\n x[1][i][j] * y[1][k][l]\n )\n z[1][row_count][column_count] = (x[0][i][j] * y[1][k][l]) + (\n x[1][i][j] * y[0][k][l]\n )\n\n column_count += 1\n row_count += 1\n\n return z", "def _calc_batch_dot_product_3d2d_gpu(Tbs, zc):\n\n #Use batch_dot_product_3d2d kernel to compute model array\n # Load CUDA kernel\n cp_module = cp.RawModule(code=cuda_source)\n batch_dot_product_3d2d_kernel = cp_module.get_function('batch_dot_product_3d2d')\n\n #Array dims needed by CUDA:\n nz = zc.shape[0]\n nrows = Tbs[0].shape[0]\n n = nrows * nz\n nbasis = zc.shape[1]\n\n #Allocate CUPY array and calc blocks to be used\n blocks = (n+block_size-1)//block_size\n model = cp.empty((nz, nrows), cp.float64)\n #Launch kernel and synchronize\n batch_dot_product_3d2d_kernel((blocks,), (block_size,), (Tbs, zc, model, nrows, nbasis, nz))\n #cp.cuda.Stream.null.synchronize()\n return model", "def prod(input, axis=None, dtype=None, keepdims=False, acc_dtype=None,\r\n no_zeros_in_input=False):\r\n\r\n out = elemwise.Prod(axis, dtype=dtype, 
acc_dtype=acc_dtype,\r\n no_zeros_in_input=no_zeros_in_input)(input)\r\n\r\n if keepdims:\r\n out = makeKeepDims(input, out, axis)\r\n return out", "def _prod_vectorized(M1, M2):\n sh1 = M1.shape\n sh2 = M2.shape\n assert len(sh1) >= 2\n assert len(sh2) >= 2\n assert sh1[-1] == sh2[-2]\n\n ndim1 = len(sh1)\n t1_index = list(xrange(ndim1-2)) + [ndim1-1, ndim1-2]\n return np.sum(np.transpose(M1, t1_index)[..., np.newaxis] *\n M2[..., np.newaxis, :], -3)", "def flatten(\n inner: transform.GradientTransformation\n) -> transform.GradientTransformation:\n\n def _flatten(params):\n \"\"\"Flattens and concatenates all tensors in params to a single vector.\"\"\"\n params, _ = tree_flatten(params)\n return jnp.concatenate([jnp.reshape(param, [-1]) for param in params])\n\n def _unflatten(updates, flat):\n \"\"\"Extracts tensors from flat, using the structure and shapes of params.\"\"\"\n updates_flat, treedef = tree_flatten(updates)\n offsets = []\n for update in updates_flat:\n size = np.prod(update.shape)\n if offsets:\n offsets.append(size + offsets[-1])\n else:\n offsets.append(size)\n del offsets[-1]\n flat_split = jnp.split(flat, offsets)\n reshaped = [\n jnp.reshape(flat_update, update.shape)\n for flat_update, update in zip(flat_split, updates_flat)\n ]\n return tree_unflatten(treedef, reshaped)\n\n def init_fn(params):\n flat = _flatten(params)\n return inner.init(flat)\n\n def update_fn(updates, state, params=None):\n if params is not None:\n params = _flatten(params)\n updates_flat, state = inner.update(_flatten(updates), state, params)\n updates = _unflatten(updates, updates_flat)\n return updates, state\n\n return transform.GradientTransformation(init_fn, update_fn)", "def product_nb(a):\n out = np.empty(a.shape[1], dtype=np.float_)\n for col in range(a.shape[1]):\n out[col] = product_1d_nb(a[:, col])\n return out", "def _product(shape):\n result = 1\n for size in shape:\n result *= size\n return result", "def dot_product(x, kernel):\n if K.backend() == 'tensorflow':\n # K.expand_dims 默认axis=-1\n return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)\n else:\n return K.dot(x, kernel)", "def mdot(*args):\n ret = args[0]\n for a in args[1:]:\n ret = np.dot(ret, a)\n return ret", "def matrix_dot(*args):\n rval = args[0]\n for a in args[1:]:\n rval = tm.dot(rval, a)\n return rval", "def multi_matmul(tensors, constant=False):\n\n for a in tensors:\n if not (1 <= a.ndim <= 2):\n raise ValueError(\n \"%d-dimensional tensor given. 
Tensor must be one or two-dimensional\"\n % (a.ndim,)\n )\n\n n = len(tensors)\n if n < 2:\n raise ValueError(\"Expecting at least two arrays.\")\n elif n == 2:\n return matmul(tensors[0], tensors[1], constant)\n\n tensors = [a if isinstance(a, Tensor) else np.asarray(a) for a in tensors]\n\n # save original ndim to reshape the result array into the proper form later\n ndim_first, ndim_last = tensors[0].ndim, tensors[-1].ndim\n\n # Explicitly convert vectors to 2D arrays to keep the logic of this function simpler\n if tensors[0].ndim == 1:\n tensors[0] = mg.expand_dims(\n tensors[0],\n axis=0,\n constant=tensors[0].constant if isinstance(tensors[0], Tensor) else True,\n )\n if tensors[-1].ndim == 1:\n tensors[-1] = mg.expand_dims(\n tensors[-1],\n axis=1,\n constant=tensors[-1].constant if isinstance(tensors[-1], Tensor) else True,\n )\n\n if n == 3:\n result = _multi_matmul_three(tensors[0], tensors[1], tensors[2], constant)\n else:\n order = _multi_matmul_chain_order(tensors)\n result = _multi_matmul(tensors, order, 0, n - 1, constant)\n\n # return proper shape since we possibly added dimensions to the first\n # and last arrays\n if ndim_first == 1 and ndim_last == 1:\n return result[0, 0]\n elif ndim_first == 1 or ndim_last == 1:\n return result.reshape(-1)\n else:\n return result", "def prod(tensor, axis=None):\n raise NotImplementedError", "def innerprod_q2(q1, q2):\n T = q1.shape[1]\n val = sum(sum(q1 * q2)) / T\n\n return (val)", "def _kronecker_product(mat1: tf.Tensor, mat2: tf.Tensor) -> tf.Tensor:\n m1, n1 = mat1.get_shape().as_list()\n mat1_rsh = tf.reshape(mat1, [m1, 1, n1, 1])\n m2, n2 = mat2.get_shape().as_list()\n mat2_rsh = tf.reshape(mat2, [1, m2, 1, n2])\n return tf.reshape(mat1_rsh * mat2_rsh, [m1 * m2, n1 * n2])", "def kronecker_operators(*args):\n return reduce(wrapped_kronecker, *args)", "def inner_product(p1, l1x, l1y,\n p2, l2x, l2y, var):\n n = l1x.shape[0]\n l1x = l1x.reshape(n, 1).astype('float32')\n l2x = l2x.reshape(1, n).astype('float32')\n\n l1y = l1y.reshape(n, 1).astype('float32')\n l2y = l2y.reshape(1, n).astype('float32')\n var = var.astype('float32')\n\n coupling = np.exp(-((l1x - l2x) ** 2 +\n (l1y - l2y) ** 2) / (4 * var))\n\n # return np.einsum('i,j,ij->', p1, p2, coupling)\n return np.dot(p1, coupling).dot(p2)", "def TensorProduct(**kw_kernels):\n return Composite('*', **kw_kernels)", "def np_esum(spec, *arrays, optimize='greedy'):\n return numpy.einsum(spec, *arrays, optimize=optimize)", "def dot_as_einsum(x: JaxExpression, y: JaxExpression, params: Params) -> Einsum:\n dimension_numbers = params['dimension_numbers']\n (x_contract, y_contract), (x_batch, y_batch) = dimension_numbers\n x_ndim, y_ndim = len(x.shape), len(y.shape)\n letter_iter = einsum.einsum_letters()\n x_dims = ''.join(it.islice(letter_iter, x_ndim))\n y_dims = list(it.islice(letter_iter, y_ndim))\n for x_dim, y_dim in zip(x_contract + x_batch, y_contract + y_batch):\n y_dims[y_dim] = x_dims[x_dim]\n y_dims = ''.join(y_dims)\n out_batch_dims = [x_dims[dim] for dim in x_batch]\n out_dims = out_batch_dims + ([xd for xd in x_dims if xd not in y_dims] +\n [yd for yd in y_dims if yd not in x_dims])\n out_dims = ''.join(out_dims)\n return Einsum(f'{x_dims},{y_dims}->{out_dims}', (x, y))", "def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n 
**kwargs,\n )", "def _mean_post_einsum(g: to.Tensor, lpj: to.Tensor) -> to.Tensor:\n return to.einsum(\"ns...,ns->n...\", (g, lpj2pjc(lpj)))", "def dot_product(x, kernel):\n if K.backend() == 'tensorflow':\n return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)\n else:\n return K.dot(x, kernel)", "def dot_product(x, kernel):\n if K.backend() == 'tensorflow':\n return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)\n else:\n return K.dot(x, kernel)", "def dot_product(x, kernel):\n if K.backend() == 'tensorflow':\n return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)\n else:\n return K.dot(x, kernel)", "def dot_product(x, kernel):\n if K.backend() == 'tensorflow':\n return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)\n else:\n return K.dot(x, kernel)", "def inner_product(programs, symbol_names, symbol_values, other_programs):\n return MATH_OP_MODULE.tfq_inner_product(programs, symbol_names,\n tf.cast(symbol_values, tf.float32),\n other_programs)", "def batch_concat(\n values: types.NestedArray,\n num_batch_dims: int = 1,\n) -> jnp.ndarray:\n flatten_fn = lambda x: _flatten.apply(None, x, num_batch_dims)\n flat_leaves = tree.map_structure(flatten_fn, values)\n return jnp.concatenate(tree.flatten(flat_leaves), axis=-1)", "def dim_mul(dims1, dims2):\n return (\n dims1[0] + dims2[0],\n dims1[1] + dims2[1],\n dims1[2] + dims2[2],\n dims1[3] + dims2[3],\n dims1[4] + dims2[4],\n dims1[5] + dims2[5],\n dims1[6] + dims2[6],\n )", "def test_cross_multiply_from_list():\n array_1 = np.ones((1, 3)).tolist()\n array_out = utils.cross_multiply_array(array_1, axis=1)\n assert (1, 3, 3) == array_out.shape", "def cartesian_product(*arrays):\n length = len(arrays)\n dtype = np.result_type(*arrays)\n arr = np.empty([len(a) for a in arrays] + [length], dtype=dtype)\n for idx, array in enumerate(np.ix_(*arrays)):\n arr[...,idx] = array\n return arr.reshape(-1, length)", "def matmul(xs: List[List[float]],\n ys: List[List[float]]) -> List[List[float]]:\n product = []\n for x_row in range(len(xs)):\n row = []\n for y_col in range(len(ys[0])):\n col = [ys[y_row][y_col] for y_row in range(len(ys))]\n row.append(Math.dot(xs[x_row], col))\n product.append(row)\n return product", "def _inner_product_a1(self, ginvdga, ginvdgb, areas_bp):\n return self.a1 * gs.sum(\n gs.einsum(\"...bii->...b\", gs.matmul(ginvdga, ginvdgb)) * areas_bp,\n axis=-1,\n )", "def prodSumNumpy(*arrays):\n return np.sum(np.prod(arrays,axis=0))", "def frobenius_inner_prod(mat1, mat2):\n assert mat1.shape==mat2.shape\n # assert isinstance(mat1, Variable) and isinstance(mat2, Variable))\n f = mat1.mul(mat2).sum()\n return f", "def kronecker_product(mat1, mat2):\n m1, n1 = mat1.get_shape().as_list()\n mat1_rsh = tf.reshape(mat1, [m1, 1, n1, 1])\n m2, n2 = mat2.get_shape().as_list()\n mat2_rsh = tf.reshape(mat2, [1, m2, 1, n2])\n return tf.reshape(mat1_rsh * mat2_rsh, [m1 * m2, n1 * n2])", "def multikron(a):\n return _reduce(_np.kron, a)", "def dot_product(x, kernel):\n if K.backend() == 'tensorflow':\n # todo: check that this is correct\n return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)\n else:\n return K.dot(x, kernel)", "def axis_element_wise_multiplication(t1, t2, which_axis):\n # assert len(K.int_shape(t1)) == len(K.int_shape(t2)) + 1, \"rank(t1) should be rank(t2) + 1\"\n slices = tf.unstack(t1, axis=which_axis)\n # assert K.int_shape(slices[0]) == K.int_shape(t2), \"Slices of t1 were not the same shape as t2\"\n multiplies = []\n for s in slices:\n multiplies.append(t2 * s)\n return tf.stack(multiplies, axis=2)", 
"def inner(self, a: np.ndarray, b: np.ndarray) -> float:\n return a.T @ (self.mass @ b)", "def dot_product(x, kernel):\r\n if K.backend() == 'tensorflow':\r\n return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)\r\n else:\r\n return K.dot(x, kernel)", "def __mul__(self, oth):\n\t\tif isinstance(oth, Matrix) or isiterable(oth):\n\t\t\t# matrix\n\t\t\toth_m = oth\n\t\t\tif not isinstance(oth_m, Matrix):\n\t\t\t\toth_m = Matrix(oth_m)\t\t\t\n\t\t\tres_m = self._mat_mul(oth_m)\n\t\t\tif isinstance(oth, Matrix):\n\t\t\t\treturn res_m\n\t\t\telse:\n\t\t\t\treturn type(oth)(res_m._unnest())\n\t\telse:\n\t\t\t# scalar\n\t\t\treturn Matrix._make_new(lambda i,j: self.data[i][j] * oth, self.rows, self.cols)", "def cartesian_product(*arrays):\n\n la = len(arrays)\n if la == 0:\n return np.array([])\n dtype = np.result_type(*arrays)\n arr = np.empty([len(a) for a in arrays] + [la], dtype=dtype)\n for i, a in enumerate(np.ix_(*arrays)):\n arr[..., i] = a\n return arr.reshape(-1, la)", "def inner(self, v1, v2, x, c, keep_shape=False):\n c = self.truncate_c(c)\n metric = tf.square(self._lambda_x(x, c))\n product = v1 * metric * v2\n res = tf.reduce_sum(product, axis=-1, keepdims=True)\n if keep_shape:\n # return tf.broadcast_to(res, x.shape)\n last_dim = x.shape.as_list()[-1]\n return tf.concat([res for _ in range(last_dim)], axis=-1)\n return tf.squeeze(res, axis=-1)", "def _kp(a, b):\n if a.shape != b.shape or a.shape[-1] != 1:\n raise(ValueError)\n N = a.shape[0]\n # take the outer product over the last two axes, then reshape:\n return np.einsum('ijk,ilk->ijkl', a, b).reshape(N, -1, 1)", "def _e_2d_(p, a):\r\n diff = a - p[np.newaxis, :]\r\n return np.einsum('ij,ij->i', diff, diff)", "def call(self, inputs, *args, **kwargs):\n batch_dims = inputs.shape[:nest_utils.get_outer_rank(inputs, self._spec)]\n num_batch_elems = tf.reduce_prod(batch_dims)\n transformed_inputs = tf.reshape(inputs, (num_batch_elems, -1))\n result = self._batch(transformed_inputs, *args, **kwargs)\n return tf.reshape(result, inputs.shape)", "def inner_product(self, tangent_vec_a, tangent_vec_b, base_point):\n args = {\n \"tangent_vec_a\": tangent_vec_a,\n \"tangent_vec_b\": tangent_vec_b,\n \"base_point\": base_point,\n }\n inner_products = self._iterate_over_factors(\"inner_product\", args)\n return sum(inner_products)", "def matrix_power(input, count):\n\n is_input_dparray = isinstance(input, dparray)\n\n if not use_origin_backend(input) and is_input_dparray and count > 0:\n result = input\n for id in range(count - 1):\n result = dpnp.matmul(result, input)\n\n return result\n\n input1 = dpnp.asnumpy(input) if is_input_dparray else input\n\n # TODO need to put dparray memory into NumPy call\n result_numpy = numpy.linalg.matrix_power(input1, count)\n result = result_numpy\n if isinstance(result, numpy.ndarray):\n result = dparray(result_numpy.shape, dtype=result_numpy.dtype)\n for i in range(result.size):\n result._setitem_scalar(i, result_numpy.item(i))\n\n return result", "def inner_product(self, tangent_vec_a, tangent_vec_b, base_point):\n to_squeeze = False\n if tangent_vec_a.ndim == 2 and tangent_vec_b.ndim == 2:\n to_squeeze = True\n if tangent_vec_a.ndim == 2:\n tangent_vec_a = gs.expand_dims(tangent_vec_a, axis=0)\n if tangent_vec_b.ndim == 2:\n tangent_vec_b = gs.expand_dims(tangent_vec_b, axis=0)\n\n point_a = base_point + tangent_vec_a\n point_b = base_point + tangent_vec_b\n inner_prod = gs.zeros((gs.maximum(len(tangent_vec_a), len(tangent_vec_b)), 1))\n if self.a0 > 0 or self.a2 > 0:\n vertex_areas_bp = 
self._space.vertex_areas(base_point)\n if self.a0 > 0:\n inner_prod += self._inner_product_a0(\n tangent_vec_a, tangent_vec_b, vertex_areas_bp=vertex_areas_bp\n )\n if self.a2 > 0:\n inner_prod += self._inner_product_a2(\n tangent_vec_a,\n tangent_vec_b,\n base_point=base_point,\n vertex_areas_bp=vertex_areas_bp,\n )\n if self.a1 > 0 or self.b1 > 0 or self.c1 > 0 or self.b1 > 0:\n one_forms_bp = self._space.surface_one_forms(base_point)\n surface_metrics_bp = self._space._surface_metric_matrices_from_one_forms(\n one_forms_bp\n )\n normals_bp = self._space.normals(base_point)\n areas_bp = gs.sqrt(gs.linalg.det(surface_metrics_bp))\n\n if self.c1 > 0:\n inner_prod += self._inner_product_c1(\n point_a, point_b, normals_bp, areas_bp\n )\n if self.d1 > 0 or self.b1 > 0 or self.a1 > 0:\n ginv_bp = gs.linalg.inv(surface_metrics_bp)\n one_forms_a = self._space.surface_one_forms(point_a)\n one_forms_b = self._space.surface_one_forms(point_b)\n if self.d1 > 0:\n inner_prod += self._inner_product_d1(\n one_forms_a,\n one_forms_b,\n one_forms_bp,\n areas_bp=areas_bp,\n inv_surface_metrics_bp=ginv_bp,\n )\n\n if self.b1 > 0 or self.a1 > 0:\n dga = (\n gs.matmul(\n one_forms_a, gs.transpose(one_forms_a, axes=(0, 1, 3, 2))\n )\n - surface_metrics_bp\n )\n dgb = (\n gs.matmul(\n one_forms_b, gs.transpose(one_forms_b, axes=(0, 1, 3, 2))\n )\n - surface_metrics_bp\n )\n ginvdga = gs.matmul(ginv_bp, dga)\n ginvdgb = gs.matmul(ginv_bp, dgb)\n inner_prod += self._inner_product_a1(ginvdga, ginvdgb, areas_bp)\n inner_prod += self._inner_product_b1(ginvdga, ginvdgb, areas_bp)\n return gs.squeeze(inner_prod, axis=0) if to_squeeze else inner_prod", "def ket(i, dims):\n if not isinstance(i, list):\n i=[i]\n #Single qubit\n if len(i)==1:\n val = np.zeros((dims,1))\n val[i] = 1\n return val.reshape(dims,1)\n #multiple qubits. 
we need to tensor them together\n val = np.ones((1,1)) #initialize variable, so we have something to tensor with, the first time\n for x in i:\n val = np.tensordot(val,ket([x],dims), axes=0).transpose(0,2,1,3)\n val = val.reshape(val.shape[0]*val.shape[1],val.shape[2]*val.shape[3])\n return val.reshape(val.shape[0],1)", "def batch(size, iterable):\r\n return list(xbatch(size, iterable))", "def _flatten_outer_dims(logits):\n rank = array_ops.rank(logits)\n last_dim_size = array_ops.slice(\n array_ops.shape(logits), [math_ops.subtract(rank, 1)], [1])\n output = array_ops.reshape(logits, array_ops.concat([[-1], last_dim_size], 0))\n\n # Set output shape if known.\n if not context.executing_eagerly():\n shape = logits.get_shape()\n if shape is not None and shape.dims is not None:\n shape = shape.as_list()\n product = 1\n product_valid = True\n for d in shape[:-1]:\n if d is None:\n product_valid = False\n break\n else:\n product *= d\n if product_valid:\n output_shape = [product, shape[-1]]\n output.set_shape(output_shape)\n\n return output", "def mdot(*args):\n r = args[0]\n for a in args[1:]:\n r = N.dot(r,a)\n return r", "def dot_product(row, column):\n return reduce(lambda x, y: x + y, [x * y for x, y in zip(row, column)])", "def einsum(ops, *args):\n\n if len(args) != 2:\n raise ValueError(\"Currently only two operands are supported\")\n\n inops, outops = ops.split('->')\n inops = inops.split(',')\n\n # All indices that are in input AND in output are multiplies\n multiplies = sorted(list(set(inops[0]) & set(inops[1]) & set(outops)))\n # All indices that are in input BUT NOT in output are sum contractions\n sums = sorted(list((set(inops[0]) & set(inops[1])) - set(outops)))\n\n # Map sums and indices to axis integers\n multiplies = [[inop.find(x) for x in multiplies] for inop in inops]\n sums = [[inop.find(x) for x in sums] for inop in inops]\n\n # Find output axes in input axes for final transpose\n # Values very likely lie outside of output tensor shape, so\n # just map them values to their rank (index in ordered list)\n transpose = [''.join(inops).find(x) for x in outops]\n transpose = scipy.stats.rankdata(transpose).astype(int) - 1\n\n return tensordot2(*args, sum=sums, multiply=multiplies).transpose(transpose)" ]
[ "0.7076053", "0.6733254", "0.6566906", "0.6538848", "0.6200467", "0.5971671", "0.5898091", "0.5882279", "0.586811", "0.58539116", "0.5828389", "0.58158463", "0.57993835", "0.5773256", "0.57691", "0.570448", "0.5675385", "0.5650381", "0.5588937", "0.55867285", "0.5513858", "0.55136836", "0.5509871", "0.54723763", "0.5454234", "0.5443507", "0.5403955", "0.538054", "0.5368168", "0.53315485", "0.5308115", "0.5301764", "0.5278057", "0.5264163", "0.526193", "0.52577746", "0.5256614", "0.5252011", "0.52415144", "0.52362704", "0.52101207", "0.5200067", "0.5197112", "0.5193679", "0.5186016", "0.517587", "0.5168258", "0.5164027", "0.51615345", "0.5139004", "0.5121352", "0.51146847", "0.5087776", "0.5078121", "0.5076561", "0.50721556", "0.50660783", "0.506358", "0.50615114", "0.5060715", "0.5060588", "0.5052743", "0.5046446", "0.50417864", "0.5030978", "0.50273466", "0.50169057", "0.50169057", "0.50169057", "0.50169057", "0.50145686", "0.5013449", "0.50118506", "0.5010836", "0.50078964", "0.50074065", "0.500337", "0.50028914", "0.49966466", "0.4992702", "0.4990798", "0.49905857", "0.49876967", "0.49758527", "0.4965378", "0.49639866", "0.49627784", "0.49589342", "0.49570483", "0.49565777", "0.49564245", "0.49413946", "0.49409354", "0.4939452", "0.49393687", "0.49270764", "0.49247286", "0.4918662", "0.49143824", "0.4912251" ]
0.6739568
1
Performs a batched outer product over the last dimension.
def batch_outer(a: np.ndarray, b: np.ndarray, verify: bool = True) -> np.ndarray:
    if verify and a.shape[:-1] != b.shape[:-1]:
        raise ValueError("All except the last dimension have to be equal")

    return np.einsum("...i,...j->...ij", a, b)  # slightly faster than np.multiply(a[...,:,None], b[...,None,:])
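For illustration only (not part of the dataset record): a minimal usage sketch of the batch_outer document above. The array shapes and random inputs are assumptions made up for the example; it also checks the broadcasting equivalent mentioned in the trailing comment.

import numpy as np

def batch_outer(a: np.ndarray, b: np.ndarray, verify: bool = True) -> np.ndarray:
    # Copy of the document above, repeated so the sketch is self-contained.
    if verify and a.shape[:-1] != b.shape[:-1]:
        raise ValueError("All except the last dimension have to be equal")
    return np.einsum("...i,...j->...ij", a, b)

rng = np.random.default_rng(0)
a = rng.random((4, 3))  # batch of 4 vectors of length 3 (illustrative shapes)
b = rng.random((4, 5))  # batch of 4 vectors of length 5

out = batch_outer(a, b)  # shape (4, 3, 5): one outer product per batch element
ref = np.multiply(a[..., :, None], b[..., None, :])  # broadcasting form from the trailing comment
assert out.shape == (4, 3, 5)
assert np.allclose(out, ref)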
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def batch_outer_product(a, b):\n a, b = normalize_and_check_ndim([a, b], 2)\n # This is a batchwise version of the matrix multiplication approach\n # used for outer_product(), see explanation there.\n return a[:, :, np.newaxis] * b[:, np.newaxis, :]", "def outer_product(x):\n return keras.backend.batch_dot(\n x[0]\n , x[1]\n , axes=[1,1]\n ) / x[0].get_shape().as_list()[1]", "def outer_product(input_sets, axis=0):\n out = cartesian_product(input_sets)\n return np.prod(out, axis=axis)\n\n # try:\n # from pyapprox.cython.utilities import outer_product_pyx\n # # fused type does not work for np.in32, np.float32, np.int64\n # # so envoke cython cast\n # if np.issubdtype(input_sets[0][0], np.signedinteger):\n # return outer_product_pyx(input_sets, 1)\n # if np.issubdtype(input_sets[0][0], np.floating):\n # return outer_product_pyx(input_sets, 1.)\n # else:\n # return outer_product_pyx(input_sets, input_sets[0][0])\n # except ImportError:\n # print('outer_product extension failed')\n\n # num_elems = 1\n # num_sets = len(input_sets)\n # sizes = np.empty((num_sets), dtype=int)\n # for ii in range(num_sets):\n # sizes[ii] = len(input_sets[ii])\n # num_elems *= sizes[ii]\n\n # # try:\n # # from pyapprox.weave import c_outer_product\n # # return c_outer_product(input_sets)\n # # except:\n # # print ('outer_product extension failed')\n\n # result = np.empty((num_elems), dtype=type(input_sets[0][0]))\n # for ii in range(num_elems):\n # result[ii] = 1.0\n # multi_index = ind2sub(sizes, ii, num_elems)\n # for jj in range(num_sets):\n # result[ii] *= input_sets[jj][multi_index[jj]]\n\n # return result", "def outer_product(a, b, batch=False):\n if batch:\n return batch_outer_product(a, b)\n a, b = normalize_and_check_ndim([a, b], 1)\n # The outer product is equivalent to matrix multiplication a * b\n # where the vector a is interpreted as a column matrix and the\n # vector b as a row matrix. The following reshaping and\n # multiplication accomplishes this.\n return a[:, np.newaxis] * b[np.newaxis, :]", "def tensor_outer_product(tensor1, tensor2):\n shape_1 = tensor1.shape\n shape_2 = tensor2.shape\n s1 = len(shape_1)\n s2 = len(shape_2)\n \n shape_1 = shape_1 + (1, )*s2\n shape_2 = (1, )*s1 + shape_2\n return np.reshape(tensor1, shape_1) * np.reshape(tensor2, shape_2)", "def batch_outer_sum(a, b):\n a, b = normalize_and_check_ndim([a, b], 2)\n # Due to broadcasting, this sum works analogously to batch matrix\n # multiplication. 
See also comments in batch_outer_product().\n return a[:, :, np.newaxis] + b[:, np.newaxis, :]", "def _outer(a, b):\n a_flat = torch.reshape(a, [-1])\n b_flat = torch.reshape(b, [-1])\n a_mul = torch.unsqueeze(a_flat, dim=-1)\n b_mul = torch.unsqueeze(b_flat, dim=0)\n return a_mul * b_mul", "def outer_product(x,y):\n\n return x[:,0]*y[:,1] -x[:,1]*y[:,0]", "def batch_inner(a: np.ndarray, b: np.ndarray, verify: bool = True) -> np.ndarray:\n\n if verify and a.shape != b.shape:\n raise ValueError(\"All dimensions have to be equal\")\n\n if a.shape[-1] == 0:\n return np.empty_like(a)\n\n return np.einsum(\"...i,...i->...\", a, b) # faster than np.sum(a * b, axis=-1)", "def outer(a, b=None):\n if b is None:\n b = a\n size_a = tuple(a.size()) + (b.size()[-1],)\n size_b = tuple(b.size()) + (a.size()[-1],)\n a = a.unsqueeze(dim=-1).expand(*size_a)\n b = b.unsqueeze(dim=-2).expand(*size_b)\n return a, b", "def forward(self, emb_inputs: torch.Tensor) -> torch.Tensor:\n # indexing data for outer product\n p = emb_inputs[:, self.rowidx] # shape = (B, NC2, E)\n q = emb_inputs[:, self.colidx] # shape = (B, NC2, E)\n\n # apply kernel on outer product\n if self.kernel_type == \"mat\":\n # unsqueeze p to (B, 1, NC2, E), \n # then multiply kernel and return shape = (B, E, NC2, E)\n kp = p.unsqueeze(1) * self.kernel\n \n # aggregate last dimension of kp and return shape = (B, E, NC2)\n # then tranpose to shape = (B, NC2, E)\n kp = kp.sum(dim=-1).transpose(1, 2)\n\n # multiply q to kp and return shape = (B, NC2, E)\n # then aggregate outputs with last dimension to shape (B, NC2)\n outputs = (kp * q).sum(dim=-1)\n else:\n # multiply q and kernel to p and return shape = (B, NC2, E)\n # then aggregate outputs with last dimension to shape (B, NC2)\n outputs = (p * q * self.kernel.unsqueeze(0)).sum(dim=-1)\n \n # reshape outputs to (B, 1, NC2)\n return outputs.unsqueeze(1)", "def outer(x, y):\r\n if x.ndim != 1:\r\n x = x.flatten()\r\n if y.ndim != 1:\r\n y = y.flatten()\r\n return dot(\r\n x.dimshuffle(0, 'x'),\r\n y.dimshuffle('x', 0))", "def outer_product(A, B): \n print(A)\n print(B)\n A_rows = len(A)\n A_columns = len(A[0])\n\n B_rows = len(B)\n B_columns = len(B[0])\n \n if A_columns == 1 and B_rows == 1:\n \n outer_product = []\n\n # multi-line list comprehension for outer product\n [outer_product.append([A[i][0] * B[0][j] for j in range(B_columns)]) \n for i in range(A_rows)]\n\n return outer_product\n\n else:\n print(\"dimensions of vector do not match.\")", "def outer_sum(a, b, batch=False):\n if batch:\n return batch_outer_sum(a, b)\n # TODO: naming. Surely this has to be called something sensible?\n a, b = normalize_and_check_ndim([a, b], 1)\n # Due to broadcasting, this sum works analogously to matrix\n # multiplication. 
See also comments in outer_product().\n return a[:, np.newaxis] + b[np.newaxis, :]", "def interior_tensor_product(mx, dim_a, dim_b, e=None):\n assert _np.shape(mx) == (dim_a * dim_b, dim_a * dim_b), \"Dimensions do not agree with matrix size\"\n assert _np.shape(e)[0] == _np.shape(e)[1], \"e should be a square matrix\"\n basis_a = matrix_units(dim_a)\n basis_b = matrix_units(dim_b)\n return sum((_np.trace(_np.dot(mx, _np.kron(unit_a, unit_b).T)) * multikron([unit_a, e, unit_b])\n for unit_a in basis_a for unit_b in basis_b))", "def outer(a, b):\n m, n = a.size, b.size\n\n if m * n < 2**14:\n return mul_dense(a.reshape(m, 1), b.reshape(1, n))\n\n out = np.empty((m, n), dtype=common_type(a, b))\n _outer_par(a.ravel(), b.ravel(), out, m, n)\n\n return out", "def _flatten_batch(self, matrix_tups):\n out_vecs = []\n for t in matrix_tups:\n for v in t:\n new_shape = (v.shape[0],)\n if len(v.shape) > 1:\n new_shape = new_shape + (np.prod(v.shape[1:]),)\n out_vecs.append(v.reshape(new_shape))\n return jnp.concatenate(out_vecs, axis=1)", "def prod(self, axis=None, keepdims=False, dtype=None, out=None):\n return np.multiply.reduce(\n self, out=out, axis=axis, keepdims=keepdims, dtype=dtype\n )", "def outer_prod(x, y):\n if len(list(x.size())) != 2 or len(list(y.size())) != 2:\n raise ValueError(\"An input is not of the right dimension.\")\n\n z = torch.zeros(2, x.size()[1], y.size()[1], dtype=torch.double, device=x.device)\n z[0] = torch.ger(x[0], y[0]) - torch.ger(x[1], -y[1])\n z[1] = torch.ger(x[0], -y[1]) + torch.ger(x[1], y[0])\n\n return z", "def matmul(self, x, work_buffer):\n\n x = asarray(x)\n space = work_buffer.flat\n\n if (x.ndim == 0):\n ValueError(\n \"matmul: Input operand 1 does not have enough dimensions \"\n \"(has 0, gufunc core with signature (n?,k),(k,m?)->(n?,m?) 
\"\n \"requires 1\"\n )\n\n if x.ndim == 1 and self.ndim == 1:\n # Dot product\n if x.shape[0] == self._size:\n if self.fragmented:\n k = self._capacity - self._begin # fragmentation index\n\n out = matmul(self[self._begin:], x[:k]).view(ndarray)\n out += matmul(self[:self._end], x[k:])\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n out = matmul(part, x).view(ndarray)\n\n return(out)\n else:\n raise ValueError(\n \"matmul: Input operand 1 has a mismatch in its core \"\n \"dimension 0, with gufunc signature (n?,k),(k,m?)->(n?,m?)\"\n \" (size {n} is different from {m})\".format(\n n=self._size,\n m=x.shape[0]\n )\n )\n elif self.ndim == 1 and x.ndim > 1:\n if self._size == x.shape[-2]:\n out_shape = *x.shape[:-2], x.shape[-1]\n out = empty(out_shape)\n out2 = space[:reduce(operator.mul, out_shape)].reshape(\n out_shape\n )\n\n if self.fragmented:\n k = self._capacity - self._begin # fragmentation index\n\n matmul(self[self._begin:], x[..., :k, :], out)\n out += matmul(self[:self._end], x[..., k:, :], out2)\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n matmul(part, x, out).view(ndarray)\n\n return(out)\n else:\n raise ValueError(\n \"matmul: Input operand 1 has a mismatch in its core \"\n \"dimension 0, with gufunc signature (n?,k),(k,m?)->(n?,m?)\"\n \" (size {n} is different from {m})\".format(\n n=self.shape[-2],\n m=x.shape[0]\n )\n )\n elif self.ndim == 2:\n if (self.shape[-1] == x.shape[-2]):\n out = empty(\n (*x.shape[:-2], self.shape[-1], x.shape[-2])\n )\n\n if self.fragmented:\n k = self._capacity - self._begin # fragmentation index\n\n matmul(self[self._begin:], x, out[..., :k, :])\n matmul(self[:self._end], x, out[..., k:, :])\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n matmul(part, x, out)\n\n return(out.view(ndarray))\n\n else:\n raise ValueError(\n (\n \"matmul: Input operand 1 has a mismatch in its core \"\n \"dimension 0, with gufunc signature (n?,k),(k,m?)->\"\n \"(n?,m?) 
(size {n} is different from {m})\"\n ).format(\n n=self.shape[-1],\n m=x.shape[-2]\n )\n )\n else:\n if (self.shape[-1] == x.shape[-2]):\n self_shape = (self._size, *self.shape[1:-2])\n\n starexpr = tuple(\n zip_longest(self_shape, x.shape[:-2], fillvalue=1)\n )\n if star_can_broadcast(starexpr):\n broadcast_shape = tuple(\n starmap(\n lambda a, b: max(a, b),\n starexpr\n )\n )\n\n out = empty(\n (*broadcast_shape, self.shape[-2], x.shape[-1])\n )\n\n if self.fragmented:\n k = self._capacity - self._begin # fragmentation index\n\n if x.ndim > 2:\n matmul(self[self._begin:], x[:k], out[:k])\n matmul(self[:self._end], x[k:], out[k:])\n else:\n matmul(self[self._begin:], x, out[:k])\n matmul(self[:self._end], x, out[k:])\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n matmul(part, x, out)\n\n return(out.view(ndarray))\n else:\n raise ValueError(\n (\n \"operands could not be broadcast together with\"\n \"remapped shapes [original->remapped]: \"\n \"{shape_b}->({shape_bn}, newaxis,newaxis) \"\n \"{shape_a}->({shape_an}, newaxis,newaxis) \"\n \"and requested shape ({n},{m})\"\n ).format(\n shape_a=self_shape,\n shape_b=x.shape,\n shape_an=self.shape[:-2].__str__()[:-1],\n shape_bn=x.shape[:-2].__str__()[:-1],\n n=self.shape[-1],\n m=x.shape[-2]\n )\n )\n else:\n raise ValueError(\n (\n \"matmul: Input operand 1 has a mismatch in its core \"\n \"dimension 0, with gufunc signature (n?,k),(k,m?)->\"\n \"(n?,m?) (size {n} is different from {m})\"\n ).format(\n n=self.shape[-1],\n m=x.shape[-2]\n )\n )", "def prod(input, axis=None, dtype=None, keepdims=False, acc_dtype=None,\r\n no_zeros_in_input=False):\r\n\r\n out = elemwise.Prod(axis, dtype=dtype, acc_dtype=acc_dtype,\r\n no_zeros_in_input=no_zeros_in_input)(input)\r\n\r\n if keepdims:\r\n out = makeKeepDims(input, out, axis)\r\n return out", "def dot_product(x, kernel):\n if K.backend() == 'tensorflow':\n # K.expand_dims 默认axis=-1\n return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)\n else:\n return K.dot(x, kernel)", "def inner_product(state_1, state_2):\n return numpy.dot(state_1.conjugate(), state_2)", "def inner_product_batch(pattern_stack_one, pattern_num_one, pattern_stack_two, pattern_num_two):\n\n \"\"\"\n Notice that the two stacks can be different. 
So we can not deduce the lower triangular pattern from the \n other half.\n \"\"\"\n holder = np.zeros((pattern_num_one, pattern_num_two))\n for l in range(pattern_num_one):\n for m in range(pattern_num_two):\n holder[l, m] = np.sum(np.multiply(pattern_stack_one[l], pattern_stack_two[m]))\n\n return holder", "def dot_product(x, kernel):\n if K.backend() == 'tensorflow':\n return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)\n else:\n return K.dot(x, kernel)", "def dot_product(x, kernel):\n if K.backend() == 'tensorflow':\n return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)\n else:\n return K.dot(x, kernel)", "def dot_product(x, kernel):\n if K.backend() == 'tensorflow':\n return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)\n else:\n return K.dot(x, kernel)", "def dot_product(x, kernel):\n if K.backend() == 'tensorflow':\n return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)\n else:\n return K.dot(x, kernel)", "def dot_product(x, kernel):\n if K.backend() == 'tensorflow':\n # todo: check that this is correct\n return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)\n else:\n return K.dot(x, kernel)", "def dot_product(x, kernel):\r\n if K.backend() == 'tensorflow':\r\n return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)\r\n else:\r\n return K.dot(x, kernel)", "def prod(tensor, axis=None):\n raise NotImplementedError", "def _flatten_outer_dims(logits):\n rank = array_ops.rank(logits)\n last_dim_size = array_ops.slice(\n array_ops.shape(logits), [math_ops.subtract(rank, 1)], [1])\n output = array_ops.reshape(logits, array_ops.concat([[-1], last_dim_size], 0))\n\n # Set output shape if known.\n if not context.executing_eagerly():\n shape = logits.get_shape()\n if shape is not None and shape.dims is not None:\n shape = shape.as_list()\n product = 1\n product_valid = True\n for d in shape[:-1]:\n if d is None:\n product_valid = False\n break\n else:\n product *= d\n if product_valid:\n output_shape = [product, shape[-1]]\n output.set_shape(output_shape)\n\n return output", "def dot_product(x, kernel):\n return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)", "def rmatmul(self, x, work_buffer) -> ndarray:\n x = asarray(x)\n space = asarray(work_buffer).flat\n\n if (x.ndim == 0):\n ValueError(\n \"matmul: Input operand 1 does not have enough dimensions \"\n \"(has 0, gufunc core with signature (n?,k),(k,m?)->(n?,m?) 
\"\n \"requires 1\"\n )\n\n if x.ndim == 1 and self.ndim == 1:\n # Dot product\n if x.shape[0] == self._size:\n if self.fragmented:\n k = self._capacity - self._begin # fragmentation index\n\n out = matmul(x[:k], self[self._begin:]).view(ndarray)\n out += matmul(x[k:], self[:self._end]).view(ndarray)\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n out = matmul(x, part)\n\n return(out.view(ndarray))\n else:\n raise ValueError(\n \"matmul: Input operand 1 has a mismatch in its core \"\n \"dimension 0, with gufunc signature (n?,k),(k,m?)->(n?,m?)\"\n \" (size {n} is different from {m})\".format(\n n=self._size,\n m=x.shape[0]\n )\n )\n elif x.ndim == 1 and self.ndim > 1:\n if x.shape[0] == self.shape[-2]:\n if self.ndim == 2:\n out = empty(self.shape[-1])\n out2 = space[:self.shape[-1]]\n\n if self.fragmented:\n k = self._capacity - self._begin # fragmentation index\n\n matmul(x[:k], self[self._begin:], out)\n out += matmul(x[k:], self[:self._end], out2)\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n matmul(x, part, out)\n\n return(out)\n else:\n out = empty(\n (self._size, *self.shape[1:-2], self.shape[-1])\n )\n\n if self.fragmented:\n k = self._capacity - self._begin # fragmentation index\n\n matmul(x, self[self._begin:], out[:k])\n matmul(x, self[:self._end], out[k:])\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n matmul(x, part, out)\n\n return(out)\n else:\n raise ValueError(\n \"matmul: Input operand 1 has a mismatch in its core \"\n \"dimension 0, with gufunc signature (n?,k),(k,m?)->(n?,m?)\"\n \" (size {n} is different from {m})\".format(\n n=self.shape[-2],\n m=x.shape[0]\n )\n )\n elif x.ndim > 1 and self.ndim == 1:\n if x.shape[-1] == self.shape[0]:\n out = empty(x.shape[:-1])\n out2 = space[:reduce(operator.mul, x.shape[:-1])].reshape(\n x.shape[:-1]\n )\n if self.fragmented:\n k = self._capacity - self._begin # fragmentation index\n\n matmul(x[..., :, :k], self[self._begin:], out)\n out += matmul(x[..., :, k:], self[:self._end], out2)\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n matmul(x, part, out)\n\n return(out)\n else:\n raise ValueError(\n \"matmul: Input operand 1 has a mismatch in its core \"\n \"dimension 0, with gufunc signature (n?,k),(k,m?)->(n?,m?)\"\n \" (size {n} is different from {m})\".format(\n n=self.shape[-2],\n m=x.shape[0]\n )\n )\n elif self.ndim == 2:\n if (x.shape[-1] == self.shape[-2]):\n out_shape = (*x.shape[:-1], self.shape[-2])\n out = empty(out_shape)\n out2 = space[:reduce(operator.mul, out_shape)].reshape(\n out_shape\n )\n\n if self.fragmented:\n k = self._capacity - self._begin # fragmentation index\n\n matmul(x[..., :, :k], self[self._begin:], out)\n out += matmul(x[..., :, k:], self[:self._end], out2)\n\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n matmul(x, part, out)\n\n return(out.view(ndarray))\n\n else:\n raise ValueError(\n (\n \"matmul: Input operand 1 has a mismatch in its core \"\n \"dimension 0, with gufunc signature (n?,k),(k,m?)->\"\n \"(n?,m?) 
(size {n} is different from {m})\"\n ).format(\n n=self.shape[-1],\n m=x.shape[-2]\n )\n )\n else:\n if (x.shape[-1] == self.shape[-2]):\n self_shape = (self._size, *self.shape[1:-2])\n\n starexpr = tuple(\n zip_longest(self_shape, x.shape[:-2], fillvalue=1)\n )\n if star_can_broadcast(starexpr):\n broadcast_shape = tuple(\n starmap(\n lambda a, b: max(a, b),\n starexpr\n )\n )\n\n out = empty(\n (*broadcast_shape, x.shape[-2], self.shape[-1])\n )\n\n if self.fragmented:\n k = self._capacity - self._begin # fragmentation index\n\n if x.ndim > 2:\n matmul(x[:k], self[self._begin:], out[:k])\n matmul(x[k:], self[:self._end], out[k:])\n else:\n matmul(x, self[self._begin:], out[:k])\n matmul(x, self[:self._end], out[k:])\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n matmul(x, part, out)\n\n return(out.view(ndarray))\n else:\n raise ValueError(\n (\n \"operands could not be broadcast together with\"\n \"remapped shapes [original->remapped]: \"\n \"{shape_b}->({shape_bn}, newaxis,newaxis) \"\n \"{shape_a}->({shape_an}, newaxis,newaxis) \"\n \"and requested shape ({n},{m})\"\n ).format(\n shape_a=self_shape,\n shape_b=x.shape,\n shape_an=self.shape[:-2].__str__()[:-1],\n shape_bn=x.shape[:-2].__str__()[:-1],\n n=self.shape[-1],\n m=x.shape[-2]\n )\n )\n else:\n raise ValueError(\n (\n \"matmul: Input operand 1 has a mismatch in its core \"\n \"dimension 0, with gufunc signature (n?,k),(k,m?)->\"\n \"(n?,m?) (size {n} is different from {m})\"\n ).format(\n n=self.shape[-1],\n m=x.shape[-2]\n )\n )", "def _dot_product_attention_inner_relative(x, y, z, transpose):\n batch_size, heads, length, _ = x.size()\n\n # xy_matmul is [batch_size, heads, length, length or depth]\n xy_matmul = torch.matmul(x, y if not transpose else y.transpose(-2, -1))\n # x_t is [length, batch_size, heads, length or depth]\n x_t = x.permute(2, 0, 1, 3)\n # x_t_r is [length, batch_size * heads, length or depth]\n x_t_r = x_t.view(length, batch_size * heads, -1)\n # x_tz_matmul is [length, batch_size * heads, length or depth]\n x_tz_matmul = torch.matmul(x_t_r, z if not transpose else z.transpose(-2, -1))\n # x_tz_matmul_r is [length, batch_size, heads, length or depth]\n x_tz_matmul_r = x_tz_matmul.view(length, batch_size, heads, -1)\n # x_tz_matmul_r_t is [batch_size, heads, length, length or depth]\n x_tz_matmul_r_t = x_tz_matmul_r.permute(1, 2, 0, 3)\n\n return xy_matmul + x_tz_matmul_r_t", "def _product(shape):\n result = 1\n for size in shape:\n result *= size\n return result", "def _mul(*args):\n\treturn functools.reduce(numpy.dot, args)", "def batch_dot_product_sparse(spectra, tdata, nz, use_gpu):\n\n if (use_gpu):\n #Use GPU to do dot products in batch\n return _batch_dot_product_sparse_gpu(spectra, tdata)\n\n #Need to find shape of output array of batch dot product\n nrows = 0\n nbasis = None\n for key in tdata:\n nrows += tdata[key].shape[1]\n if (nbasis is None):\n nbasis = tdata[key].shape[2]\n\n #Create empty array rather than stacking a list - faster\n Tbs = np.empty((nz, nrows, nbasis))\n #Loop over all templates\n for i in range(nz):\n irow = 0\n for s in spectra:\n key = s.wavehash\n curr_tb = s.Rcsr.dot(tdata[key][i,:,:])\n #Copy this dot product result into the Tbs array\n Tbs[i, irow:irow+curr_tb.shape[0],:] = curr_tb\n irow += curr_tb.shape[0]\n return Tbs", "def TensorProduct(**kw_kernels):\n return Composite('*', **kw_kernels)", "def calc_batch_dot_product_3d2d(Tbs, zc, use_gpu):\n\n if (use_gpu):\n return 
_calc_batch_dot_product_3d2d_gpu(Tbs, zc)\n\n #Get array dims to reshape model array to 2d\n nz = zc.shape[0]\n nrows = Tbs[0].shape[0]\n model = (Tbs@zc[:, :, None]).reshape((nz, nrows))\n return model", "def call(self, inputs, *args, **kwargs):\n batch_dims = inputs.shape[:nest_utils.get_outer_rank(inputs, self._spec)]\n num_batch_elems = tf.reduce_prod(batch_dims)\n transformed_inputs = tf.reshape(inputs, (num_batch_elems, -1))\n result = self._batch(transformed_inputs, *args, **kwargs)\n return tf.reshape(result, inputs.shape)", "def batch_flatten(this,x):\n shape = x.get_shape().as_list()[1:]\n if None not in shape:\n return tf.reshape(x, [-1, int(np.prod(shape))])\n return tf.reshape(x, tf.stack([tf.shape(x)[0], -1]))", "def batch_flatten(this,x):\n shape = x.get_shape().as_list()[1:]\n if None not in shape:\n return tf.reshape(x, [-1, int(np.prod(shape))])\n return tf.reshape(x, tf.stack([tf.shape(x)[0], -1]))", "def einsum(subscripts, *operands):\n raise NotImplementedError", "def cartesian_product(*arrays):\n length = len(arrays)\n dtype = np.result_type(*arrays)\n arr = np.empty([len(a) for a in arrays] + [length], dtype=dtype)\n for idx, array in enumerate(np.ix_(*arrays)):\n arr[...,idx] = array\n return arr.reshape(-1, length)", "def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def kronecker_operators(*args):\n return reduce(wrapped_kronecker, *args)", "def kronecker_product(mat1, mat2):\n m1, n1 = mat1.get_shape().as_list()\n mat1_rsh = tf.reshape(mat1, [m1, 1, n1, 1])\n m2, n2 = mat2.get_shape().as_list()\n mat2_rsh = tf.reshape(mat2, [1, m2, 1, n2])\n return tf.reshape(mat1_rsh * mat2_rsh, [m1 * m2, n1 * n2])", "def _kronecker_product(mat1: tf.Tensor, mat2: tf.Tensor) -> tf.Tensor:\n m1, n1 = mat1.get_shape().as_list()\n mat1_rsh = tf.reshape(mat1, [m1, 1, n1, 1])\n m2, n2 = mat2.get_shape().as_list()\n mat2_rsh = tf.reshape(mat2, [1, m2, 1, n2])\n return tf.reshape(mat1_rsh * mat2_rsh, [m1 * m2, n1 * n2])", "def kron(a, b):\r\n a = tensor.as_tensor_variable(a)\r\n b = tensor.as_tensor_variable(b)\r\n if (a.ndim + b.ndim <= 2):\r\n raise TypeError('kron: inputs dimensions must sum to 3 or more. '\r\n 'You passed %d and %d.' 
% (a.ndim, b.ndim))\r\n o = tensor.outer(a, b)\r\n o = o.reshape(tensor.concatenate((a.shape, b.shape)),\r\n a.ndim + b.ndim)\r\n shf = o.dimshuffle(0, 2, 1, * range(3, o.ndim))\r\n if shf.ndim == 3:\r\n shf = o.dimshuffle(1, 0, 2)\r\n o = shf.flatten()\r\n else:\r\n o = shf.reshape((o.shape[0] * o.shape[2],\r\n o.shape[1] * o.shape[3]) +\r\n tuple([o.shape[i] for i in range(4, o.ndim)]))\r\n return o", "def cartesian_product(*arrays):\n\n la = len(arrays)\n if la == 0:\n return np.array([])\n dtype = np.result_type(*arrays)\n arr = np.empty([len(a) for a in arrays] + [la], dtype=dtype)\n for i, a in enumerate(np.ix_(*arrays)):\n arr[..., i] = a\n return arr.reshape(-1, la)", "def batch_concat(\n values: types.NestedArray,\n num_batch_dims: int = 1,\n) -> jnp.ndarray:\n flatten_fn = lambda x: _flatten.apply(None, x, num_batch_dims)\n flat_leaves = tree.map_structure(flatten_fn, values)\n return jnp.concatenate(tree.flatten(flat_leaves), axis=-1)", "def _generate_mult_process(X, mat, inits):\n M = np.empty_like(X, dtype=float)\n M[..., 0] = inits[X[..., 0]]\n M[..., 1:] = mat[X[..., :-1], X[..., 1:]]\n np.cumprod(M, axis=-1, out=M)\n return M", "def inner_product(alpha, F, beta):\n return np.dot(alpha, np.dot(F, beta))", "def _mean_post_einsum(g: to.Tensor, lpj: to.Tensor) -> to.Tensor:\n return to.einsum(\"ns...,ns->n...\", (g, lpj2pjc(lpj)))", "def _kp(a, b):\n if a.shape != b.shape or a.shape[-1] != 1:\n raise(ValueError)\n N = a.shape[0]\n # take the outer product over the last two axes, then reshape:\n return np.einsum('ijk,ilk->ijkl', a, b).reshape(N, -1, 1)", "def kronecker_prod(x, y):\n if len(list(x.size())) != 3 or len(list(y.size())) != 3:\n raise ValueError(\"An input is not of the right dimension.\")\n\n z = torch.zeros(\n 2,\n x.size()[1] * y.size()[1],\n x.size()[2] * y.size()[2],\n dtype=torch.double,\n device=x.device,\n )\n\n row_count = 0\n\n for i in range(x.size()[1]):\n for k in range(y.size()[1]):\n column_count = 0\n for j in range(x.size()[2]):\n for l in range(y.size()[2]):\n\n z[0][row_count][column_count] = (x[0][i][j] * y[0][k][l]) - (\n x[1][i][j] * y[1][k][l]\n )\n z[1][row_count][column_count] = (x[0][i][j] * y[1][k][l]) + (\n x[1][i][j] * y[0][k][l]\n )\n\n column_count += 1\n row_count += 1\n\n return z", "def prodSumNumpy(*arrays):\n return np.sum(np.prod(arrays,axis=0))", "def cartesian_product(input_sets, elem_size=1):\n import itertools\n out = []\n # ::-1 reverse order to be backwards compatiable with old\n # function below\n for r in itertools.product(*input_sets[::-1]):\n out.append(r)\n out = np.asarray(out).T[::-1, :]\n return out\n\n # try:\n # from pyapprox.cython.utilities import cartesian_product_pyx\n # # # fused type does not work for np.in32, np.float32, np.int64\n # # # so envoke cython cast\n # # if np.issubdtype(input_sets[0][0],np.signedinteger):\n # # return cartesian_product_pyx(input_sets,1,elem_size)\n # # if np.issubdtype(input_sets[0][0],np.floating):\n # # return cartesian_product_pyx(input_sets,1.,elem_size)\n # # else:\n # # return cartesian_product_pyx(\n # # input_sets,input_sets[0][0],elem_size)\n # # always convert to float then cast back\n # cast_input_sets = [np.asarray(s, dtype=float) for s in input_sets]\n # out = cartesian_product_pyx(cast_input_sets, 1., elem_size)\n # out = np.asarray(out, dtype=input_sets[0].dtype)\n # return out\n # except:\n # print('cartesian_product extension failed')\n\n # num_elems = 1\n # num_sets = len(input_sets)\n # sizes = np.empty((num_sets), dtype=int)\n # for ii in range(num_sets):\n # 
sizes[ii] = input_sets[ii].shape[0]/elem_size\n # num_elems *= sizes[ii]\n # # try:\n # # from pyapprox.weave import c_cartesian_product\n # # # note c_cartesian_product takes_num_elems as last arg and cython\n # # # takes elem_size\n # # return c_cartesian_product(input_sets, elem_size, sizes, num_elems)\n # # except:\n # # print ('cartesian_product extension failed')\n\n # result = np.empty(\n # (num_sets*elem_size, num_elems), dtype=type(input_sets[0][0]))\n # for ii in range(num_elems):\n # multi_index = ind2sub(sizes, ii, num_elems)\n # for jj in range(num_sets):\n # for kk in range(elem_size):\n # result[jj*elem_size+kk, ii] =\\\n # input_sets[jj][multi_index[jj]*elem_size+kk]\n # return result", "def merge_mps_tensor_pair(A0: np.ndarray, A1: np.ndarray) -> np.ndarray:\n A = np.einsum(A0, (0, 2, 3), A1, (1, 3, 4), (0, 1, 2, 4), optimize=True)\n # combine original physical dimensions\n A = A.reshape((A.shape[0]*A.shape[1], A.shape[2], A.shape[3]))\n return A", "def product_nb(a):\n out = np.empty(a.shape[1], dtype=np.float_)\n for col in range(a.shape[1]):\n out[col] = product_1d_nb(a[:, col])\n return out", "def cartesian_product(arrays):\n la = len(arrays)\n dtype = np.find_common_type([a.dtype for a in arrays], [])\n arr = np.empty([len(a) for a in arrays] + [la], dtype=dtype)\n for i, a in enumerate(np.ix_(*arrays)):\n arr[..., i] = a\n return arr.reshape(-1, la)", "def kron(*matrices: np.ndarray) -> np.ndarray:\n product = np.eye(1)\n for m in matrices:\n product = np.kron(product, m)\n return np.array(product)", "def inner_prod(x, y):\n z = torch.zeros(2, dtype=torch.double, device=x.device)\n\n if len(list(x.size())) == 2 and len(list(y.size())) == 2:\n z[0] = torch.dot(x[0], y[0]) - torch.dot(-x[1], y[1])\n z[1] = torch.dot(x[0], y[1]) + torch.dot(-x[1], y[0])\n\n if len(list(x.size())) == 1 and len(list(y.size())) == 1:\n z[0] = (x[0] * y[0]) - (-x[1] * y[1])\n z[1] = (x[0] * y[1]) + (-x[1] * y[0])\n\n return z", "def _product(self, args):\n pools = map(tuple, args) #within original version args defined as *args\n result = [[]]\n for pool in pools:\n result = [x + [y] for x in result for y in pool]\n return result", "def test_broadcast(self):\n a = np.ones((3, 4, 1))\n ai = np.ones((1, 2, 5), dtype=np.intp)\n actual = take_along_axis(a, ai, axis=1)\n assert_equal(actual.shape, (3, 2, 5))", "def asummult (array1,array2,dimension=None,keepdims=0):\r\n if dimension == None:\r\n array1 = N.ravel(array1)\r\n array2 = N.ravel(array2)\r\n dimension = 0\r\n return asum(array1*array2,dimension,keepdims)", "def kron_prod(matList):\n ret = matList[0]\n for i in range(1, len(matList)):\n ret = kron(ret, matList[i])\n return ret", "def batch_integrate(self, batched_values, weights=None):\n if weights is None:\n return batched_values.sum(axis=1) * self.dx\n else:\n return (weights.reshape(1, -1) @ batched_values.T).reshape(-1) * self.dx", "def multikron(a):\n return _reduce(_np.kron, a)", "def call(self, inputs):\r\n outputs = K.spatial_2d_padding(inputs,\r\n padding=self.padding,\r\n data_format=self.data_format)\r\n\r\n p00, p01 = self.padding[0][0], self.padding[0][1]\r\n p10, p11 = self.padding[1][0], self.padding[1][1]\r\n if self.data_format == \"channels_last\":\r\n\r\n row0 = K.concatenate([inputs[:, p00:0:-1, p10:0:-1, :],\r\n inputs[:, p00:0:-1, :, :],\r\n inputs[:, p00:0:-1, -2:-2-p11:-1, :]],\r\n axis=2)\r\n row1 = K.concatenate([inputs[:, :, p10:0:-1, :],\r\n inputs,\r\n inputs[:, :, -2:-2-p11:-1, :]],\r\n axis=2)\r\n row2 = K.concatenate([inputs[:, -2:-2-p01:-1, p10:0:-1, 
:],\r\n inputs[:, -2:-2-p01:-1, :, :],\r\n inputs[:, -2:-2-p01:-1, -2:-2-p11:-1, :]],\r\n axis=2)\r\n\r\n outputs = K.concatenate([row0, row1, row2], axis=1)\r\n\r\n else: # self.data_format == \"channels_first\"\r\n\r\n row0 = K.concatenate([inputs[:, :, p00:0:-1, p10:0:-1],\r\n inputs[:, :, p00:0:-1, :],\r\n inputs[:, :, p00:0:-1, -2:-2-p11:-1]],\r\n axis=3)\r\n row1 = K.concatenate([inputs[:, :, :, p10:0:-1],\r\n inputs,\r\n inputs[:, :, :, -2:-2-p11:-1]],\r\n axis=3)\r\n row2 = K.concatenate([inputs[:, :, -2:-2-p01:-1, p10:0:-1],\r\n inputs[:, :, -2:-2-p01:-1, :],\r\n inputs[:, :, -2:-2-p01:-1, -2:-2-p11:-1]],\r\n axis=3)\r\n\r\n outputs = K.concatenate([row0, row1, row2], axis=2)\r\n\r\n return outputs", "def batch_vTAv(A: np.ndarray, v: np.ndarray) -> np.ndarray:\n\n \"\"\" Faster than\n Av = np.matmul(A, v[...,:,None]) # [B, X, 1]\n return np.matmul(v[...,None,:], Av).squeeze((-2, -1)) # [B]\n \"\"\"\n\n return np.einsum(\"...k,...kl,...l->...\", v, A, v)", "def __mul__(self, oth):\n\t\tif isinstance(oth, Matrix) or isiterable(oth):\n\t\t\t# matrix\n\t\t\toth_m = oth\n\t\t\tif not isinstance(oth_m, Matrix):\n\t\t\t\toth_m = Matrix(oth_m)\t\t\t\n\t\t\tres_m = self._mat_mul(oth_m)\n\t\t\tif isinstance(oth, Matrix):\n\t\t\t\treturn res_m\n\t\t\telse:\n\t\t\t\treturn type(oth)(res_m._unnest())\n\t\telse:\n\t\t\t# scalar\n\t\t\treturn Matrix._make_new(lambda i,j: self.data[i][j] * oth, self.rows, self.cols)", "def test_cross_multiply_shape():\n array_1 = np.ones((1, 3))\n array_out = utils.cross_multiply_array(array_1, axis=1)\n assert (1, 3, 3) == array_out.shape", "def test_multidimensional_operation(self):\n # start with something (1, 2, 3)\n data = np.array([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]])\n\n # split 1st dim (2, 2, 3)\n coefficients = np.ones((1, 2)) / 2\n expected = np.array(\n [[[0.0, 0.5, 1.0], [1.5, 2.0, 2.5]], [[0.0, 0.5, 1.0], [1.5, 2.0, 2.5]]]\n )\n actual = Adaptor.convert_with_coefficients(data, coefficients, 0)\n np.testing.assert_allclose(actual, expected)\n\n # sum 3rd dim (2, 2, 1)\n coefficients = np.ones((3, 1))\n expected = np.array([[[1.5], [6.0]], [[1.5], [6.0]]])\n actual = Adaptor.convert_with_coefficients(actual, coefficients, 2)\n np.testing.assert_allclose(actual, expected)", "def batch(size, iterable):\r\n return list(xbatch(size, iterable))", "def cumprod(a, axis=None, dtype=None, out=None):\n a = astensor(a)\n if dtype is None:\n dtype = np.empty((1,), dtype=a.dtype).cumprod().dtype\n op = TensorCumprod(axis=axis, dtype=dtype)\n return op(a, out=out)", "def einsum(ops, *args):\n\n if len(args) != 2:\n raise ValueError(\"Currently only two operands are supported\")\n\n inops, outops = ops.split('->')\n inops = inops.split(',')\n\n # All indices that are in input AND in output are multiplies\n multiplies = sorted(list(set(inops[0]) & set(inops[1]) & set(outops)))\n # All indices that are in input BUT NOT in output are sum contractions\n sums = sorted(list((set(inops[0]) & set(inops[1])) - set(outops)))\n\n # Map sums and indices to axis integers\n multiplies = [[inop.find(x) for x in multiplies] for inop in inops]\n sums = [[inop.find(x) for x in sums] for inop in inops]\n\n # Find output axes in input axes for final transpose\n # Values very likely lie outside of output tensor shape, so\n # just map them values to their rank (index in ordered list)\n transpose = [''.join(inops).find(x) for x in outops]\n transpose = scipy.stats.rankdata(transpose).astype(int) - 1\n\n return tensordot2(*args, sum=sums, multiply=multiplies).transpose(transpose)", "def 
matrix_dot(*args):\r\n rval = args[0]\r\n for a in args[1:]:\r\n rval = theano.tensor.dot(rval, a)\r\n return rval", "def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def cumprod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.cumprod,\n dim=dim,\n skipna=skipna,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def cartesian(arrays, out=None):\n\n arrays = [np.asarray(x) for x in arrays]\n dtype = arrays[0].dtype\n\n n = np.prod([x.size for x in arrays])\n if out is None:\n out = np.zeros([n, len(arrays)], dtype=dtype)\n\n m = n / arrays[0].size\n out[:, 0] = np.repeat(arrays[0], m)\n if arrays[1:]:\n cartesian(arrays[1:], out=out[0:m, 1:])\n for j in range(1, arrays[0].size):\n out[j*m:(j+1)*m, 1:] = out[0:m, 1:]\n return out", "def inner_products(t_S, t_Var, t_XS, t_YS, t_XE, t_YE, t_XR, t_YR):\n\n # Note in this computation, we do the indices in this form:\n # b, i, j, t\n # batch, pixel, neuron, time step\n\n # indices: b, i1, j, t\n t_dX = (t_XS.dimshuffle('x', 0, 'x', 'x') -\n t_XE.dimshuffle('x', 'x', 0, 'x') -\n t_XR.dimshuffle(0, 'x', 'x', 1))\n t_dX.name = 'dX'\n # indices: b, i2, j, t\n t_dY = (t_YS.dimshuffle('x', 0, 'x', 'x') -\n t_YE.dimshuffle('x', 'x', 0, 'x') -\n t_YR.dimshuffle(0, 'x', 'x', 1))\n t_dY.name = 'dY'\n\n # Use outer product trick to dot product image with point filters\n t_PixRFCouplingX = T.exp(-0.5 * t_dX ** 2 /\n t_Var.dimshuffle('x', 0, 'x', 'x'))\n t_PixRFCouplingY = T.exp(-0.5 * t_dY ** 2 /\n t_Var.dimshuffle('x', 0, 'x', 'x'))\n t_PixRFCouplingX.name = 'PixRFCouplingX'\n t_PixRFCouplingY.name = 'PixRFCouplingY'\n\n # Matrix of inner products between the images and the retinal RFs\n # indices: b, j, t\n # Sum_i2 T(i2, i1) * T(b, i2, j, t) = T(b, i1, j, t)\n t_IpsY = T.sum(t_S.dimshuffle('x', 0, 1, 'x', 'x') *\n t_PixRFCouplingY.dimshuffle(0, 1, 'x', 2, 3),\n axis=1)\n # Sum_i1 T(b, i1, j, t) * T(b, i2, j, t) = T(b, j, t)\n t_Ips = T.sum(t_IpsY * t_PixRFCouplingX, axis=1)\n t_Ips.name = 'Ips'\n\n # For the gradient, we also prepare d Ips / dS\n # This is in the form b, i2, i1, j, t\n t_PixRFCoupling = (t_PixRFCouplingX.dimshuffle(0, 'x', 1, 2, 3) *\n t_PixRFCouplingY.dimshuffle(0, 1, 'x', 2, 3))\n\n return t_Ips, t_PixRFCoupling", "def cartesian(arrays, out=None):\n\n arrays = [np.asarray(x) for x in arrays]\n dtype = arrays[0].dtype\n\n n = np.prod([x.size for x in arrays])\n if out is None:\n out = np.zeros([n, len(arrays)], dtype=dtype)\n\n m = n / arrays[0].size\n out[:, 0] = np.repeat(arrays[0], m)\n if arrays[1:]:\n cartesian(arrays[1:], out=out[0:m, 1:])\n for j in range(1, arrays[0].size):\n out[j * m:(j + 1) * m, 1:] = out[0:m, 1:]\n return out", "def prod(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.prod,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n 
**kwargs,\n )", "def cartesian(arrays, out=None):\n\n arrays = [numpy.asarray(x) for x in arrays]\n dtype = arrays[0].dtype\n\n n = numpy.prod([x.size for x in arrays])\n if out is None:\n out = numpy.zeros([n, len(arrays)], dtype=dtype)\n\n m = n / arrays[0].size\n out[:,0] = numpy.repeat(arrays[0], m)\n if arrays[1:]:\n cartesian(arrays[1:], out=out[0:m,1:])\n for j in xrange(1, arrays[0].size):\n out[j*m:(j+1)*m,1:] = out[0:m,1:]\n return out", "def repeat_batch(t, K, dim=0):\n shape = t.shape\n tiling = [1] * (len(shape) + 1)\n tiling[dim + 1] = K\n tiled = t.unsqueeze(dim + 1).repeat(tiling)\n old_bsz = shape[dim]\n new_bsz = old_bsz * K\n new_shape = list(shape[:dim]) + [new_bsz] + list(shape[dim+1:])\n return tiled.view(new_shape)", "def func(batch_of_x0, batch_of_x1):\n # batch_0_result.shape = [..., 2].\n x0, x1 = batch_of_x0[0, ...], batch_of_x1[0, ...]\n batch_0_result = tf.stack([tf.sin(x0 * x1), tf.cos(x0 * x1)], axis=-1)\n\n x0, x1 = batch_of_x0[1, ...], batch_of_x1[1, ...]\n batch_1_result = tf.stack([tf.sin(2 * x0), tf.cos(2 * x1)], axis=-1)\n\n return tf.stack([batch_0_result, batch_1_result], axis=0)", "def _calc_batch_dot_product_3d2d_gpu(Tbs, zc):\n\n #Use batch_dot_product_3d2d kernel to compute model array\n # Load CUDA kernel\n cp_module = cp.RawModule(code=cuda_source)\n batch_dot_product_3d2d_kernel = cp_module.get_function('batch_dot_product_3d2d')\n\n #Array dims needed by CUDA:\n nz = zc.shape[0]\n nrows = Tbs[0].shape[0]\n n = nrows * nz\n nbasis = zc.shape[1]\n\n #Allocate CUPY array and calc blocks to be used\n blocks = (n+block_size-1)//block_size\n model = cp.empty((nz, nrows), cp.float64)\n #Launch kernel and synchronize\n batch_dot_product_3d2d_kernel((blocks,), (block_size,), (Tbs, zc, model, nrows, nbasis, nz))\n #cp.cuda.Stream.null.synchronize()\n return model", "def flatten(inputs, is_batched=True, scope=None):\n with tf.name_scope(scope, 'flatten'):\n shape = get_shape(inputs)\n if is_batched:\n num_units = np.prod(shape[1:])\n return tf.reshape(inputs, [-1, num_units])\n else:\n num_units = np.prod(shape)\n return tf.reshape(inputs, [num_units])", "def call(self, inputs, training=None):\n with tf.device(\"/device:GPU:0\"):\n return tf.reshape(tf.einsum('bj,jk,bk->b', inputs, self.laplacian, inputs), (-1, 1))", "def non_vectorized_loops(self, data):\n\n non_vectorized = np.zeros(data.shape)\n for row in range(data.shape[0]):\n for col in range(data.shape[1]):\n non_vectorized[row][col] = (data[row][col] * data[row][col] +\n data[row][col])\n return non_vectorized", "def batch_dot(x, y, axes=None):\r\n if isinstance(axes, int):\r\n axes = (axes, axes)\r\n x_ndim = K.ndim(x)\r\n y_ndim = K.ndim(y)\r\n if x_ndim > y_ndim:\r\n diff = x_ndim - y_ndim\r\n y = tf.reshape(y, tf.concat([tf.shape(y), [1] * (diff)], axis=0))\r\n elif y_ndim > x_ndim:\r\n diff = y_ndim - x_ndim\r\n x = tf.reshape(x, tf.concat([tf.shape(x), [1] * (diff)], axis=0))\r\n else:\r\n diff = 0\r\n if K.ndim(x) == 2 and K.ndim(y) == 2:\r\n if axes[0] == axes[1]:\r\n out = tf.reduce_sum(tf.multiply(x, y), axes[0])\r\n else:\r\n out = tf.reduce_sum(tf.multiply(\r\n tf.transpose(x, [1, 0]), y), axes[1])\r\n else:\r\n if axes is not None:\r\n adj_x = None if axes[0] == K.ndim(x) - 1 else True\r\n adj_y = True if axes[1] == K.ndim(y) - 1 else None\r\n else:\r\n adj_x = None\r\n adj_y = None\r\n out = tf.matmul(x, y, adjoint_a=adj_x, adjoint_b=adj_y)\r\n if diff:\r\n if x_ndim > y_ndim:\r\n idx = x_ndim + y_ndim - 3\r\n else:\r\n idx = x_ndim - 1\r\n out = tf.squeeze(out, list(range(idx, idx 
+ diff)))\r\n if K.ndim(out) == 1:\r\n out = tf.expand_dims(out, 1)\r\n return out", "def generalized_broadcast(arrays):\n arrays1 = np.broadcast_arrays(*[A[..., 0] for A in arrays])\n shapes_b = [A1.shape + (A.shape[-1],) for A1, A in zip(arrays1, arrays)]\n strides_b = [A1.strides + (A.strides[-1],) for A1, A in zip(arrays1, arrays)]\n arrays_b = [as_strided(A, shape=shape_Ab, strides=strides_Ab)\n for A, shape_Ab, strides_Ab in zip(arrays, shapes_b, strides_b)]\n return arrays_b", "def unroll(self):\n\n return numpy.hstack([k.flat for k in self.weights])", "def add_data_to_start_and_end_for_inner_product(data):\n temp = np.append(data[-1].reshape(1, 2), data, axis=0)\n new_data = np.append(temp, data[0].reshape(1, 2), axis=0)\n\n return new_data", "def e_step(data, p_k, p_i_j):\r\n N = data.shape[0]\r\n K = p_i_j.shape[0]\r\n\r\n p_k_x = np.empty((N, K))\r\n for i in range(N):\r\n for k in range(K):\r\n p_k_x[i, k] = np.prod((p_i_j[k] ** data[i]) * ((1 - p_i_j[k]) ** (1 - data[i])))\r\n p_k_x *= p_k\r\n\r\n p_k_x /= p_k_x.sum(axis=1)[:, np.newaxis]\r\n\r\n return p_k_x", "def dot_product(row, column):\n return reduce(lambda x, y: x + y, [x * y for x, y in zip(row, column)])", "def dot_as_einsum(x: JaxExpression, y: JaxExpression, params: Params) -> Einsum:\n dimension_numbers = params['dimension_numbers']\n (x_contract, y_contract), (x_batch, y_batch) = dimension_numbers\n x_ndim, y_ndim = len(x.shape), len(y.shape)\n letter_iter = einsum.einsum_letters()\n x_dims = ''.join(it.islice(letter_iter, x_ndim))\n y_dims = list(it.islice(letter_iter, y_ndim))\n for x_dim, y_dim in zip(x_contract + x_batch, y_contract + y_batch):\n y_dims[y_dim] = x_dims[x_dim]\n y_dims = ''.join(y_dims)\n out_batch_dims = [x_dims[dim] for dim in x_batch]\n out_dims = out_batch_dims + ([xd for xd in x_dims if xd not in y_dims] +\n [yd for yd in y_dims if yd not in x_dims])\n out_dims = ''.join(out_dims)\n return Einsum(f'{x_dims},{y_dims}->{out_dims}', (x, y))" ]
[ "0.755109", "0.70560527", "0.6960709", "0.6768571", "0.65054774", "0.63410676", "0.61378115", "0.61229986", "0.60099125", "0.5946456", "0.59233963", "0.590922", "0.5882272", "0.587886", "0.5864047", "0.58254915", "0.5783885", "0.5765654", "0.5729165", "0.57191443", "0.55910575", "0.5580287", "0.55212194", "0.5508656", "0.5501536", "0.5501536", "0.5501536", "0.5501536", "0.5462587", "0.5459653", "0.54580605", "0.5456374", "0.5447775", "0.53945124", "0.53475916", "0.533101", "0.52877825", "0.52875024", "0.52672064", "0.5259896", "0.52584046", "0.5247406", "0.5247406", "0.52437854", "0.52343196", "0.523282", "0.5232556", "0.51873404", "0.5178292", "0.5175091", "0.5174796", "0.51692986", "0.5156261", "0.51517653", "0.51502204", "0.5131151", "0.5108888", "0.50968516", "0.5088826", "0.50867724", "0.5083422", "0.50779474", "0.50733596", "0.50703675", "0.50688463", "0.50649863", "0.50558984", "0.50397074", "0.5033113", "0.50267756", "0.5019835", "0.50042385", "0.50036216", "0.5001427", "0.49972972", "0.49904978", "0.49846455", "0.49835986", "0.49829894", "0.4967353", "0.4967353", "0.4967353", "0.49630886", "0.49604145", "0.49587807", "0.4949434", "0.49430734", "0.49383748", "0.49311355", "0.49302495", "0.4928349", "0.4927384", "0.49236482", "0.4922849", "0.49190414", "0.49183813", "0.49156842", "0.49140015", "0.49139613", "0.49091285" ]
0.66920644
4
`probs`: values ndarray. `k`: take the smallest `k` elements if `reverse` is False and the largest `k` if `reverse` is True. `axis`: sorting and selection axis.
def batchtopk(
    probs: np.ndarray, k: Optional[int] = None, axis: int = -1, reverse: bool = False
) -> Tuple[np.ndarray, np.ndarray]:
    if k is not None and k <= 0:
        raise ValueError("k must be larger than 0. Use None to choose all elements.")
    if axis != -1:
        raise ValueError("Only last axis supported atm")
    if len(probs.shape) <= 1:
        raise ValueError("probs must be at least 2-dimensional")

    if reverse:
        sign = -1
    else:
        sign = 1

    indices = np.argsort(sign * probs, axis=-1)  # use argpartition?
    probs = np.take_along_axis(probs, indices[..., :k], axis=-1)
    return indices, probs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tflite_top_k_probs(probs, k):\n\n if k > 0:\n return np.flip(probs[0].argsort()[-k:])\n else:\n return np.flip(probs[0].argsort())", "def tf_top_k_probs(probs, k):\n\n if k > 0:\n return probs.argsort()[-k:][::-1]\n else:\n return probs.argsort()[:][::-1]", "def indices_of_top_k(arr, k):\n return np.sort(np.argpartition(np.array(arr), -k)[-k:])", "def permute_via_sort(val, keys, inverse_keys, axis=0):\n # It is *not* safe to use jax.custom_vjp here (see permute_via_gather).\n keys = jax.lax.stop_gradient(keys)\n inverse_keys = jax.lax.stop_gradient(inverse_keys)\n def permute_impl(val):\n # On TPU, sorting scalars by key is faster than a gather.\n _, permuted = jax.lax.sort_key_val(keys, val, dimension=axis)\n return permuted\n def permute_vjp(val):\n permuted = permute_impl(jax.lax.stop_gradient(val))\n def vjpfun(permuted_grad):\n _, val_grad = jax.lax.sort_key_val(\n inverse_keys, permuted_grad, dimension=axis)\n return (val_grad,)\n return permuted, vjpfun\n permute = jax.custom_transforms(permute_impl)\n jax.defvjp_all(permute, permute_vjp)\n return permute(val)", "def reveal_sort(k, D, reverse=False):\n assert len(k) == len(D)\n library.break_point()\n shuffle = types.sint.get_secure_shuffle(len(k))\n k_prime = k.get_vector().secure_permute(shuffle).reveal()\n idx = types.Array.create_from(k_prime)\n if reverse:\n D.assign_vector(D.get_slice_vector(idx))\n library.break_point()\n D.secure_permute(shuffle, reverse=True)\n else:\n D.secure_permute(shuffle)\n library.break_point()\n v = D.get_vector()\n D.assign_slice_vector(idx, v)\n library.break_point()\n instructions.delshuffle(shuffle)", "def get_tops(similarities, k):\n tops = similarities.argsort(axis=1)[:, :k].tolist()\n return tops", "def fetch_top_k(vect, mat, k):\n resultant = np.dot(mat, vect)\n arglist = np.argsort(resultant)\n arglist = arglist[-1:(-1 - k):-1]\n return arglist, resultant", "def sort_k_messed_array(arr, k):\n\n if k == 0:\n return arr\n\n for i in range(len(arr)):\n min_index = find_min_index(arr, i, i + k)\n arr[i], arr[min_index] = arr[min_index], arr[i]\n\n return arr", "def order_preserving_k_max(input_tensor, k):\n ndims = input_tensor.shape.ndims\n \n # get indices of topk elements\n indices = tf.nn.top_k(input_tensor, k, sorted=False).indices#shape [d1,d2..,dn-1,k]\n # sort indices of topk elements\n indices = tf.nn.top_k(indices, k, sorted=True).values#shape [d1,d2..,dn-1,k]\n indices = tf.expand_dims(indices, axis=ndims)#shape [d1,d2..,dn-1,1,k]\n\n # build supporting indices for first n-1 dims\n support = tf.meshgrid(*[tf.range(tf.shape(input_tensor)[d])\n for d in xrange(ndims-1)], indexing='ij')#see numpy.meshgrid\n support = tf.stack(support, axis=ndims-1)#shape [d1,d2..,dn-1,ndims-1]\n support = tf.expand_dims(support, axis=ndims-1)#shape [d1,d2..,dn-1,1,ndims-1]\n support = tf.tile(support, [1]*(ndims-1)+[k, 1])#shape [d1,d2..,dn-1,k,ndims-1]\n\n full_indices = tf.concat([support, indices], axis=ndims)#shape [d1,d2..,dn-1,k,ndims]\n output = tf.gather_nd(input_tensor, full_indices)\n \n return output", "def k_smallest_sorted(a, k):\r\n k_smallest_idxs = np.argpartition(a, k)[:k]\r\n return k_smallest_idxs[np.argsort(a[k_smallest_idxs])]", "def find_top_k(predictions, boxes, k):\r\n\r\n if predictions.shape[0] == 0:\r\n predictions2 = torch.Tensor([]).to(device)\r\n labels2 = torch.Tensor([]).to(device)\r\n boxes2 = torch.Tensor([]).to(device)\r\n scores2 = torch.Tensor([]).to(device)\r\n\r\n else:\r\n predictions0 = predictions\r\n scores0 = torch.max(predictions0, dim=1)[0]\r\n 
labels0 = torch.argmax(predictions0, dim=1)\r\n boxes0 = boxes\r\n\r\n sort = torch.argsort(scores0, descending=True)\r\n boxes1, labels1, scores1, predictions1 = boxes0[sort], labels0[sort], scores0[sort], predictions0[sort]\r\n\r\n boxes2, labels2, scores2, predictions2 = boxes1[:k], labels1[:k] + 1, scores1[:k], predictions1[:k]\r\n\r\n return predictions2, boxes2, labels2, scores2", "def bboxes_sort(classes, scores, bboxes, top_k = 400):\n# if priority_inside:\n# inside = (bboxes[:, 0] > margin) & (bboxes[:, 1] > margin) & \\\n# (bboxes[:, 2] < 1-margin) & (bboxes[:, 3] < 1-margin)\n# idxes = np.argsort(-scores)\n# inside = inside[idxes]\n# idxes = np.concatenate([idxes[inside], idxes[~inside]])\n idxes = np.argsort(-scores)\n classes = classes[idxes][:top_k]\n scores = scores[idxes][:top_k]\n bboxes = bboxes[idxes][:top_k]\n return classes, scores, bboxes", "def sorted_top_k(item_counts, k):\n # Partitioning runs in O(d) time.\n top_k_unsorted = np.argpartition(-item_counts, k - 1)[:k]\n # Sorting highest k counts runs in O(k * log(k)) time.\n sorting_order = np.argsort(item_counts[top_k_unsorted])[::-1]\n return top_k_unsorted[sorting_order]", "def topk(vec, k):\n vec = torch.topk(vec, k)\n return vec.view(-1).data.tolist()", "def _topk(vec, k):\n # on a gpu, sorting is faster than pytorch's topk method\n #topkIndices = torch.sort(vec**2)[1][-k:]\n # however, torch.topk is more space efficient\n\n # topk on cuda returns what looks like uninitialized memory if\n # vals has nan values in it\n # saving to a zero-initialized output array instead of using the\n # output of topk appears to solve this problem\n topkVals = torch.zeros(k, device=vec.device)\n topkIndices = torch.zeros(k, device=vec.device).long()\n torch.topk(vec**2, k, sorted=False, out=(topkVals, topkIndices))\n\n ret = torch.zeros_like(vec)\n if len(vec.size()) == 1:\n ret[topkIndices] = vec[topkIndices]\n elif len(vec.size()) == 2:\n rows = torch.arange(vec.size()[0]).view(-1,1)\n ret[rows, topkIndices] = vec[rows, topkIndices]\n return ret", "def bboxes_sort(classes, scores, bboxes, top_k=400):\n # if priority_inside:\n # inside = (bboxes[:, 0] > margin) & (bboxes[:, 1] > margin) & \\\n # (bboxes[:, 2] < 1-margin) & (bboxes[:, 3] < 1-margin)\n # idxes = np.argsort(-scores)\n # inside = inside[idxes]\n # idxes = np.concatenate([idxes[inside], idxes[~inside]])\n idxes = np.argsort(-scores)\n classes = classes[idxes][:top_k]\n scores = scores[idxes][:top_k]\n bboxes = bboxes[idxes][:top_k]\n return classes, scores, bboxes", "def argsort_desc(scores):\n return np.column_stack(np.unravel_index(np.argsort(-scores.ravel()), scores.shape))", "def top_k(input, k=1, sorted=True, index_type=dtypes.int32, name=None): # pylint: disable=redefined-builtin\n return gen_nn_ops.top_kv2(\n input, k=k, sorted=sorted, index_type=index_type, name=name\n )", "def partition_arg_topK(matrix, K, axis=0):\n a_part = np.argpartition(matrix, K, axis=axis)\n if axis == 0:\n row_index = np.arange(matrix.shape[1 - axis])\n a_sec_argsort_K = np.argsort(matrix[a_part[0:K, :], row_index], axis=axis)\n return a_part[0:K, :][a_sec_argsort_K, row_index]\n else:\n column_index = np.arange(matrix.shape[1 - axis])[:, None]\n a_sec_argsort_K = np.argsort(matrix[column_index, a_part[:, 0:K]], axis=axis)\n return a_part[:, 0:K][column_index, a_sec_argsort_K]", "def _prob_in_top_k(\n clean_values, noisy_values, noise_stddev, noisy_top_values, k):\n batch = tf.shape(clean_values)[0]\n m = tf.shape(noisy_top_values)[1]\n top_values_flat = tf.reshape(noisy_top_values, 
[-1])\n # we want to compute the threshold that a particular value would have to\n # exceed in order to make the top k. This computation differs depending\n # on whether the value is already in the top k.\n threshold_positions_if_in = tf.range(batch) * m + k\n threshold_if_in = tf.expand_dims(\n tf.gather(top_values_flat, threshold_positions_if_in), 1)\n is_in = tf.greater(noisy_values, threshold_if_in)\n if noise_stddev is None:\n return tf.to_float(is_in)\n threshold_positions_if_out = threshold_if_in - 1\n threshold_if_out = tf.expand_dims(\n tf.gather(top_values_flat, threshold_positions_if_out), 1)\n # is each value currently in the top k.\n prob_if_in = _normal_distribution_cdf(clean_values - threshold_if_in,\n noise_stddev)\n prob_if_out = _normal_distribution_cdf(clean_values - threshold_if_out,\n noise_stddev)\n prob = tf.where(is_in, prob_if_in, prob_if_out)\n return prob", "def get_top_values(weights, top_k=4):\n top_idx = np.argsort(weights)[-top_k:]\n top_idx = np.flip(top_idx)\n top_values = [weights[i] for i in top_idx]\n return top_idx, top_values", "def _cmplx_sort(p):\n indx = cupy.argsort(cupy.abs(p))\n return cupy.take(p, indx, 0), indx", "def argsort(tensor, axis):\n raise NotImplementedError", "def top_k(m, k):\n ml = m.tolil()\n ms = [_top_k(d, r, k) for d, r in zip(ml.data, ml.rows)]\n return zip(*ms)", "def top_indices(preds, num):\n sort_preds = np.sort(preds, 1)\n sort_preds = np.flip(sort_preds)\n sort_index = np.argsort(preds, 1)\n sort_index = np.flip(sort_index)\n\n print(f\"Top {num} results:\")\n for i in range(num):\n print(sort_index[0][i], sort_preds[0][i])\n\n return 0", "def generate_order(arr, descending=True):\n sorted_indices = torch.argsort(arr, 0, descending=descending)\n return sorted_indices.reshape((len(arr), ))", "def reorder_after_dim_reduction(order):\n arr = sorted(range(len(order)), key=lambda x: order[x])\n return tuple(arr)", "def _my_top_k(x, k):\n if k > 10:\n return tf.nn.top_k(x, k)\n values = []\n indices = []\n depth = tf.shape(x)[1]\n for i in range(k):\n values.append(tf.reduce_max(x, 1))\n argmax = tf.argmax(x, 1)\n indices.append(argmax)\n if i + 1 < k:\n x += tf.one_hot(argmax, depth, -1e9)\n return tf.stack(values, axis=1), tf.to_int32(tf.stack(indices, axis=1))", "def top_k_accuracy(y_true : np.ndarray, probs: np.ndarray, k: int) -> float:\r\n \r\n # Top k sorted preds\r\n sorted_probs = probs.argsort()[:,-k:]\r\n\r\n # Does the truth intersect with any of the top k predictions?\r\n matches = np.max(sorted_probs == y_true.reshape(-1, 1), axis=1)\r\n return matches.mean()", "def decode(self, start: np.ndarray, end: np.ndarray, topk: int,\n max_answer_len: int, sort_with_prob: bool = True) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n # Ensure we have batch axis\n if start.ndim == 1:\n start = start[None]\n\n if end.ndim == 1:\n end = end[None]\n\n # Compute the score of each tuple(start, end) to be the real answer\n if sort_with_prob:\n candidates = np.matmul(np.expand_dims(start, -1), np.expand_dims(end, 1))\n else:\n candidates = np.expand_dims(start, -1) + np.expand_dims(end, 1)\n\n # Remove candidates with end < start\n candidates[..., np.tri(*candidates.shape[-2:], k=-1, dtype=bool)] = candidates.min() # noqa\n # Remove candidates with end - start > max_answer_len\n candidates[..., ~np.tri(*candidates.shape[-2:], k=max_answer_len - 1, dtype=bool)] = candidates.min() # noqa\n\n # Inspired by Chen & al. 
(https://github.com/facebookresearch/DrQA)\n scores_flat = candidates.flatten()\n if topk == 1:\n idx_sort = [np.argmax(scores_flat)]\n elif len(scores_flat) < topk:\n idx_sort = np.argsort(-scores_flat)\n else:\n idx = np.argpartition(-scores_flat, topk)[0:topk]\n idx_sort = idx[np.argsort(-scores_flat[idx])]\n\n return np.unravel_index(idx_sort, candidates.shape)", "def as_top_k(\n self,\n k,\n matrix,\n type_name,\n simplify_unitsize_minibatch = True\n ):\n if k < 1:\n raise ValueError('k must be positive but it is %d' % k)\n result = []\n num_entity_sets = matrix.shape[0]\n # Find the indices with the highest valued weights.\n top_k_idx = np.flip(np.argsort(matrix, axis=1)[:, -k:], axis=1)\n row_index = np.arange(num_entity_sets).repeat(k)\n column_index = top_k_idx.reshape(-1)\n # Slice, reshape, and sort descending.\n top_k_weights = np.flip(\n np.sort(\n matrix[row_index, column_index].reshape(num_entity_sets, k),\n axis=1),\n axis=1)\n # Convert column indices into entities.\n for indices, weights in zip(top_k_idx, top_k_weights):\n entities = [\n self.get_entity_name(entity_index, type_name)\n for entity_index in indices\n ]\n result.append(list(zip(entities, weights)))\n if simplify_unitsize_minibatch and len(result) == 1:\n return result[0]\n else:\n return result", "def sortFastND(fitnesses, k, probabilitiesForObjectives, first_front_only=False):\n if k == 0:\n return []\n\n N = len(fitnesses)\n pareto_fronts = []\n\n pareto_fronts.append([])\n pareto_sorted = 0\n dominating_fits = [0] * N\n dominated_fits = [list() for i in range(N)]\n\n # Rank first Pareto front\n for i, fit_i in enumerate(fitnesses):\n for j, fit_j in enumerate(fitnesses[i+1:], i+1):\n if isStochasticallyDominated(fit_i, fit_j,probabilitiesForObjectives):\n dominating_fits[i] += 1\n dominated_fits[j].append(i)\n elif isStochasticallyDominated(fit_j, fit_i,probabilitiesForObjectives):\n dominating_fits[j] += 1\n dominated_fits[i].append(j)\n if dominating_fits[i] == 0:\n pareto_fronts[-1].append(i)\n pareto_sorted += 1\n\n # Rank the next front until all individuals are sorted or the given\n # number of individual are sorted\n if not first_front_only:\n k = min(N, k)\n while pareto_sorted < k:\n pareto_fronts.append([])\n for indice_p in pareto_fronts[-2]:\n for indice_d in dominated_fits[indice_p]:\n dominating_fits[indice_d] -= 1\n if dominating_fits[indice_d] == 0:\n pareto_fronts[-1].append(indice_d)\n pareto_sorted += 1\n\n ranks = {}\n for i, front in enumerate(pareto_fronts):\n for j, idx in enumerate(front):\n front[j] = fitnesses[idx]\n ranks[fitnesses[idx]] = i\n\n return pareto_fronts, ranks", "def sorted(self, key=None, reverse=True, **kwargs):\n def hv_improvement(kernel):\n if kernel.objective_values is None:\n return float('-inf')\n return self._UHVI_indicator(kernel)(kernel.objective_values)\n if key is None:\n key = hv_improvement\n return sorted(self, key=key, reverse=reverse, **kwargs)", "def get_top_k(matrix,k):\n assert k <= matrix.shape[1]\n col_inds = np.argpartition(matrix, -k)[:,-k:].flatten()\n row_inds = np.repeat(range(matrix.shape[0]),k)\n\n vals = matrix[row_inds, col_inds]\n\n return vals, col_inds", "def sort_probs(probs_list):\n return sorted(probs_list, key=lambda x: x[1])", "def kth_smallest_alt(arr1, arr2, k):\n pass", "def predict(probs):\n return np.argmax(probs, axis=0)", "def inverse_argsort(arr):\n\n forward = np.argsort(arr)\n inverse = np.empty_like(forward)\n inverse[forward] = np.arange(len(forward))\n return inverse", "def get_switchy_score_order(x):\n 
switchy_scores = np.apply_along_axis(switchy_score, axis=0, arr=x)\n return np.argsort(switchy_scores)", "def sort(tensor, axis):\n raise NotImplementedError", "def rank(m, axis=0, method='average', ascending=False, reverse=False):\n if isinstance(m, list):\n m = np.array(m)\n if ascending == reverse: # greater is better (descending order)\n m = -m # take the opposite to inverse rank\n r = np.apply_along_axis(rankdata, axis, m, method=method) # convert values to ranking in all rows or columns\n return process_vote(m, r)", "def get_top_k(similarity, question_ids, paragraph_ids, k):\n n_questions = similarity.shape[0]\n idxs = [np.argsort(similarity[row,:])[-k:][::-1] for row in range(n_questions)]\n out = {question_ids[i]:np.array(paragraph_ids)[idxs[i]] for i in range(n_questions)}\n return out", "def argsort(self, axis=-1, kind=None, order=None):\n return self.__array__().argsort(axis, kind, order)", "def top(self, array, key, k):\n\n return heapq.nlargest(\n k,\n range(len(array)),\n key\n )", "def pagerank(dict_prefs, nitems, eps_search=20):\n prefs_mat=np.zeros((nitems,nitems))\n for k,v in dict_prefs.items():\n if v==0:\n continue\n elif v>0:\n prefs_mat[k[1],k[0]]+=v\n else:\n prefs_mat[k[0],k[1]]-=v\n prefs_mat_orig=prefs_mat.copy()\n eps_grid=list(.5**np.logspace(0,1,eps_search))\n best=-10^5\n best_order=None\n \n for eps in eps_grid:\n prefs_mat=prefs_mat_orig.copy()\n for i in range(nitems):\n prefs_mat[:,i]+=eps\n tot=np.sum(prefs_mat[:,i])\n prefs_mat[:,i]=prefs_mat[:,i]/tot\n\n \n pr=np.ones((nitems,1))/nitems\n for i in range(30):\n pr=prefs_mat.dot(pr)\n lst_pagerank=list(np.argsort(pr.reshape(-1)))\n score_this_order=eval_ordering(lst_pagerank,dict_prefs)\n if score_this_order>best:\n best=score_this_order\n best_order=deepcopy(lst_pagerank)\n return best_order", "def find_k_most_uncertain(nn, sess, X, batch_size=None, k=2,\n pool_size=None):\n\n # Initialization\n results = []\n i = -1\n\n if batch_size is None:\n _batch_size = X.shape[0]\n else:\n _batch_size = batch_size\n\n # Create the pool\n order = np.arange(X.shape[0])\n\n if pool_size is not None:\n order = np.random.choice(X.shape[0], \n min(X.shape[0], pool_size),\n replace=False)\n X_pool = X[order, :].copy()\n else:\n X_pool = X.copy()\n\n # Loop over the batches\n for i in range(X_pool.shape[0]/_batch_size):\n\n feed_dict = {}\n feed_dict[nn.input_tensor] = X_pool[i * _batch_size:\n (i+1) * _batch_size]\n\n # Predict the batch\n pred = sess.run(nn.prediction, feed_dict=feed_dict)\n\n # The most uncertain is the closest to 0.5\n pred = np.abs(0.5 - pred).reshape(-1)\n\n # Sort it by uncertainty\n batch_order = np.argsort(pred)\n pred = pred[batch_order]\n\n # Create associated indices\n to_zip = order[range(i * _batch_size, i * _batch_size + pred.shape[0])]\n to_zip = to_zip[batch_order]\n\n results = kmin(results, zip(pred, to_zip), k)\n\n # Last uncomplete batch\n feed_dict = {}\n feed_dict[nn.input_tensor] = X_pool[(i+1) * _batch_size:]\n\n # Predict the last batch\n pred = sess.run(nn.prediction, feed_dict=feed_dict)\n\n # Sort it by uncertainty\n pred = np.abs(0.5 - pred).reshape(-1)\n batch_order = np.argsort(pred)\n pred = pred[batch_order]\n\n # Create associated indices\n to_zip = order[(i + 1) * _batch_size:]\n to_zip = to_zip[batch_order]\n\n results = kmin(results, zip(pred, to_zip), k)\n\n return [i[1] for i in results]", "def _rank_array(a):\n a = np.array(a)\n b = a.argsort()\n c = np.empty_like(b)\n c[b] = np.arange(1, len(a) + 1)\n return c", "def top_k(indices, words, k):\n inds = 
np.argpartition(indices, -k)[-k:]\n topkwords = words[inds]\n topkvals = indices[inds]\n top = [(word, val) for word, val in zip(topkwords, topkvals)]\n top = sorted(top, key=lambda t: t[1], reverse=True)\n return top", "def in_top_k(predictions, targets, k, name=None):\n with ops.name_scope(name, \"in_top_k\"):\n return gen_nn_ops.in_top_kv2(predictions, targets, k, name=name)", "def topKFrequent(self, nums, k):\n\n # Generate dictionary of frequencies\n frequency = defaultdict(int)\n for num in nums:\n frequency[num] += 1\n\n k_most = []\n # Grab the k most frequent values, sorted descending\n for val in sorted(frequency.values())[::-1][:k]:\n # Append the associated key to k_most if it isn't already there\n k_most.append([key for key, value in frequency.items() if (value == val and not key in k_most)][0])\n return k_most", "def predict(probs):\n # Your code here.\n return np.argmax(probs, axis=1)", "def _decimate_k(self):\n # Setting the seed here once so as to get the same set\n # independent of filters.\n random.seed(1)\n k_sorted = sorted(self.kvec.keys())\n k_selected = []\n for knorm in k_sorted:\n nkmax = len(self.kvec[knorm])\n k_selected.append(random.sample(list(range(nkmax)), min(self.nk, nkmax)))\n return k_sorted, k_selected", "def double_sort(data, last_var=0):\n \n # doing simply np.sort(np.sort(pairs, axis=1), axis=0)\n # would uncouple first and second elements of pairs\n # during the second sorting (axis=0)\n data = np.sort(data, axis=1)\n x_sort = np.argsort(data[:, 0])\n data = data[x_sort]\n \n return data", "def compute_pr_at_k(k, true_labels, test_statistics=None, pvalues=None):\n if (test_statistics is not None) and (pvalues is not None):\n raise ValueError(\"You cannot supply both `test_statistics` and `pvalues`.\")\n\n if test_statistics is not None:\n res = test_statistics\n reverse_sorting = True\n else:\n res = pvalues\n reverse_sorting = False\n\n label_matrix = np.zeros((len(true_labels), len(true_labels)))\n c1 = (true_labels == 0).sum()\n label_matrix[:c1, :c1] = 1\n\n triu_idx = np.triu_indices_from(res, k=1)\n labels_vec = label_matrix[triu_idx]\n res_vec = res[triu_idx]\n\n idx = np.argsort(res_vec)\n if reverse_sorting:\n idx = idx[::-1]\n sorted_labels = labels_vec[idx]\n\n if isinstance(k, int):\n ks = range(1, k + 1)\n else:\n ks = k\n\n precisions = [sorted_labels[:k].mean() for k in ks]\n recalls = [sorted_labels[:k].sum() / sorted_labels.sum() for k in ks]\n\n return precisions, recalls", "def getTopK(counter, tup, k=25):\n adj_list = [] #list of tuples that co occur with tup at least once\n for t in counter.relgram_map[tup]:\n adj_list.append((tup, t)) #add all that appear after tup\n\n\n for i in counter.relgram_map: #find any that appear before tup\n for j in counter.relgram_map[i]:\n if j == tup and i not in adj_list: \n adj_list.append((i, tup))\n\n scores = [(x, SCP(counter, x[0], x[1])) for x in adj_list] \n return sorted(scores, key=lambda x: x[1], reverse=True)", "def sort_backward(mat, mat_index, axis=0):\n if axis == 0:\n mat = np.transpose(mat)\n mat_index = np.transpose(mat_index)\n mat_comb = np.asarray(np.dstack((mat_index, mat)))\n mat_comb_sort = np.asarray(\n [row[row[:, 0].argsort()] for row in mat_comb])\n mat_sort = mat_comb_sort[:, :, 1]\n if axis == 0:\n mat_sort = np.transpose(mat_sort)\n return mat_sort", "def sort_train_labels_knn(Dist, y):\n return y[Dist.argsort(kind='mergesort')]", "def sort_train_labels_knn(Dist, y):\n order = Dist.argsort(kind='mergesort')\n return y[order]", "def k_rank_approximate(doc_matrix, 
k):\n return []", "def in_top_k_v2(targets, predictions, k, name=None):\n return in_top_k(predictions, targets, k, name)", "def _sort_rows(matrix, num_rows):\n tmatrix = array_ops.transpose(matrix, [1, 0])\n sorted_tmatrix = nn_ops.top_k(tmatrix, num_rows)[0]\n return array_ops.transpose(sorted_tmatrix, [1, 0])", "def argsort(a, axis=-1, kind='quicksort', order=None):\r\n return ArgSortOp(kind, order)(a, axis)", "def argsort(data, reversed=False):\n\n index = np.arange(len(data))\n key = lambda x: data[x]\n sortidx = sorted(index, key=key,reverse=reversed)\n sortidx = np.array(list(sortidx))\n return sortidx", "def get_order(order, gt_idx_v):\n o = np.tile(order, (gt_idx_v.shape[0],1))\n g = np.expand_dims(gt_idx_v, 1)\n o = o - g\n l, c = np.where(o==0)\n return c # order[c[0]] = gt_idx_v[0]", "def predict_top_k(self, inputs_values, k):\n return self.sess.run(self.top_k_softmax, feed_dict={\n self.inputs: inputs_values,\n self.top_k_placeholder: k\n })", "def _get_k_largest(lst, k):\n sorted_lst = sorted([(val, index) for index, val in enumerate(lst)])\n return list(reversed(sorted_lst[-k:]))", "def rank(self, k, arr):\n\n # arr must be sorted\n if not(arr[0] < arr[len(arr)//2] < arr[len(arr)-1]):\n raise ValueError(\"Array must be sorted\")\n\n lo = 0\n hi = len(arr) - 1\n\n while lo <= hi:\n mid = lo + (hi - lo) // 2\n\n if k < arr[mid]:\n hi = mid - 1\n elif k > arr[mid]:\n lo = mid + 1\n else:\n return mid\n\n return -1", "def find_order(input, k):\n if ( k >= len(input) or k < 0 ):\n return None\n elif len(input) == 0:\n return None\n elif len(input) == 1:\n # k should be 0\n assert k == 0\n return input[0]\n else:\n pivot = input[len(input)//2]\n left = [x for x in input if x <= pivot]\n left.remove(pivot)\n \n if len(left) == k:\n return pivot\n elif len(left) > k:\n return find_order(left, k)\n else:\n right = [x for x in input if x > pivot]\n return find_order(right, k-1-len(left))", "def ordered_predictions(xs, ys, preds, reverse=False):\n assert len(xs) == len(ys) == len(preds)\n return list(zip(*[(x, y, z) for x, y, z in sorted(zip(xs, ys, preds), key=lambda pair: pair[2], reverse=reverse)]))", "def k_modes_array2(arr, k):\n k_modes = list() # min-heap\n k_modes.append(0) # 1-indexed\n counter = defaultdict(int)\n for i in range(len(arr)):\n x = arr[i]\n counter[x] += 1\n\n # 1. k_modes min-heap not full yet, add current value to k_modes\n if len(k_modes)-1 < k and x not in [y[1] for y in k_modes[1:]]:\n heappush(k_modes, (counter[x], x))\n\n # 2. 
k_modes min-heap full and current value count > current min,\n # remove min from k_modes and add current value to k_modes\n elif counter[x] > k_modes[1] and x not in [y[1] for y in k_modes[1:]]:\n heappop(k_modes)\n heappush(k_modes, (counter[x], x))\n\n return sorted(k_modes[1:])", "def kth_largest(arr: list, k: int):\n # Do not search if k is larger than total number of elements\n if k > len(arr):\n raise IndexError\n # Count all numbers\n nums = Counter(arr)\n # Go from the largest to smaller ones\n for key in sorted(nums, reverse=True):\n if nums[key] >= k:\n return key\n else:\n k -= nums[key]", "def calc_ply_order(constraints, targets):\r\n if constraints.sym:\r\n ply_order = np.arange(targets.n_plies // 2 + targets.n_plies % 2)\r\n return ply_order\r\n\r\n order_before_sorting = np.arange(targets.n_plies)\r\n ply_order = np.zeros((targets.n_plies,), int)\r\n ply_order[0::2] = order_before_sorting[\r\n :targets.n_plies // 2 + targets.n_plies % 2]\r\n ply_order[1::2] = order_before_sorting[\r\n targets.n_plies // 2 + targets.n_plies % 2:][::-1]\r\n return ply_order", "def sorting_generator(G,desired_numberofcolumns):\n dimension = len(G)\n\n if dimension == desired_numberofcolumns:\n return G , None\n indexlist = np.argsort(np.linalg.norm( G ,axis=0 ,ord = None))\n sortedG = (G)[:,indexlist]\n G_untouched = sortedG[: , - (desired_numberofcolumns - dimension ): ]\n G_reduced = sortedG[: , : -(desired_numberofcolumns - dimension ) ]\n \n return G_reduced , G_untouched", "def predict_prob(self, x, y, initial_lexsort=True):\n\n x = np.asarray(x).ravel()\n y = np.asarray(y).ravel()\n\n if not x.size or not y.size:\n return (np.nan, np.nan) # Return NaN if arrays are empty\n\n n = np.int64(len(x))\n temp = list(range(n)) # support structure used by mergesort\n\n # this closure recursively sorts sections of perm[] by comparing\n # elements of y[perm[]] using temp[] as support\n # returns the number of swaps required by an equivalent bubble sort\n\n def mergesort(offs, length):\n exchcnt = 0\n if length == 1:\n return 0\n if length == 2:\n if y[perm[offs]] <= y[perm[offs + 1]]:\n return 0\n t = perm[offs]\n perm[offs] = perm[offs + 1]\n perm[offs + 1] = t\n return 1\n length0 = length // 2\n length1 = length - length0\n middle = offs + length0\n exchcnt += mergesort(offs, length0)\n exchcnt += mergesort(middle, length1)\n if y[perm[middle - 1]] < y[perm[middle]]:\n return exchcnt\n # merging\n i = j = k = 0\n while j < length0 or k < length1:\n if k >= length1 or (j < length0 and y[perm[offs + j]] <=\n y[perm[middle + k]]):\n temp[i] = perm[offs + j]\n d = i - j\n j += 1\n else:\n temp[i] = perm[middle + k]\n d = (offs + i) - (middle + k)\n k += 1\n if d > 0:\n exchcnt += d\n i += 1\n perm[offs:offs + length] = temp[0:length]\n return exchcnt\n\n # initial sort on values of x and, if tied, on values of y\n if initial_lexsort:\n # sort implemented as mergesort, worst case: O(n log(n))\n perm = np.lexsort((y, x))\n else:\n # sort implemented as quicksort, 30% faster but with worst case: O(n^2)\n perm = list(range(n))\n perm.sort(key=lambda a: (x[a], y[a]))\n\n # compute joint ties\n first = 0\n t = 0\n for i in range(1, n):\n if x[perm[first]] != x[perm[i]] or y[perm[first]] != y[perm[i]]:\n t += ((i - first) * (i - first - 1)) // 2\n first = i\n t += ((n - first) * (n - first - 1)) // 2\n\n # compute ties in x\n first = 0\n u = 0\n for i in range(1, n):\n if x[perm[first]] != x[perm[i]]:\n u += ((i - first) * (i - first - 1)) // 2\n first = i\n u += ((n - first) * (n - first - 1)) // 2\n\n # 
count exchanges\n exchanges = mergesort(0, n)\n # compute ties in y after mergesort with counting\n first = 0\n v = 0\n for i in range(1, n):\n if y[perm[first]] != y[perm[i]]:\n v += ((i - first) * (i - first - 1)) // 2\n first = i\n v += ((n - first) * (n - first - 1)) // 2\n\n tot = (n * (n - 1)) // 2\n if tot == u or tot == v:\n return 0\n # return (np.nan, np.nan) # Special case for all ties in both ranks\n\n p_k = (((tot - (v + u - t)) - 2.0 * exchanges) / (tot - u) + 1) / 2\n\n return p_k", "def sort_forward(mat, axis=0):\n if axis == 0:\n mat = np.transpose(mat)\n (nrow, ncol) = mat.shape\n list_index = np.arange(0.0, ncol, 1.0)\n mat_index = np.tile(list_index, (nrow, 1))\n mat_comb = np.asarray(np.dstack((mat_index, mat)))\n mat_comb_sort = np.asarray(\n [row[row[:, 1].argsort()] for row in mat_comb])\n mat_sort = mat_comb_sort[:, :, 1]\n mat_index = mat_comb_sort[:, :, 0]\n if axis == 0:\n mat_sort = np.transpose(mat_sort)\n mat_index = np.transpose(mat_index)\n return mat_sort, mat_index", "def _decode_by_order(self):\n self.best_path = []\n S = np.argsort(-self.prob)\n for i in range(self.N):\n for idx in S[i]:\n if idx not in self.best_path:\n self.best_path.append(idx)\n break \n self.best_path = np.array(self.best_path, dtype = np.int)\n if len(self.best_path) == self.N:\n return True\n else:\n return False", "def reverseSelectionSort(l):\r\n for k in range(len(l) - 1):\r\n\r\n min_pos = k\r\n for j in range(k + 1, len(l)):\r\n if l[j] > l[min_pos]:\r\n min_pos = j\r\n\r\n l[min_pos], l[k] = l[k], l[min_pos]", "def _argsort(seq):\n # http://stackoverflow.com/questions/3071415/efficient-method-to-calculate-the-rank-vector-of-a-list-in-python\n return sorted(range(len(seq)), key=seq.__getitem__)", "def topk_accuracies(preds, labels, ks):\n num_topks_correct = topks_correct(preds, labels, ks)\n return [(x / preds.size(0)) * 100.0 for x in num_topks_correct]", "def fn(arr, k):\n ans = []\n for i, x in enumerate(arr): \n while ans and ans[-1] < x and len(ans) + len(arr) - i > k: ans.pop()\n if len(ans) < k: ans.append(x)\n return ans", "def unfold_axis(data, k):\n\n target_dim = k\n total_dim = len(data.shape)\n\n dim_list = []\n for i in range(total_dim):\n dim_list.append((target_dim - i) % total_dim)\n dim_order = tuple(dim_list)\n\n data_unfold = np.transpose(data,dim_order)\n data_unfold = np.reshape(data_unfold,[data.shape[k],int(data.size/data.shape[k])])\n return data_unfold", "def _get_ranking_orders(label_scores, predicted_scores, top_k_int=1, use_predicted_order=False):\n # sort predictions_scores and label_scores\n # size [batch_size/num of DataRecords, 1]\n # label_scores = tf.Print(label_scores, [label_scores], 'label_scores: \\n', summarize=200)\n # predicted_scores = tf.Print(predicted_scores, [predicted_scores], 'predicted_scores: \\n', summarize=200)\n predicted_scores = tf.reshape(predicted_scores, [-1, 1])\n label_scores = tf.reshape(label_scores, [-1, 1])\n if not use_predicted_order:\n # sort predicitons and use the indices to obtain the relevance scores of the predicted order\n sorted_predictions, ordered_predictions_indices = tf.nn.top_k(\n tf.transpose(predicted_scores), k=top_k_int)\n ordered_predictions_indices_for_labels = tf.transpose(ordered_predictions_indices)\n # predicted_order contians the relevance scores of the predicted order\n predicted_order = tf.gather_nd(label_scores, ordered_predictions_indices_for_labels)\n\n # !!!!! 
actions sudo predicted_scores (descending)\n else:\n indices = tf.range(top_k_int)\n predicted_order = tf.gather(predicted_scores, indices)\n sorted_predictions = tf.gather(predicted_scores, indices)\n # label_scores = tf.reshape(glrank_complete_labels, [-1, 1])\n \n # sorted_labels contians the relevance scores of the correct order\n sorted_labels, ordered_labels_indices = tf.nn.top_k(\n tf.transpose(label_scores), k=top_k_int)\n sorted_labels = tf.transpose(sorted_labels)\n\n # sorted_labels = tf.Print(sorted_labels, [sorted_labels], 'sorted_labels: \\n', summarize=200)\n # predicted_order = tf.Print(predicted_order, [predicted_order], 'predicted_order: \\n', summarize=200)\n return sorted_labels, predicted_order, sorted_predictions", "def get_top_predictions(preds, top=5):\n results = []\n for pred in preds:\n top_indices = pred.argsort()[-top:][::-1]\n # result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]\n # result.sort(key=lambda x: x[2], reverse=True)\n # results.append(result)\n return top_indices", "def js_sort(d1, d2, beta):\n n = d1.shape[0]\n n_selected = int(n // (1.0 + beta))\n d1_selected = torch.topk(d1, n_selected, largest=False, sorted=False)[0]\n return js_div(d1_selected, d2)", "def knn0(pnts, p, k):\r\n p = np.asarray(p)\r\n pnts = np.asarray(pnts)\r\n diff = pnts - p[np.newaxis, :]\r\n d = np.einsum('ij,ij->i', diff, diff)\r\n idx = np.argsort(d)[:k]\r\n# s = [i.tolist() for i in pnts[idx]]\r\n return pnts[idx].tolist()", "def get_k_of_each(y, k):\n if len(y.shape) != 2:\n raise ValueError(\"This function expects a 2D array.\")\n\n ixes = []\n ymax = np.argmax(y, axis=1)\n\n for i in range(y.shape[1]):\n ixes_i = np.where(ymax == i)[0]\n ixes.append(npr.choice(ixes_i, min(len(ixes_i), k), replace=False))\n\n return np.concatenate(ixes)", "def pointless_sort(x):\n return np.array([1,2,3])", "def findKthLargest(self, nums: List[int], k: int) -> int:\n return sorted(nums)[-k]", "def tensor_resort(inputs, tensor_order):\n pass", "def find_max_in_array(arr, k):\r\n print(\" Amazon interview question\")\r\n arr[:] = sorted(arr)\r\n return ((arr[len(arr)-k]))", "def get_top_k(leaderboard, k):\n configurations = pd.DataFrame.from_dict(leaderboard, orient='index')\n configurations = configurations.sort_values(\n ['score'], ascending=False).reset_index(drop=True).head(k)\n configurations = configurations.to_dict(orient='index')\n\n return configurations", "def sortbatch(q_batch, a_batch, q_lens, a_lens):\n maxlen_q = max(q_lens)\n maxlen_a = max(a_lens)\n q=q_batch[:,:maxlen_q-1]\n a=a_batch[:,:maxlen_a-1]\n sorted_idx = torch.LongTensor(a_lens.numpy().argsort()[::-1].copy())\n return q[sorted_idx], a[sorted_idx], q_lens[sorted_idx], a_lens[sorted_idx]", "def top_k_frequent_elements(nums, k):\r\n freq_dict = {}\r\n for elem in nums:\r\n freq_dict[elem] = freq_dict.get(elem, 0) + 1\r\n \r\n return sorted(freq_dict.keys(), key= lambda x: freq_dict[x], reverse=True)[:k]", "def largestSubarray(self, nums: List[int], k: int) -> List[int]:\n\n if k == 1:\n return [max(nums)]\n\n hash_map = {}\n for i, n in enumerate(nums):\n hash_map[n] = i\n\n candidates = nums[: len(nums) - k + 1]\n print(candidates)\n mx = max(candidates)\n mx_idx = hash_map[mx]\n op = nums[mx_idx : mx_idx + k]\n return op", "def selection_sort_max_version(arr):\n # No need to sort\n if arr is None:\n return arr\n\n n = len(arr)\n if n <= 1:\n return arr\n\n # i - range in order\n # j - range out of order\n for i in range(n - 1, 0, -1):\n max_index = i\n j = i - 1\n\n # select max element in 
range[0, j]\n while j >= 0:\n if arr[j] > arr[max_index]:\n max_index = j\n j -= 1\n\n arr[i], arr[max_index] = arr[max_index], arr[i]\n\n return arr", "def solve_sort(self):\n if self.k < 0 or self.k > len(self.numbers):\n return None\n\n self.numbers.sort() # in place\n return self.numbers[-self.k]", "def sort(self, Ncol, order):\n self.emit(SIGNAL(\"layoutAboutToBeChanged()\"))\n self.arraydata = sorted(self.arraydata, key=operator.itemgetter(Ncol)) \n if order != Qt.DescendingOrder:\n self.arraydata.reverse()\n self.emit(SIGNAL(\"layoutChanged()\"))", "def sort(self, Ncol, order):\n self.emit(SIGNAL(\"layoutAboutToBeChanged()\"))\n self.arraydata = sorted(self.arraydata, key=operator.itemgetter(Ncol)) \n if order == Qt.DescendingOrder:\n self.arraydata.reverse()\n self.emit(SIGNAL(\"layoutChanged()\"))", "def torch_searchsort(x, xs):\n x = torch.cat((torch.tensor([-10.0]), x, torch.tensor([10000.0])))\n ind = 0\n for i in range(1, len(x) - 1):\n if x[i-1] < xs and xs <= x[i]:\n return i - 1", "def quick_select(arr: list, k: int) -> int:\n start, end = 0, len(arr) - 1\n\n while start <= end:\n pivot = randint(start, end)\n arr[pivot], arr[end] = arr[end], arr[pivot] # important in case of random pivot\n pivot = end\n i, j = start - 1, start\n\n while j < end:\n if arr[j] < arr[pivot]:\n i += 1\n arr[i], arr[j] = arr[j], arr[i]\n j += 1\n\n arr[j], arr[i + 1] = arr[i + 1], arr[j]\n\n if i + 1 == k - 1:\n return arr[i + 1]\n elif i + 1 < k - 1:\n start = i + 2\n else:\n end = i\n\n return -1" ]
[ "0.7283214", "0.6759031", "0.58316225", "0.58071005", "0.5797564", "0.57924837", "0.5782279", "0.5650926", "0.55970734", "0.5593298", "0.558886", "0.5553999", "0.5523884", "0.548393", "0.5477123", "0.5454491", "0.5419435", "0.5367509", "0.5322573", "0.5320744", "0.5307545", "0.5302082", "0.52796197", "0.5278191", "0.5250317", "0.5230142", "0.516807", "0.5133175", "0.5130909", "0.5104751", "0.5098642", "0.50839996", "0.5064968", "0.50545835", "0.50479114", "0.5034903", "0.50319123", "0.5004009", "0.49906766", "0.49864015", "0.4979873", "0.4965521", "0.49624002", "0.4941853", "0.49328426", "0.4921094", "0.4910862", "0.49097356", "0.48929515", "0.48792988", "0.48596242", "0.48569548", "0.48496673", "0.4841476", "0.48218372", "0.48177373", "0.480591", "0.47919494", "0.47896096", "0.47810322", "0.47808662", "0.4777107", "0.4772489", "0.47680345", "0.47597682", "0.47594777", "0.47548327", "0.47503525", "0.4727103", "0.4723197", "0.47140893", "0.4712669", "0.47071186", "0.47015926", "0.4696484", "0.46947676", "0.46877828", "0.46867877", "0.46863532", "0.46859643", "0.46850568", "0.46724215", "0.46673438", "0.46651655", "0.46636325", "0.46628103", "0.46563426", "0.46553388", "0.46482033", "0.46388906", "0.46373463", "0.46371487", "0.46314868", "0.46291628", "0.4616019", "0.4608813", "0.46057755", "0.45979854", "0.45868048", "0.45822358" ]
0.7433971
0
Calculates the sum of the logs of the diagonal elements (batchwise if necessary)
def logtrace(m: np.ndarray) -> np.ndarray:
    """
    note: performance cannot easily be improved by numba.
    `np.diagonal` is not supported by numba 0.52.0
    """
    return np.sum(np.log(np.diagonal(m, axis1=-2, axis2=-1)), axis=-1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trace(X):\r\n return extract_diag(X).sum()", "def trace(X):\n return extract_diag(X).sum()", "def ln_sum_i_neq_j(x):\n\tiw_size = x.size(0)\n\tbatch_size = x.size(1)\n\n\t# TODO: Would torch.expand instead of torch.repeat make this faster?\n\tinv_mask = torch.eye(iw_size).unsqueeze(dim=2).repeat(1, 1, batch_size)\n\tx_masked = x.view(1, iw_size, batch_size) - inv_mask*1000000.0\n\treturn logsumexp(x_masked, dim=1)", "def logsum_safe(prob_ll, atl):\n logpdf = prob_ll + K.log(atl + K.epsilon())\n alpha = tf.reduce_max(logpdf, -1, keepdims=True)\n return alpha + tf.log(tf.reduce_sum(K.exp(logpdf-alpha), -1, keepdims=True) + K.epsilon())", "def _compute_log_xi_sum(n_samples, n_components, fwdlattice, \\\n log_transmat, bwdlattice, batch_framelogprob, \\\n log_xi_sum, logprob, mask):\n\n batch_size=batch_framelogprob.shape[0]\n work_buffer = torch.zeros((batch_size, \\\n log_transmat.shape[0], \\\n log_transmat.shape[1]), \\\n device=mask.device)\n log_transmat = log_transmat.reshape(1,n_components,n_components).repeat(batch_size,1,1)\n \n \n for t in range(n_samples - 1):\n for i in range(n_components):\n work_buffer[:, i,:] = fwdlattice[:, t, i].reshape(-1, 1) + \\\n log_transmat[:, i, :] + \\\n batch_framelogprob[:, t+1, :] + \\\n bwdlattice[:, t+1, :] \\\n - logprob.reshape(-1,1)\n\n log_xi_sum = _logaddexp(log_xi_sum, work_buffer, mask[:,t+1]) \n\n return log_xi_sum", "def U(xs):\n ret = 0\n for x in xs:\n ret += log(x)\n return ret", "def log_cum_sum(A, output_sum=False):\n C = [A[0]]\n for a in A[1:]:\n C.append(log_add(C[-1], a))\n C_norm = np.array(C) - C[-1]\n if output_sum:\n return C_norm, C[-1]\n else:\n return C_norm", "def sum_log(*args):\n # if all(a == LOG_ZERO for a in args):\n # return LOG_ZERO\n a_max = np.max(args, 0)\n lsp = np.log(np.sum([np.exp(a - a_max) for a in args], 0))\n return a_max + lsp", "def _trace_sparse(op):\n return np.sum(op.diagonal())", "def nll_diagonal(self, target, mu, logvar):\n precision = torch.exp(-logvar)\n # Loss kernel\n loss = precision * (target - mu)**2.0 + logvar\n # Restore prefactors\n loss += np.log(2.0*np.pi)\n loss *= 0.5\n return torch.mean(torch.sum(loss, dim=1), dim=0)", "def sum_diag(max_lines):\r\n dsum = 1 # sum of diagonals\r\n cpt = 1 # number of lines processed\r\n val = 1 # value of the current place in the square\r\n inc = 0 # the increment between number for one line\r\n \r\n while cpt < max_lines:\r\n cpt += 2\r\n inc += 2\r\n \r\n for corner in range(4):\r\n val += inc\r\n dsum += val\r\n\r\n return dsum", "def log_sum_exp(x):\n # TF ordering\n axis = len(x.shape) - 1\n m = paddle.max(x, axis=axis)\n m2 = paddle.max(x, axis=axis, keepdim=True)\n return m + paddle.log(paddle.sum(paddle.exp(x - m2), axis=axis))", "def logsumexp(logv):\n res = logzero()\n for val in logv:\n res = logsum_pair(res, val)\n return res", "def log(self: Float[LinearOperator, \"*batch M N\"]) -> Float[LinearOperator, \"*batch M N\"]:\n return ConstantDiagLinearOperator(self.diag_values.log(), diag_shape=self.diag_shape)", "def loglloop(store):\n suml=0.0\n for i in xrange(store['yvec'].shape[0]):\n xbeta=dot(store['xmat'][i,:],store['beta'])\n suml=suml+store['yvec'][i] * xbeta - exp(xbeta)\n return suml", "def trace(self):\n # TODO 异常 非常规输入处理\n if not self.is_square():\n raise(\n ValueError, \"Cannot calculate the trace of a non-square matrix.\")\n # TODO Calculates the main diagonal num's sum\n sum = 0\n for i in range(self.h):\n sum += self[i][i]\n\n return sum", "def log(self: Float[LinearOperator, \"*batch M N\"]) -> 
Float[LinearOperator, \"*batch M N\"]:\n return self.__class__(self._diag.log())", "def log_sum_exp(vec):\r\n\r\n\r\n max_score, idx = torch.max(vec, -1, keepdim = True) # ( B, to_target, 1)\r\n # max_score = torch.gather(vec, 1, idx.view(-1, 1, m_size)).view(-1, 1, m_size) # B * M\r\n # max_score.expand_as(vec)\r\n # to_target = vec.size(1)\r\n\r\n return max_score.squeeze(-1) + torch.log(torch.sum(torch.exp(vec - max_score.expand_as(vec)), -1)) # B * to_target\r", "def log_sum_exp_pytorch(vec: torch.Tensor) -> torch.Tensor:\n maxScores, idx = torch.max(vec, 1)\n maxScores[maxScores == -float(\"Inf\")] = 0\n maxScoresExpanded = maxScores.view(vec.shape[0] ,1 , vec.shape[2]).expand(vec.shape[0], vec.shape[1], vec.shape[2])\n return maxScores + torch.log(torch.sum(torch.exp(vec - maxScoresExpanded), 1))", "def logrels(rets):\n return np.log(rets + 1)", "def logsumexp_trick(sum_term):\n max_term = np.max(sum_term)\n return max_term + np.log(np.sum(np.exp(sum_term-max_term)))", "def log_sum_exp(x, dim=0):\n max_x = torch.max(x, dim)[0]\n new_x = x - max_x.unsqueeze(dim).expand_as(x)\n return max_x + (new_x.exp().sum(dim)).log()", "def diagonalSum(M):\n diogonalSum = []\n print(M)\n for i in xrange(0, M.shape[0]):\n diogonalSum += M[i][i]\n print(i)\n \n #print(diogonalSum)\n return diogonalSum", "def _horizontal_log(self, X: np.ndarray) -> (np.ndarray, np.ndarray):\n ret_p = np.zeros_like(X)\n ret_n = np.zeros_like(X)\n log_p = self.manifold.log(X[:, :-1], X[:, 1:])\n log_n = self.manifold.log(X[:, 1:], X[:, :-1])\n ret_p[:, :-1] = log_p\n ret_n[:, 1:] = log_n\n return ret_p, ret_n", "def trace(matrix):\n\n if len(matrix[0]) == 0:\n return 0.0\n \n return float(sum(matrix[i][i] for i in range(len(matrix))))", "def logm(self, x):\n\n if K.backend() == 'theano':\n # construct theano tensor operation\n from theano.tensor.nlinalg import svd, diag\n from theano.tensor.elemwise import Elemwise\n from theano.scalar import log\n import theano.tensor as T\n # This implementation would be extremely slow. 
but efficient?\n u, d, v = svd(x)\n d += self.eps\n inner = diag(T.log(d))\n res = T.dot(u, T.dot(inner, v))\n return res\n else:\n from kyu.tensorflow.ops.svd_gradients import batch_matrix_log\n return batch_matrix_log(x, self.eps)", "def _log_sum_exp(x):\n axis = len(x.get_shape())-1\n m = tf.reduce_max(x, axis)\n m2 = tf.reduce_max(x, axis, keep_dims=True)\n return m + tf.log(tf.reduce_sum(tf.exp(x-m2), axis))", "def calculate_log_perplexity(self, output, flat_labels): #completed, expensive, should be compiled\n return -np.sum(np.log2(np.clip(output, a_min=1E-12, a_max=1.0))[np.arange(flat_labels.shape[0]), flat_labels[:,1]])", "def logsumexp(input_matrix, reduction_indices=1, keep_dims=False):\r\n\r\n max_input_matrix1 = input_matrix.max(reduction_indices, keepdims=keep_dims)\r\n max_input_matrix2 = max_input_matrix1\r\n if not keep_dims:\r\n max_input_matrix2 = np.expand_dims(max_input_matrix2, reduction_indices)\r\n return np.log(\r\n np.sum(\r\n np.exp(input_matrix - max_input_matrix2),\r\n reduction_indices,\r\n keepdims=keep_dims)) + max_input_matrix1", "def log_sum_exp(x):\n x_max = x.max()\n return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max", "def log_sum_exp(x):\n x_max = x.data.max()\n return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max", "def log_sum_exp(x):\n x_max = x.data.max()\n return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max", "def diag_multidim_gaussian_log_likelihood(z_u, mean_u, logvar_u, varmin):\n return np.sum(diag_gaussian_log_likelihood(z_u, mean_u, logvar_u, varmin), axis=0)", "def log(self) -> np.ndarray:\n S = 0.5*(self.A-self.A.T) # Skew-symmetric matrix\n y = np.array([S[2, 1], -S[2, 0], S[1, 0]]) # Axis\n if np.allclose(np.zeros(3), y):\n return np.zeros(3)\n y2 = np.linalg.norm(y)\n return np.arcsin(y2)*y/y2", "def log_sum_exp(x, axis=None):\n xmax = x.max(axis=axis, keepdims=True)\n xmax_ = x.max(axis=axis)\n return xmax_ + T.log(T.exp(x - xmax).sum(axis=axis))", "def entropy(x):\n nz = np.nonzero(x)[0]\n return -np.sum(x[nz]*np.log2(x[nz]))", "def log_sum_exp(x):\n axis = len(x.get_shape()) - 1\n m = tf.reduce_max(x, axis)\n m2 = tf.reduce_max(x, axis, keep_dims=True)\n return m + tf.log(tf.reduce_sum(tf.exp(x - m2), axis))", "def calculate_entropy(reduced_density_matrix_evals):\n vec_xlogx = vectorize(calculate_xlogx)\n result = -sum(vec_xlogx(reduced_density_matrix_evals,\n\t float_info.epsilon))\n return result", "def take_log_weights(self, data):\n\n n_row = data[:, 0].size\n log_data = np.zeros(data.shape)\n for i in xrange(data.shape[0]):\n idx_nonzero = (data[i, :] > 0).nonzero()[0]\n log_data[i, idx_nonzero] = np.log(data[i, idx_nonzero])\n return log_data", "def tent(x: torch.Tensor) -> torch.Tensor:\n return -(x.softmax(1) * x.log_softmax(1)).sum(1).mean(0)", "def logsumexp(x, axis=None):\n raise NotImplementedError", "def calculate_cross_entropy(self, output, flat_labels): #completed, expensive, should be compiled\n return -np.sum(np.log(np.clip(output, a_min=1E-12, a_max=1.0))[np.arange(flat_labels.shape[0]), flat_labels[:,1]])", "def convert_logsumexp(g, op, block):\n\n input_x = g.get_node(op.input(\"X\")[0])\n axis = op.attr(\"axis\")\n if op.attr(\"reduce_all\"):\n axis = None\n keepdims = op.attr(\"keepdim\")\n out = get_relay_op(\"logsumexp\")(input_x, axis=axis, keepdims=keepdims)\n if not axis and not keepdims:\n out = _op.expand_dims(out, axis=0)\n g.add_node(op.output(\"Out\")[0], out)", "def column_sums(square):\n total = 0", "def log_entropy(dm):\n size = 
len(dm)\n entropy = 0\n w, v = np.linalg.eig(dm)\n for n in range(size):\n if w[n] != 0:\n entropy = entropy - w[n] * np.log2(w[n])\n return entropy", "def grad_log(self, X):\n # \"\"\"\n # Evaluate the gradients (with respect to the input) of the log density at\n # each of the n points in X. This is the score function.\n\n # X: n x d numpy array.\n XB = np.dot(X, self.B)\n Y = 0.5*XB + self.c\n E2y = np.exp(2*Y)\n # n x dh\n Phi = old_div((E2y-1.0),(E2y+1))\n # n x dx\n T = np.dot(Phi, 0.5*self.B.T)\n S = self.b - X + T\n return S", "def log_norm(log_x):\n c = np.max(log_x)\n\n if np.isinf(c):\n return c\n\n sum_exp = 0\n\n for x in log_x:\n sum_exp += np.exp(x - c)\n\n log_sum_exp = np.log(sum_exp)\n\n log_Z = log_sum_exp + c\n\n return log_Z", "def fisher_diag(\n negative_log_likelihood: LossFun,\n params: Any,\n inputs: jnp.ndarray,\n targets: jnp.ndarray,\n) -> jnp.DeviceArray:\n return jnp.square(\n ravel(jax.grad(negative_log_likelihood)(params, inputs, targets)))", "def log_sum_exp(Z):\n return np.max(Z) + np.log(np.sum(np.exp(Z - np.max(Z))))", "def _vertical_log(self, X: np.ndarray) -> (np.ndarray, np.ndarray):\n ret_p = np.zeros_like(X)\n ret_n = np.zeros_like(X)\n log_p = self.manifold.log(X[:-1], X[1:])\n log_n = self.manifold.log(X[1:], X[:-1])\n ret_p[:-1] = log_p\n ret_n[1:] = log_n\n return ret_p, ret_n", "def logp_sum(self, *args, **kwargs):\n ## CHANGED\n #return tt.sum(self.logp(*args, **kwargs))\n return S.tsum(self.logp(*args, **kwargs))", "def normalize_log_likelihoods(X):\n h, w = np.shape(X)\n return X - np.tile(logsumexp(X, axis=0), (h, 1))\n # return X - np.matlib.repmat(logsumexp(X, axis=0), h, 1)", "def _log_add(*values):\n x = max(values)\n if x > -np.inf:\n sum_diffs = 0\n for value in values:\n sum_diffs += 2 ** (value - x)\n return x + np.log2(sum_diffs)\n else:\n return x", "def cross_entropy_cost(m, A, L):\n\tcost = (-1 / m) * np.sum(L * np.log(A) + (1 - L) * (np.ma.log(1 - A))) #Note: Using numpy masked array np.ma for values of log(0)\n\n\n\t# Sanity checks\n\tcost = np.squeeze(cost) \t#squeeze() removes single dimensional elements from the array: e.g. 
(1, 3, 1) -> (3,)\n\tassert(cost.shape == ()) \t#checks if cost value is a scalar\n\n\treturn cost", "def sumlogoddsscore(pwm_dictionary, seq):\n if \"N\" in seq:\n return 0\n else:\n # pwm_length = len(pwm_dictionary)\n pwm_length = len(pwm_dictionary[\"A\"])\n sum_log_odds = 1\n pwm_dictionary_rc = rc_pwm(pwm_dictionary, pwm_length)\n for i in range(len(seq) - 1):\n log_odds = 0\n log_odds_rc = 0\n for j in range(pwm_length - 1):\n if (j + i) >= len(seq):\n log_odds += 0.0\n log_odds_rc += 0.0\n elif seq[j + i] not in [\"A\", \"C\", \"G\", \"T\"]:\n log_odds += 0.0\n log_odds_rc += 0.0\n else:\n q = pwm_dictionary[seq[j + i]][j]\n q_rc = pwm_dictionary_rc[seq[j + i]][j]\n if q == 0 or (q_rc == 0):\n q = 0.000000000000000000000000000001\n # make this as close to zero as possible\n q_rc = 0.000000000000000000000000000001\n else:\n q = pwm_dictionary[seq[j + i]][j]\n q_rc = pwm_dictionary_rc[seq[j + i]][j]\n log_odds += (np.log(q / 0.25) / np.log(2)) * 100\n log_odds_rc += (np.log(q_rc / 0.25) / np.log(2)) * 100\n sum_log_odds += log_odds + log_odds_rc\n return sum_log_odds / 2", "def log_sum_exp(x):\n log_reduce_sum = P.ReduceSum()\n log = P.Log()\n exp = P.Exp()\n x_max = max(x.data)\n return log(log_reduce_sum(exp(x - x_max), 1)) + x_max", "def kld(mu, log_var):\n return (mu + log_var).sum() # TODO Your code goes here.", "def _logaddexp(a, b, mask):\n output = torch.zeros_like(a)\n # find the mask to output b when a contain -inf values\n out_put_b_mask = torch.isinf(a) & (a < 0)\n\n # find the mask to output a when b contain -inf values\n out_put_a_mask = torch.isinf(b) & (b < 0)\n # in order not to take the padded number into account\n # stop do accumulating when iteration gets in padded data\n out_put_a_mask = out_put_a_mask | ~ mask[:, None, None]\n\n # if no singularity cases happen, set the masks for logsumexp computations\n rest_mask = ~(out_put_a_mask | out_put_b_mask)\n\n # set value for found masks\n output[out_put_b_mask] = b[out_put_b_mask]\n output[out_put_a_mask] = a[out_put_a_mask]\n c = torch.cat((a[None,:], b[None,:]), dim=0)\n output[rest_mask] = torch.logsumexp(c, dim=0)[rest_mask]\n \n return output", "def problem():\n\n print 'problem #28'\n\n depth = 1\n numbers = 0\n s = 0\n for x in xrange(1, 1001**2+1):\n numbers += 1\n if depth == 1 or numbers % (depth - 1) == 0:\n s += x\n if x / depth == depth:\n depth += 2\n numbers = 0\n print 'the sum of diagonal number is %s' % s", "def loglike(self, params, *args, **kwargs):\n return np.sum(self.loglikeobs(params, *args, **kwargs))", "def trace(self):\n if not self.is_square():\n raise(ValueError, \"Cannot calculate the trace of a non-square matrix.\")\n\n # TODO - your code here\n sum = 0.0\n for i in range(self.h):\n for j in range(self.w):\n if i == j:\n sum = sum + self.g[i][j]\n return sum\n # TODO - your code here", "def Kdiag(self, X, target):\r\n target+=self._Kdiag(X)", "def log_sum_exp(tensor, dim=-1, sum_op=torch.sum):\n max, _ = torch.max(tensor, dim=dim, keepdim=True)\n return torch.log(sum_op(torch.exp(tensor - max), dim=dim, keepdim=True) + 1e-8) + max", "def total_score(self, logits):\n previous = torch.full((1, self.tag_size), -10000., device=device)\n previous[0][self.tag_map[self.start_tag]] = 0.\n\n for index in range(len(logits)):\n previous = previous.expand(self.tag_size, self.tag_size).t()\n emit = logits[index].view(1, -1).expand(self.tag_size, self.tag_size)\n scores = previous + emit + self.transitions\n previous = log_sum_exp(scores)\n\n # previous = previous + self.transitions[:, 
self.tag_map[self.stop_tag]]\n # previous += self.transitions[self.tag_map[self.stop_tag]]\n previous += self.transitions[self.tag_map[:, self.stop_tag]]\n total_scores = log_sum_exp(previous.t())[0]\n return total_scores", "def loss_func(self, logits, targets):\r\n return -np.sum(targets * np.log(logits)) / logits.shape[0]", "def logsum_pair(logx, logy):\n if logx == logzero():\n return logy\n elif logx > logy:\n return logx + np.log1p(np.exp(logy-logx))\n else:\n return logy + np.log1p(np.exp(logx-logy))", "def _log_matrix_vector(ms, vs):\n return tf.reduce_logsumexp(ms + vs[..., tf.newaxis, :], axis=-1)", "def log_shift(data):\n result = [np.log(1 + np.abs(d.copy())) for d in data]\n return result", "def _trace_dense(op): # pragma: no cover\n x = 0.0\n for i in range(op.shape[0]):\n x += op[i, i]\n return x", "def log_sum_exp(v):\n\tm = max(v)\n\tx = m * np.ones(np.size(v))\n\treturn m + np.log(sum(np.exp(v - x)))", "def ivalue(self, idx):\n\n a = self.data[:,idx] / self.data[:,idx].sum()\n results = -(np.dot(a, np.log(a))) / np.log(len(self.data[:,idx]))\n return results", "def neglogli(preds, actual):\n preds = np.reshape(preds, (-1,))\n nll = np.sum((preds - actual*np.log(preds + 1e-07) + np.log(sp.special.factorial(actual))))\n return nll", "def forward(log_emlik, log_startprob, log_transmat):\n alpha = np.zeros(log_emlik.shape)\n\n alpha[0,:] = log_startprob[0:-1] + log_emlik[0,:]\n\n sum_row = 0;\n log_transmat = log_transmat[0:-1];\n\n\n for frame in range(1,len(log_emlik)):\n\n for state in range(0,len(log_emlik[0])):\n\n alpha[frame,state] = logsumexp(alpha[frame-1,:] + log_transmat[:,state]) + log_emlik[frame,state]\n #print(alpha[frame,state])\n #print(alpha[frame,:])\n\n return alpha", "def compute_edge_logits(self):", "def logSumExp(ns):\n mx = np.max(ns)\n ds = ns - mx\n sumOfExp = np.exp(ds).sum()\n return mx + np.log(sumOfExp)", "def log_check(w_in: np.ndarray, w_log: np.ndarray) -> None:\n w_log[:] = np.nan\n\n if np.isnan(w_in).any():\n return\n\n if np.any(w_in <= 0):\n return\n\n w_log[:] = np.log(w_in[:])", "def trace(self):#矩阵轨迹 即对角线和\n if not self.is_square():\n raise(ValueError, \"Cannot calculate the trace of a non-square matrix.\")\n\n # TODO - your code here\n sum = 0;\n for i in range(self.h):\n sum+=self.g[i][i];\n return sum;", "def _make_log_sum_revenue(self, df):\n\n # Get revenue and fill NaN with zero\n train_df = df.copy(deep=False)\n train_df['revenue'] = train_df['totals.transactionRevenue']\n train_df['revenue'] = train_df['revenue'].astype('float').fillna(0)\n\n # Group by visitor and sum, log\n train_gdf = train_df.groupby('fullVisitorId')\n train_revenue_sum = train_gdf['revenue'].sum()\n train_revenue_log_sum = (train_revenue_sum + 1).apply(np.log)\n return train_revenue_log_sum", "def double_logits(input_logits):\n if len(input_logits.shape) == 0:\n value_logit = float(input_logits)\n return np.array([1 - value_logit, value_logit])\n\n input_shape = input_logits.shape\n twin_logits = np.ones(input_shape) - input_logits\n\n output_logits = np.stack((twin_logits, input_logits), axis=1)\n\n return output_logits", "def sig(self, batch):\n ans = 0\n for t in [-1,-2,-3]:\n z = batch[0][t]\n ans += (z[:-1]*z[1:]).sum() \n return ans", "def sum(matrix):\n\n return float(sum([sum(row) for row in matrix]))", "def contour_cumsum(logZ):\n Z = np.exp(logZ)\n\n shape = Z.shape\n Z = Z.ravel()\n\n ind_sort = np.argsort(Z)[::-1]\n ind_unsort = np.argsort(ind_sort)\n\n Z_cumsum = Z[ind_sort].cumsum()\n Z_cumsum /= Z_cumsum[-1]\n\n return 
Z_cumsum[ind_unsort].reshape(shape)", "def _crt_sum(self, j):\r\n Y_j = self.Y[:, j]\r\n r = self.R[j]\r\n L = 0.\r\n tbl = r / (r + np.arange(Y_j.max()))\r\n for y in Y_j[Y_j > 0]:\r\n # FIXME: This will not work on non-count emissions.\r\n u = self.rng.uniform(0, 1, size=y)\r\n inds = np.arange(y)\r\n L += (u <= tbl[inds]).sum()\r\n return L", "def log_softmax_nd(logits, axes=(-1,)):\n logits -= tf.reduce_max(logits, axis=axes, keepdims=True)\n return logits - tf.reduce_logsumexp(logits, axis=axes, keepdims=True)", "def compute_entropy_loss(logits):\n policy = F.softmax(logits, dim=-1)\n log_policy = F.log_softmax(logits, dim=-1)\n return torch.sum(policy * log_policy)", "def _loglike(self, y, f):\n # sum along last axis, which is assumed to be the `tasks` axis\n ll = tf.reduce_sum(y * tf.log(pos(f)), axis=-1)\n return ll", "def batch_trace(in_tensor, batch_size, absolute_value=False):\n # mask = tf.constant(value=np.identity(diag_num))\n # mask = tf.tile(mask, [batch_size, 1, 1])\n diag_matrix = batch_diag_part(in_tensor, batch_size)\n if absolute_value:\n diag_matrix = tf.abs(diag_matrix)\n return tf.reduce_sum(diag_matrix, reduction_indices=[1], keep_dims=False)", "def generate_diagonal(n, l):\n res = []\n arr = [1] * l\n l = l+1\n for diag in range(n):\n res = []\n for index in range(1, l):\n summed = sum(arr[:index]) # sum is really slow for large numbers\n res.append(summed)\n arr = res\n return (arr)", "def dKdiag_dtheta(self,dL_dKdiag,X,target):\r\n target[0] += np.sum(dL_dKdiag)", "def logsumexp(x, axis=None):\n xmax = K.max(x, axis=axis, keepdims=True)\n xmax_ = K.max(x, axis=axis)\n return xmax_ + K.log(K.sum(K.exp(x - xmax), axis=axis))", "def logcumsumexp(x):\n return LogCumSumExp().apply((x,))[0]", "def gmmloglik(log_emlik, weights):\n N,_ = log_emlik.shape;\n ll = 0;\n for i in range(N):\n ll += logsumexp(log_emlik[i, :] + np.log(weights));\n return ll", "def compute_logits(self):\n # [num test images, 1, embedding size].\n test_embeddings = tf.expand_dims(self.test_embeddings, 1)\n\n # [1, num_clases, embedding_size].\n prototypes = tf.expand_dims(self.prototypes, 0)\n\n # Squared euclidean distances between each test embedding / prototype pair.\n distances = tf.reduce_sum(tf.square(test_embeddings - prototypes), 2)\n self.test_logits = -distances\n return self.test_logits", "def log_joint(self):\n return sum([\n self.log_marg_like(self.gamma, self.gamma0, self.lamb, self.nu),\n self._gamma0_distribution.logpdf(self.gamma0),\n self._nu_distribution.logpdf(self.nu),\n self._lambda_distribution.logpdf(self.lamb),\n self.probit_distribution(self.xi).logpdf(self.gamma),\n self._xi_distribution.logpdf(self.xi) if self.sample_xi else 0.0\n ])", "def forward(log_emlik, log_startprob, log_transmat):\n logPi=log_startprob[:-1]\n logB=log_emlik\n logA=log_transmat[:-1,:-1]\n alpha = np.zeros_like(logB)\n alpha[0]=logB[0]+logPi\n for i in range(1,logB.shape[0]):\n for j in range(logA.shape[0]):\n alpha[i][j]=logsumexp(alpha[i-1]+logA[:,j]+logB[i][j])\n return alpha", "def log_normalize(log_prob, axis):\n log_sum = logsumexp(log_prob, axis=axis)\n \n if not isinstance(log_sum, np.ndarray):\n log_sum = np.array([log_sum])\n if log_prob.shape[0] == log_sum.shape[0]:\n # column normalize \n return (log_prob.transpose() - log_sum).transpose()\n else:\n # row normalize\n return log_prob - log_sum", "def get_kl_logistic(x, posterior_alpha, prior_lambda_, posterior_lambda_, prior_alpha, dtype):\n logdiff = logp_logistic(x, posterior_alpha, posterior_lambda_, dtype) - logp_logistic(x, 
prior_alpha, prior_lambda_,\n dtype)\n logdiff = tf.matrix_set_diag(logdiff, tf.zeros((tf.shape(logdiff)[0], tf.shape(logdiff)[1]),\n dtype=dtype)) # set diagonal part to zero\n return tf.reduce_sum(tf.reduce_mean(logdiff, [0]))", "def compute_logits(self):\n # [num train labels, num classes] where each row is a one-hot-encoded label.\n one_hot_train_labels = tf.one_hot(self.data.train_labels, self.way)\n\n # Undocumented in the paper, but *very important*: *only* the support set\n # embeddings is L2-normalized, which means that the distance is not exactly\n # a cosine distance. For comparison we also allow for the actual cosine\n # distance to be computed, which is controlled with the\n # `exact_cosine_distance` instance attribute.\n train_embeddings = tf.nn.l2_normalize(\n self.train_embeddings, 1, epsilon=1e-3)\n test_embeddings = self.test_embeddings\n if self.exact_cosine_distance:\n test_embeddings = tf.nn.l2_normalize(test_embeddings, 1, epsilon=1e-3)\n # [num_test_images, num_train_images]\n similarities = tf.matmul(\n test_embeddings, train_embeddings, transpose_b=True)\n attention = tf.nn.softmax(similarities)\n\n # [num_test_images, way]\n probs = tf.matmul(attention, one_hot_train_labels)\n self.test_logits = tf.log(probs)\n return self.test_logits", "def log_jacobian_tensor(self, x):\n pass", "def cost(h, y):\n\tm = y.shape[0]\n\tcost = (-1/m) * (y.T @ np.log(h) + (1 - y).T @ np.log(1 - h))\n\treturn cost" ]
[ "0.6600348", "0.6512393", "0.63457274", "0.62601304", "0.62387985", "0.6219907", "0.6185488", "0.6124841", "0.6087076", "0.606045", "0.6032963", "0.6027041", "0.6024959", "0.60206836", "0.60127777", "0.6009793", "0.598619", "0.5973478", "0.5966066", "0.58722997", "0.58696294", "0.5851559", "0.5845538", "0.5815091", "0.5811031", "0.5809058", "0.57991326", "0.5771243", "0.5762435", "0.57612073", "0.57436866", "0.57436866", "0.5723668", "0.5704552", "0.56709534", "0.5663107", "0.56339574", "0.56318593", "0.5624894", "0.5613829", "0.56082255", "0.560439", "0.558793", "0.558337", "0.5573231", "0.55646133", "0.5563775", "0.5556884", "0.5550779", "0.55471826", "0.5543978", "0.5515804", "0.55149966", "0.550421", "0.55021495", "0.54930717", "0.54818106", "0.5470803", "0.5469987", "0.54661745", "0.546186", "0.5459064", "0.54378927", "0.54323685", "0.5428678", "0.5424982", "0.54204637", "0.54140645", "0.5412464", "0.5406627", "0.5400769", "0.5394334", "0.5389694", "0.5387015", "0.53862435", "0.5383022", "0.5378372", "0.53747183", "0.5362974", "0.53454834", "0.5335365", "0.53249425", "0.53203076", "0.5317927", "0.53176504", "0.5309197", "0.5306997", "0.5304018", "0.529961", "0.52884406", "0.5287804", "0.52714163", "0.5263686", "0.526279", "0.5260292", "0.5258711", "0.525604", "0.5255063", "0.5251269", "0.5239415" ]
0.6544133
1
Shifts `pvals` by the largest value in the last dimension before the exp is calculated to prevent overflow (batchwise if necessary). Can be used if probabilities are normalized again later.
def shiftedexp(pvals: np.ndarray) -> np.ndarray: if pvals.shape[-1] == 0: return np.empty_like(pvals) return np.exp(pvals - np.amax(pvals, axis=-1)[..., None])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def benjamini_hochberg_step_down(pvals):\r\n tmp = fdr_correction(pvals)\r\n corrected_vals = empty(len(pvals))\r\n max_pval = 1.\r\n for i in argsort(pvals)[::-1]:\r\n if tmp[i] < max_pval:\r\n corrected_vals[i] = tmp[i]\r\n max_pval = tmp[i]\r\n else:\r\n corrected_vals[i] = max_pval\r\n return corrected_vals", "def _correct_p_values(self, p_vals):\r\n num_tests = len([p_val for p_val in p_vals if p_val is not None])\r\n corrected_p_vals = []\r\n for p_val in p_vals:\r\n if p_val is not None:\r\n corrected_p_vals.append(min(p_val * num_tests, 1))\r\n else:\r\n corrected_p_vals.append(p_val)\r\n return corrected_p_vals", "def softmax(values):\n exps = np.exp(values)\n return exps / sum(exps)", "def fdr_correction(pvals):\r\n tmp = array(pvals).astype(float) # this converts Nones to nans\r\n return tmp * tmp.size / (1. + argsort(argsort(tmp)).astype(float))", "def softmax(p):\n p_exp = np.exp(p)\n return p_exp / np.sum(p_exp)", "def test_correct_p_values(self):\r\n exp = [0.003, 0.006, 0.003]\r\n obs = self.mc._correct_p_values([0.001, 0.002, 0.001])\r\n assert_almost_equal(obs, exp)", "def compute_power(pvals, SNPs):\n\tnsnps = len(pvals)\n\tall_snps = np.arange(0, nsnps)\n\tpos = SNPs\n\tnegs = list(set(all_snps) - set(SNPs))\n\n\tpvals_rank = rank_array(pvals)\n\n\trocr = np.zeros((nsnps, 2))\n\tfor i in all_snps:\n\t\tv = pvals_rank[0:i] # test positives\n\t\tz = list(set(all_snps) - set(v)) # test negatives\n\n\t\tTP = len(set(v) & set(pos))\n\t\tFP = len(set(v) & set(negs))\n\t\tTN = len(set(z) & set(negs))\n\t\tFN = len(set(z) & set(pos))\n\n\t\tTPR = 1.0*TP/(TP+FN); FPR = 1.0*FP/(FP+TN); #FDR = 1.0*FP/(FP+TP)\n\n\t\trocr[i, :] = [FPR, TPR]\n\n\treturn rocr", "def test_correct_p_values_large_correction(self):\r\n exp = [1, None, 0.03, 0.03]\r\n obs = self.mc._correct_p_values([0.5, None, 0.01, 0.01])\r\n self.compare_multiple_level_array(obs, exp)", "def adjustPValues(p_values, method=\"fdr\"):\n\tadjusted_p_values = p_values[:]\n\tn = len(p_values)\n\tif method.lower() == \"bh\" or method.lower() == 'fdr':\n\t\tni = range(n,0,-1) # from n to 1\n\t\t# Sort the P values and keep track of the indices\n\t\tindexed_pv = sorted(zip(p_values, range(n)), reverse=True)\n\t\t(pvals,inds) = zip(*indexed_pv)\n\t\t# adjust\n\t\tnewp = [(float(n)/ni[xi])*pvals[xi] for xi in range(n)]\n\t\tcum_min_p = [min(newp[0:xi]) for xi in range(1,n+1)]\n\t\tadjp_sorted = [min(p,1.0) for p in cum_min_p]\n\t\t# re-sort\n\t\tadjusted_p_values = [-1]*n\n\t\tfor xi in range(n):\n\t\t\tadjusted_p_values[inds[xi]] = adjp_sorted[xi]\n\telif method.lower() == 'bonferroni':\n\t\tadjusted_p_values = [min(n*p,1.0) for p in p_values]\n\treturn adjusted_p_values", "def softmax(val, axis=-1):\n exp = np.exp(val - np.amax(val, axis=axis, keepdims=True))\n return exp / np.sum(exp, axis=axis, keepdims=True)", "def sample_probabilities(pvals: np.ndarray) -> Callable[[], int]:\n\n return Sampler(np.cumsum(pvals))", "def cumprobs(self, values):\n values = np.asarray(values)\n index = np.searchsorted(self.xs, values, side='right')\n ps = self.ps[index-1]\n ps[values < self.xs[0]] = 0.0\n return ps", "def bonferroni_correction(pvals):\r\n return (\r\n array(pvals, dtype=float) * len(pvals) # float conversion: Nones->nans\r\n )", "def genvals():\n vals = np.empty(200)\n vals[:50] = np.arange(50) / 50\n vals[50:100] = (50 - np.arange(50)) / 50\n vals[100:] = -vals[:100]\n return vals", "def save_expval_post_meas_values():\n targets = []\n for statevec in save_expval_final_statevecs():\n values = {}\n for label, (mat, qubits) in 
save_expval_params().items():\n inner_dict = {}\n for j in [\"00\", \"01\", \"10\", \"11\"]:\n # Check if non-zero measurement probability for given\n # measurement outcome for final statevector\n vec = Statevector.from_label(j)\n if not np.isclose(vec.data.dot(statevec.data), 0):\n # If outcome is non-zero compute expectation value\n # with post-selected outcome state\n inner_dict[hex(int(j, 2))] = vec.data.conj().dot(vec.evolve(mat, qubits).data)\n values[label] = inner_dict\n targets.append(values)\n return targets", "def lpflip(P):\n if len(P) == 1:\n return 0\n\n Z = logsumexp(P)\n P -= Z\n\n NP = np.exp(np.copy(P))\n\n assert math.fabs(1.0-sum(NP)) < 10.0**(-10.0)\n\n return pflip(NP)", "def _test_stack(values, pops=0):\n stack = StackWithMax()\n for val in values:\n stack.push(val)\n for _ in range(pops):\n stack.pop()\n\n return stack.max()", "def softmax(input):\n list_value = []\n len_compute = input.shape[-1]\n shape_input = input.shape\n for x in input.reshape(-1, len_compute):\n # print(x)\n e_x = np.exp(x - np.max(x))\n res = e_x / e_x.sum(axis=0)\n list_value.append(res)\n\n return np.array(list_value).reshape(shape_input)", "def __ExpMovingAverage(self, values, window):\n weights = np.exp(np.linspace(-1., 0., window))\n weights /= weights.sum()\n a = np.convolve(values, weights, mode='full')[:len(values)]\n a[:window] = a[window]\n return a", "def epsilongreedy_policy(Qvalues_oa):\n \n X = np.zeros_like(Qvalues_oa)\n \n # where are the actions with maximal value?\n maxX = Qvalues_oa == np.max(Qvalues_oa, axis=-1, keepdims=True)\n \n # assign 1-eps probability to max actions\n X += (1-epsilon) * maxX / maxX.sum(axis=-1, keepdims=True)\n \n # assign eps probability to other actions\n othX = np.logical_not(maxX)\n X += epsilon * othX / othX.sum(axis=-1, keepdims=True)\n \n assert np.allclose(X.sum(-1), 1.0)\n \n return X", "def policy_eval():\r\n \r\n action_prob = [0.125, 0.625, 0.125, 0.125]# actions with probabilities\r\n data = grid_world()\r\n state_axis = np.zeros((9, 9))#initialize states\r\n threshold = .1\r\n prior_state = np.ones((9, 9))\r\n \r\n while np.abs(state_axis - prior_state).max() > threshold:\r\n for x, y in product(range(9), repeat=2):\r\n prior_state = state_axis.copy()\r\n if data.array[x, y] == 'X':\r\n continue\r\n updated_values = [data.next_direction(np.array([x, y]), next_move)\r\n for next_move in data.directions]#Updating states with directions\r\n Sum_Expectation = np.dot(action_prob,\r\n [points_val + 0.9 * state_axis[position[0], position[1]]\r\n for position, points_val in updated_values])\r\n state_axis[x, y] = Sum_Expectation\r\n print(\"\\nExercise 3.1 Shows Value functions for the policy\\n\")\r\n print(state_axis)\r\n build_grid(state_axis, \"Shows Value functions for the policy\")", "def HC_update(p_values, alpha):\n p_values = np.sort(p_values) # Make sure p-values are sorted in ascending order\n n = len(p_values) # Number of data points\n ivalues = np.arange(1, n + 1)\n #p_values = p_values[0:int(round(n/2))] # Cut-off half of the values\n HC_vec = np.sqrt(n)*(ivalues/(n+1) - p_values)/np.sqrt(p_values - p_values**2) # Calculate scores for all datapoints\n HC_vec_reduced = HC_vec[0:int(alpha*(len(HC_vec)-1))]\n max_idx = np.argmax(HC_vec_reduced)\n return HC_vec_reduced[max_idx], max_idx, HC_vec_reduced", "def correct_pvalues_for_multiple_testing(pvalues, correction_type = \"Benjamini-Hochberg\"):\n pvalues = array(pvalues)\n n = int(pvalues.shape[0])\n new_pvalues = empty(n)\n if correction_type == \"Bonferroni\":\n new_pvalues = n * 
pvalues\n elif correction_type == \"Bonferroni-Holm\":\n values = [ (pvalue, i) for i, pvalue in enumerate(pvalues) ]\n values.sort()\n for rank, vals in enumerate(values):\n pvalue, i = vals\n new_pvalues[i] = (n-rank) * pvalue\n elif correction_type == \"Benjamini-Hochberg\":\n values = [ (pvalue, i) for i, pvalue in enumerate(pvalues) ]\n values.sort()\n values.reverse()\n new_values = []\n for i, vals in enumerate(values):\n rank = n - i\n pvalue, index = vals\n new_values.append((n/rank) * pvalue)\n for i in range(0, int(n)-1):\n if new_values[i] < new_values[i+1]:\n new_values[i+1] = new_values[i]\n for i, vals in enumerate(values):\n pvalue, index = vals\n new_pvalues[index] = new_values[i]\n return new_pvalues", "def eps_greedy_policy(q_values, eps, forbidden_actions):\r\n\r\n q_values[forbidden_actions] = np.NINF\r\n indices = torch.nonzero(q_values == q_values.max())\r\n random_index = random.randint(0, indices.shape[1]-1)\r\n best_action_index = indices[random_index]\r\n l = len(q_values)\r\n n_forbidden_actions = np.count_nonzero(forbidden_actions)\r\n p = eps / (l-n_forbidden_actions)\r\n\r\n policy = np.full([l], p)\r\n policy[forbidden_actions] = 0\r\n policy[best_action_index] += 1 - eps\r\n\r\n return policy", "def calc_probs(log_p):\n\n N = log_p.shape[0]\n\n log_Z_per_N = np.zeros(shape=(N, 1))\n\n for i in range(N):\n\n log_Z_per_N[i] = log_norm(log_p[i])\n\n log_p_new = log_p - log_Z_per_N\n\n p = np.exp(log_p_new)\n\n # log_Z = log_norm(log_p)\n\n # p = np.exp(log_p - log_Z)\n\n return p", "def compute_vals(\n self, vals: List[int], modes: int, instruction: MachineInstruction\n ) -> List[int]:\n # Use an intermediate string to obtain any missing leading zeros, since our opcode is\n # already an int\n # Reverse because parameter modes go from right to left\n modes = [int(mode) for mode in reversed(str(modes).zfill(len(vals)))]\n out_vals = [self.parameter_modes[mode](val) for val, mode in zip(vals, modes)]\n\n # Correct for write instructions always being in position mode\n if instruction.writes:\n out_vals[-1] = vals[-1]\n\n return out_vals", "def softmax(scores):\n exp_score = np.exp(scores)\n return exp_score / np.sum(exp_score)", "def softmax(X):\n _X = X - np.max(X, axis=1).reshape(-1, 1)\n ep = np.exp(_X)\n return ep / np.sum(ep, axis=1).reshape(-1, 1)", "def fixval(arr, repval, retarr=False):\n # 2009-09-02 14:07 IJC: Created\n # 2012-12-23 11:49 IJMC: Halved run time.\n\n if retarr:\n arr2 = arr.ravel().copy()\n else:\n arr2 = arr.ravel()\n\n finiteIndex = np.isfinite(arr2)\n if not finiteIndex.any():\n badIndex = find((1-finiteIndex))\n arr2[badIndex] = repval\n\n if retarr:\n return arr2.reshape(arr.shape)\n else:\n return", "def convert_to_one_tailed(longpvals):\n higher_in_dis = longpvals[longpvals['p'] > 0].index\n longpvals.loc[higher_in_dis, 'p-dis'] = longpvals.loc[higher_in_dis, 'p']/2\n\n higher_in_h = longpvals[longpvals['p'] <= 0].index\n longpvals.loc[higher_in_h, 'p-h'] = abs(longpvals.loc[higher_in_h, 'p']/2)\n\n def p_for_other_side(row, side, otherside):\n if np.isnan(row[side]):\n return 1-row[otherside]\n else:\n return row[side]\n longpvals['p-dis'] = longpvals.apply(p_for_other_side,\n args=('p-dis', 'p-h'),\n axis=1)\n longpvals['p-h'] = longpvals.apply(p_for_other_side,\n args=('p-h', 'p-dis'),\n axis=1)\n return longpvals", "def correct_pvalues_for_multiple_testing(pvalues, correction_type=\"Benjamini-Hochberg\"):\r\n from numpy import array, empty\r\n pvalues = array(pvalues)\r\n n = float(pvalues.shape[0])\r\n new_pvalues = 
empty(n)\r\n if correction_type == \"Bonferroni\":\r\n new_pvalues = n * pvalues\r\n elif correction_type == \"Bonferroni-Holm\":\r\n values = [(pvalue, i) for i, pvalue in enumerate(pvalues)]\r\n values.sort()\r\n for rank, vals in enumerate(values):\r\n pvalue, i = vals\r\n new_pvalues[i] = (n - rank) * pvalue\r\n elif correction_type == \"Benjamini-Hochberg\":\r\n values = [(pvalue, i) for i, pvalue in enumerate(pvalues)]\r\n values.sort()\r\n values.reverse()\r\n new_values = []\r\n for i, vals in enumerate(values):\r\n rank = n - i\r\n pvalue, index = vals\r\n new_values.append((n / rank) * pvalue)\r\n for i in range(0, int(n) - 1):\r\n if new_values[i] < new_values[i + 1]:\r\n new_values[i + 1] = new_values[i]\r\n for i, vals in enumerate(values):\r\n pvalue, index = vals\r\n new_pvalues[index] = new_values[i]\r\n return new_pvalues", "def prctile(data, p_vals=[0, 25, 50, 75, 100], sorted_=False):\r\n ps = [p_vals] if np.isscalar(p_vals) else p_vals\r\n\r\n if not sorted_:\r\n data = sorted(data)\r\n n = len(data)\r\n d = []\r\n for p in ps:\r\n fi = p * n / 100 - 0.5\r\n if fi <= 0: # maybe extrapolate?\r\n d.append(data[0])\r\n elif fi >= n - 1:\r\n d.append(data[-1])\r\n else:\r\n i = int(fi)\r\n d.append((i+1 - fi) * data[i] + (fi - i) * data[i+1])\r\n return d[0] if np.isscalar(p_vals) else d", "def safe_cumprod(x, eps):\n return torch.exp(exclusive_cumsum(torch.log(torch.clamp(x, min=eps, max=1.0))))", "def softmax(self, x):\n \"\"\" Meaning: avoid very large exponents by reducing the largest to zero\n and everything else to less than that\"\"\"\n ex = np.exp(x - np.max(x))\n return ex / np.sum(ex, axis = 1, keepdims=True) #Sums over rows np.sum([[0, 1], [0, 5]], axis=1) = array([1, 5])", "def calculate_pvalue_ranges(self, eval_x: np.ndarray) -> np.ndarray:\n bgd_activations = self.sorted_bgd_activations\n eval_activations = self.detector.get_activations(eval_x, self._layer_name, batch_size=128)\n\n if len(eval_activations.shape) == 4:\n dim2 = eval_activations.shape[1] * eval_activations.shape[2] * eval_activations.shape[3]\n eval_activations = np.reshape(eval_activations, (eval_activations.shape[0], dim2))\n\n bgrecords_n = bgd_activations.shape[0]\n records_n = eval_activations.shape[0]\n atrr_n = eval_activations.shape[1]\n\n pvalue_ranges = np.empty((records_n, atrr_n, 2))\n\n for j in range(atrr_n):\n pvalue_ranges[:, j, 0] = np.searchsorted(bgd_activations[:, j], eval_activations[:, j], side=\"right\")\n pvalue_ranges[:, j, 1] = np.searchsorted(bgd_activations[:, j], eval_activations[:, j], side=\"left\")\n\n pvalue_ranges = bgrecords_n - pvalue_ranges\n\n pvalue_ranges[:, :, 0] = np.divide(pvalue_ranges[:, :, 0], bgrecords_n + 1)\n pvalue_ranges[:, :, 1] = np.divide(pvalue_ranges[:, :, 1] + 1, bgrecords_n + 1)\n\n return pvalue_ranges", "def softmax(x: npt.NDArray) -> npt.NDArray:\n row_wise_max = np.max(x, axis=1).reshape(-1, 1)\n exp_x = np.exp(x - row_wise_max)\n return exp_x / np.sum(exp_x, axis=1).reshape(-1, 1)", "def softmax(x):\n exps = np.exp(x - np.max(x, axis=0))\n return exps / exps.sum(axis=0)", "def assertCorrectPValue(self, exp_min, exp_max, fn, num_perms=None,\r\n p_val_key='p_value'):\r\n found_match = False\r\n for i in range(self.p_val_tests):\r\n if num_perms is not None:\r\n obs = fn(num_perms)\r\n else:\r\n obs = fn()\r\n p_val = obs[p_val_key]\r\n self.assertTrue(0.0 <= p_val < 1.0)\r\n if p_val >= exp_min and p_val <= exp_max:\r\n found_match = True\r\n break\r\n self.assertTrue(found_match)", "def policyEval(policy, P, R, gamma, theta, 
max_iter=1000000):\n num_S, num_a = policy.shape\n v = np.zeros(num_S) # initialize value function\n k = 0 # counter of iteration\n\n for k in range(int(max_iter)):\n delta = 0\n new_values = np.zeros(num_S)\n # iterate through each state\n for s in range(num_S):\n v_temp = v[s]\n new_Vs_terms = []\n for a in range(len(policy[s])):\n for s_prime in range(len(P[s, a])):\n new_term = P[s, a, s_prime] * policy[s, a] * (R[s, a, s_prime] + gamma * v[s_prime])\n new_Vs_terms.append(new_term)\n\n new_values[s] = sum(new_Vs_terms)\n delta = max(delta, abs(v_temp - new_values[s]))\n v = new_values\n if delta < theta:\n print(k, \"iterations\")\n return np.around(v, 4)\n return np.around(v, 4)", "def softmax_policy(Qvalues_oa):\n betaQoa = beta * Qvalues_oa\n betaQoa_ = betaQoa - betaQoa.mean(-1, keepdims=True)\n expQoa = np.exp(betaQoa_)\n assert not np.any(np.isinf(expQoa)), \"behavior policy contains infs\"\n return expQoa / expQoa.sum(axis=-1, keepdims=True)", "def test_correct_p_values_no_change(self):\r\n exp = [None, 0.008]\r\n obs = self.mc._correct_p_values([None, 0.008])\r\n self.assertEqual(obs, exp)\r\n exp = [0.007]\r\n obs = self.mc._correct_p_values([0.007])\r\n assert_almost_equal(obs, exp)", "def policyImprv(P,R,gamma,policy,v):\n def one_step_lookahead(s, V):\n \"\"\"\n :param state: current state\n :param v: current value estimator\n :return: A, list of optimal action values under current value estimator\n \"\"\"\n num_a = policy.shape[1]\n A = np.zeros(num_a)\n for a in range(num_a):\n for s_prime in range(num_S):\n A[a] += P[s, a, s_prime] * (R[s, a, s_prime] + gamma * V[s_prime])\n return A\n\n # initialization \n num_S, num_a = policy.shape\n policy_stable = True\n\n for s in range(num_S):\n\n chosen_a = np.argmax(policy[s])\n\n action_values = one_step_lookahead(s, v)\n best_a = np.argmax(action_values)\n\n if chosen_a != best_a:\n policy_stable = False\n\n for i in range(num_a):\n if i != best_a:\n policy[s][i] = 0\n if i == best_a:\n policy[s][best_a] = 1\n return policy, policy_stable", "def compute(self, node, input_vals):\r\n #start = time.time()\r\n\r\n #assert len(input_vals) == 1\r\n strides = node.const_attr[1]\r\n ksize = node.const_attr[0]\r\n ish = list(input_vals[0].shape)\r\n input = input_vals[0]\r\n output = np.zeros([ish[0],(ish[1]-ksize[1])//strides[1]+1,(ish[2]-ksize[2])//strides[2]+1,ish[3]])\r\n osh = output.shape\r\n #print(osh)\r\n for i in range(osh[1]):\r\n for j in range(osh[2]):\r\n output[:,i,j,:] = np.amax(input[:,i*strides[1]:(i+1)*strides[1],j*strides[1]:(j+1)*strides[1],:],axis=(1,2))\r\n #end = time.time() \r\n #print(\"max_pool\") \r\n #print(end - start) \r\n return output\r\n \r\n #assert False\r", "def process_value(self, fft_value, peak_values):\n if fft_value < self.trigger_threshold:\n peak_values.append(0)\n else:\n if self.output_binary is True:\n peak_values.append(1)\n else:\n scaled_max = self.trigger_threshold + self.trigger_offset\n\n if fft_value < scaled_max:\n scaled_value = (scaled_max - fft_value) / scaled_max\n scaled_value *= self.scaled_max_value\n peak_values.append(math.ceil(scaled_value))\n else:\n peak_values.append(self.scaled_max_value)", "def exp_inplace(a):", "def softmax(x):\r\n exps = np.exp(x)\r\n return exps / np.sum(exps)", "def test_extreme_values(self):\n with pytest.warns(RuntimeWarning) as warninfo:\n assert np.exp(laplace_approx(999999, 999999, self.data)) == 0\n with pytest.warns(RuntimeWarning) as warninfo:\n assert np.exp(laplace_approx(999999, 999999, self.data)) == 0", "def softmax(L):\n expL = 
np.exp(L)\n sumExpL = sum(expL)\n result = []\n for i in expL:\n result.append(i*1.0/sumExpL)\n return result", "def process_values(self):\r\n \r\n if self.padding>0:\r\n channels = np.zeros((self.vals.shape[0], self.vals.shape[1]+self.padding))\r\n channels[:, 0:self.vals.shape[0]] = self.vals\r\n else:\r\n channels = self.vals\r\n vals_mat = self.skel.to_xyz(channels.flatten())\r\n self.vals = np.zeros_like(vals_mat)\r\n # Flip the Y and Z axes\r\n self.vals[:, 0] = vals_mat[:, 0].copy()\r\n self.vals[:, 1] = vals_mat[:, 2].copy()\r\n self.vals[:, 2] = vals_mat[:, 1].copy()", "def calculate_vals(self):\n for pp in self.powerplants:\n pp[\"vals\"] = self.possible_vals(pp)\n pp[\"index\"] = 0", "def exp(X):\n X = np.maximum(X,100)\n return np.exp(X)", "def PLP(self, *_):\n self.reg.P = self.pop()", "def softmax(oSums):\n result = np.zeros(shape=[len(oSums)], dtype=np.float32)\n m = max(oSums)\n divisor = 0.0\n for k in range(len(oSums)):\n divisor += math.exp(oSums[k] - m)\n for k in range(len(result)):\n result[k] = math.exp(oSums[k] - m) / divisor\n return result", "def softmax(x):\n e_x = np.exp(x - np.max(x))\n return (e_x / e_x.sum()).tolist()", "def softmax(x):\n \"\"\" Meaning: avoid very large exponents by reducing the largest to zero\n and everything else to less than that, so they go to zero instead of infinity an Nan\"\"\"\n ex = np.exp(x - np.max(x))\n return ex / ex.sum(axis=1, keepdims = True)", "def softmax(x):\n output = []\n denomSum = 0\n for i in range(0, len(x)):\n denomSum += np.exp(x[i])\n\n for j in range(0, len(x)):\n output.append(np.exp(x[j]) / denomSum)\n return np.asarray(output)", "def checkvalues(self, idx_list, val_list):\n\n def dist(idx):\n idx_prev = self.indices_all[-self.points_mem:]\n # Calculate distances between current point and previous n points\n d_all = [np.linalg.norm(np.array(idx) - np.array(i)) for i in idx_prev]\n # Calculate weighting coefficient for each distance\n dscale_all = [dscale_*self.gamma**i for i in range(len(idx_prev))]\n # Check if each distance satisfies the imposed criteria\n bool_ = 0 in [d > l for (d, l) in zip(d_all[::-1], dscale_all)]\n return bool_\n\n dscale_ = 0 if self.dscale is None else self.dscale\n _idx = 0\n if self.verbose == 2:\n print('Acquisition function max value {} at {}'.format(\n val_list[_idx], idx_list[_idx]))\n if len(self.indices_all) == 0:\n return idx_list[_idx], val_list[_idx]\n while (1 in [1 for a in self.indices_all if a == idx_list[_idx]]\n or dist(idx_list[_idx])):\n if self.verbose == 2:\n print(\"Finding the next max point...\")\n _idx = _idx + 1\n if _idx == len(idx_list):\n _idx = np.random.randint(0, len(idx_list)) if self.exit_strategy else -1\n if self.verbose == 2:\n print('Index out of list. 
Exiting with acquisition function value {} at {}'.format(\n val_list[_idx], idx_list[_idx]))\n break\n if self.verbose == 2:\n print('Acquisition function max value {} at {}'.format(\n val_list[_idx], idx_list[_idx]))\n return idx_list[_idx], val_list[_idx]", "def __setitem__(self, values, p):\n if isinstance(values, dict):\n values = [values[var] for var in self.variables]\n self.prob[values] = p\n for var,val in zip(self.variables, values):\n if val not in self.vals[var]:\n self.vals[var].append(val)", "def softmax(x):\n shape = x.shape\n probs = np.exp(x - np.max(x, axis=len(shape) - 1, keepdims=True))\n probs /= np.sum(probs, axis=len(shape) - 1, keepdims=True)\n return probs", "def softmax(self, x):\n if x.ndim == 1:\n x = x.reshape((1, -1))\n max_x = np.max(x, axis=1).reshape((-1, 1))\n exp_x = np.exp(x - max_x)\n return exp_x / np.sum(exp_x, axis=1).reshape((-1, 1))", "def softmax(x):\n e_x = np.exp(x - np.max(x))\n return old_div(e_x, e_x.sum())", "def ExpMovingAverage(values, window):\n weights = np.exp(np.linspace(-1.0, 0.0, window))\n weights /= weights.sum()\n a = np.convolve(values, weights, mode=\"full\")[: len(values)]\n a[:window] = a[window]\n return a", "def extrapolate_with_worst_case(values: List[float], n: int = 5) -> float:\n n = min(len(values), n)\n return values[-1] + max(v_next - v_prev for v_prev, v_next in zip(values[-n:], values[-n+1:]))", "def softmax(x):\n e_x = np.exp(x - np.max(x, axis=1).reshape(-1, 1))\n return e_x / np.sum(e_x, axis=1).reshape(-1, 1)", "def softmax(x):\n xx = x\n x = x.reshape((-1, x.shape[-1]))\n e_x = np.exp(x - np.max(x, 1).reshape(-1, 1))\n res = e_x / e_x.sum(axis=1).reshape(-1, 1)\n return res.reshape(xx.shape)", "def softmax(inputs):\n probs = np.exp(inputs)\n # print(probs.shape)\n # t = np.sum(probs, axis=0)\n # print(t.shape)\n\n probs /= np.sum(probs, axis=0)[np.newaxis,:]\n return probs", "def _compute_softmax(scores):\n if not scores:\n return []\n\n max_score = None\n for score in scores:\n if max_score is None or score > max_score:\n max_score = score\n\n exp_scores = []\n total_sum = 0.0\n for score in scores:\n x = np.exp(score - max_score)\n exp_scores.append(x)\n total_sum += x\n\n probs = []\n for score in exp_scores:\n probs.append(score / total_sum)\n return probs", "def log_prob(self, scores : torch.Tensor, permutations):\n s = torch.log(select_indices(scores, permutations))\n n = len(scores)\n p = self.upto if self.upto is not None else n - 1\n return -sum(\n torch.log(torch.exp((s[k:] - s[k]) * self.shape).sum(dim=0))\n for k in range(p))", "def softmax(x): \n e_x = np.exp(x - np.max(x)) \n return e_x / e_x.sum()", "def softmax(x):\r\n e_x = np.exp(x - np.max(x))\r\n return e_x / e_x.sum()", "def softmax(x):\r\n e_x = np.exp(x - np.max(x))\r\n return e_x / e_x.sum()", "def softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum(axis=0)", "def _compute_softmax(scores):\n if not scores:\n return []\n\n max_score = None\n for score in scores:\n if max_score is None or score > max_score:\n max_score = score\n\n exp_scores = []\n total_sum = 0.0\n for score in scores:\n x = math.exp(score - max_score)\n exp_scores.append(x)\n total_sum += x\n\n probs = []\n for score in exp_scores:\n probs.append(score / total_sum)\n return probs", "def _compute_softmax(scores):\n if not scores:\n return []\n\n max_score = None\n for score in scores:\n if max_score is None or score > max_score:\n max_score = score\n\n exp_scores = []\n total_sum = 0.0\n for score in scores:\n x = math.exp(score - max_score)\n 
exp_scores.append(x)\n total_sum += x\n\n probs = []\n for score in exp_scores:\n probs.append(score / total_sum)\n return probs", "def _compute_softmax(scores):\n if not scores:\n return []\n\n max_score = None\n for score in scores:\n if max_score is None or score > max_score:\n max_score = score\n\n exp_scores = []\n total_sum = 0.0\n for score in scores:\n x = math.exp(score - max_score)\n exp_scores.append(x)\n total_sum += x\n\n probs = []\n for score in exp_scores:\n probs.append(score / total_sum)\n return probs", "def _compute_softmax(scores):\n if not scores:\n return []\n\n max_score = None\n for score in scores:\n if max_score is None or score > max_score:\n max_score = score\n\n exp_scores = []\n total_sum = 0.0\n for score in scores:\n x = math.exp(score - max_score)\n exp_scores.append(x)\n total_sum += x\n\n probs = []\n for score in exp_scores:\n probs.append(score / total_sum)\n return probs", "def test_max_entangled_expval(self):\n circuit = max_entangled_circuit(3, shots=100000)\n bits, recipes = circuit()\n shadow = ClassicalShadow(bits, recipes)\n\n obs = [\n qml.PauliX(1),\n qml.PauliX(0) @ qml.PauliX(2),\n qml.PauliZ(2),\n qml.Identity(1) @ qml.PauliZ(2),\n qml.PauliZ(1) @ qml.PauliZ(2),\n qml.PauliX(0) @ qml.PauliY(1),\n qml.PauliX(0) @ qml.PauliY(1) @ qml.Identity(2),\n qml.PauliY(0) @ qml.PauliX(1) @ qml.PauliY(2),\n ]\n\n expected = [0, 0, 0, 0, 1, 0, 0, -1]\n\n actual = shadow.expval(obs, k=10)\n assert actual.shape == (8,)\n assert actual.dtype == np.float64\n assert qml.math.allclose(actual, expected, atol=1e-1)", "def expanding_max_1d_nb(a, minp=1):\n out = np.empty_like(a, dtype=np.float_)\n maxv = a[0]\n cnt = 0\n for i in range(a.shape[0]):\n if np.isnan(maxv) or a[i] > maxv:\n maxv = a[i]\n if ~np.isnan(a[i]):\n cnt += 1\n if cnt < minp:\n out[i] = np.nan\n else:\n out[i] = maxv\n return out", "def decode_from_P(P):\n N = P.shape[0]\n A = P.shape[1]\n \n X = np.arange(N)\n \n for i in range(N):\n max_val = -1e100\n for a in range(A):\n if P[i,a] > max_val:\n max_val = P[i,a]\n X[i] = a\n \n return X", "def softmax(x):\n # Compute and return softmax(x)\n denom = sum(np.exp(x))\n return [ np.exp(xi)/denom for xi in x ]", "def softmax(arr: np.ndarray, axis: int = -1):\n c = arr.max(axis=axis, keepdims=True)\n s = arr - c\n nominator = np.exp(s)\n denominator = nominator.sum(axis=axis, keepdims=True)\n probs = nominator / denominator\n return probs", "def set_max_evaluations(self,ev):\n self.max_evaluations = ev", "def _check_maxexp(np_type, maxexp):\n dt = np.dtype(np_type)\n np_type = dt.type\n two = np_type(2).reshape((1,)) # to avoid upcasting\n return (np.isfinite(two ** (maxexp - 1)) and\n not np.isfinite(two ** maxexp))", "def max_pool(inputs, window_shape, strides=None, padding=\"VALID\"):\n y = pool(inputs, -jnp.inf, lax.max, window_shape, strides, padding)\n return y", "def clip_outliers(values, p=99):\n min = np.percentile(values, 100 - p)\n max = np.percentile(values, p)\n return np.clip(values, min, max)", "def clip_outliers(values, p=99):\n min = np.percentile(values, 100 - p)\n max = np.percentile(values, p)\n return np.clip(values, min, max)", "def softmax(x):\n if type(x) == list:\n dim=len(x)\n norm = np.sum(np.exp(x))\n for idx in range(dim):\n x[idx] = np.exp(x[idx])/norm\n elif type(x) == np.ndarray:\n dim=x.shape\n for col in range(dim[1]):\n norm = np.sum(np.exp(x[:, col]))\n for idx in range(dim[0]):\n x[idx, col] = np.exp(x[idx, col])/norm\n else:\n raise Exception('incorrect input')\n return x", "def argmax(self, values):\n return 
self.aggregate(values, \"argmax\")", "def _core_calc_prod(self,bp,Lp) :\n\t\tprod = np.dot(Lp,bp)\n\t\tprod = np.exp(prod)\n\t\treturn prod", "def softmax(x):\n num = np.exp(x)\n den = np.sum(np.exp(x), axis=1)\n output = (num.T / den).T\n return output", "def test_correct_p_values_mixed(self):\r\n exp = [None, 0.008, 0.01, None]\r\n obs = self.mc._correct_p_values([None, 0.004, 0.005, None])\r\n self.assertEqual(obs, exp)", "def _infer_pvalues(self, effect, perm, p=.05, mcp='maxstat'):\n assert all([isinstance(k, np.ndarray) for k in (effect, perm)])\n n_perm = perm.shape[0]\n # compute the minimum number of required permutations\n n_perm_req = int(10. / p)\n if n_perm < n_perm_req:\n logger.warning(f\"For inferences at p<{p}, it is recommended to per\"\n f\"form at least n_perm={n_perm_req} permutations\")\n\n # ---------------------------------------------------------------------\n logger.info(f\" infer p-values at (p={p}, mcp={mcp})\")\n # computes the pvalues\n if mcp is 'maxstat':\n max_p = perm.reshape(n_perm, -1).max(1)[np.newaxis, ...]\n nb_over = (effect[..., np.newaxis] <= max_p).sum(-1)\n pvalues = nb_over / n_perm\n # non-signi. p-values are set to 1. and min(pvalues) = 1 / n_perm\n pvalues[pvalues >= p] = 1.\n pvalues = np.maximum(1. / n_perm, pvalues)\n elif mcp in ['fdr', 'bonferroni']:\n from mne.stats import fdr_correction, bonferroni_correction\n fcn = fdr_correction if mcp is 'fdr' else bonferroni_correction\n # compute the p-values\n pvalues = (effect[np.newaxis, ...] <= perm).sum(0) / n_perm\n pvalues = np.maximum(1. / n_perm, pvalues)\n # apply correction\n is_signi, pvalues = fcn(pvalues, alpha=p)\n pvalues[~is_signi] = 1.\n\n return pvalues", "def softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum(axis=0) # only difference", "def softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum(axis=0) # only difference", "def softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum(axis=0) # only difference", "def softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum(axis=0) # only difference", "def eval(self, p):\n from max_plus.max_plus_int import minus_infinity, IntegerMaxPlusMatrix\n F = FreeModule(ZZ, self._nvars)\n p = F(p)\n mat = []\n d = self.dim()\n for i in range(d):\n row = []\n for j in range(d):\n pts = self[i,j]\n row.append(minus_infinity() if not pts else max(p.dot_product(v) for v in pts))\n mat.append(row)\n return IntegerMaxPlusMatrix(self._d, self._d, mat)", "def softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum()", "def softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum()", "def softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum()" ]
[ "0.62695336", "0.6066202", "0.58225965", "0.57363963", "0.5489803", "0.5446892", "0.5247545", "0.52059096", "0.5191071", "0.51882726", "0.5157705", "0.5066351", "0.5033633", "0.4979771", "0.4901542", "0.4892328", "0.48860258", "0.48807552", "0.48794442", "0.48614326", "0.48505324", "0.48391014", "0.4834803", "0.48337024", "0.48271474", "0.4798808", "0.4785805", "0.47762957", "0.4758349", "0.4753474", "0.47520307", "0.47460622", "0.47439772", "0.47224474", "0.47220713", "0.47197607", "0.47126818", "0.47069144", "0.4705258", "0.46997193", "0.4693721", "0.46875903", "0.46874928", "0.46835726", "0.46781316", "0.4671692", "0.46695408", "0.4664546", "0.46633264", "0.46585402", "0.4653367", "0.46511003", "0.4650063", "0.46461725", "0.4644291", "0.464062", "0.4636723", "0.4634395", "0.4634111", "0.46297914", "0.46279386", "0.46239358", "0.46221554", "0.46200192", "0.46199492", "0.46183145", "0.4615935", "0.46149585", "0.46143872", "0.4612415", "0.4612415", "0.46099347", "0.46075904", "0.46075904", "0.46075904", "0.46075904", "0.46066254", "0.46056452", "0.45989186", "0.45937198", "0.45932972", "0.45788857", "0.45746446", "0.45713815", "0.45711878", "0.45711878", "0.457056", "0.45676315", "0.45654657", "0.45596436", "0.45583022", "0.45581412", "0.45529845", "0.45529845", "0.45529845", "0.45529845", "0.4550934", "0.4550575", "0.4550575", "0.4550575" ]
0.7399692
0
Sample from list of probabilities `pvals` with replacement. The probabilities don't need to be normalized.
def sample_probabilities(pvals: np.ndarray) -> Callable[[], int]: return Sampler(np.cumsum(pvals))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _correct_p_values(self, p_vals):\r\n num_tests = len([p_val for p_val in p_vals if p_val is not None])\r\n corrected_p_vals = []\r\n for p_val in p_vals:\r\n if p_val is not None:\r\n corrected_p_vals.append(min(p_val * num_tests, 1))\r\n else:\r\n corrected_p_vals.append(p_val)\r\n return corrected_p_vals", "def sample(probs):\n\n probs = probs / probs.sum()\n return np.random.choice(np.arange(len(probs)), p=probs.flatten())", "def get_Sample(self, values, probabilities):\r\n return choices(values,probabilities)\r\n # return np.random.choice(values,p=probabilities)\r", "def mutateList(values, numMutate, vmin, vmax, rabs=True):\n\tmutations = set()\n\tcount = 0\n\twhile count < numMutate:\n\t\tj = randint(0, len(values)-1)\n\t\tif j not in mutations:\n\t\t\ts = np.random.uniform(vmin, vmax)\n\t\t\tvalues[j] = s if rabs else values[j] * s\n\t\t\tcount += 1\n\t\t\tmutations.add(j)\n\treturn values", "def sample_distribution(numbers, probabilities, num_samples):\n intervals = []\n intervals.append(probabilities[0])\n new_interval = probabilities[0]\n\n for i in range(1, len(probabilities)):\n new_interval += probabilities[i]\n intervals.append(new_interval)\n\n counter = 0\n new_numbers = []\n while counter <= num_samples:\n for i in range(len(intervals)):\n # Generate a random num between 0 - 1\n # i.e. flip a coin.\n rand_prob = np.random.random_sample((1,))\n if rand_prob <= [intervals[i]]:\n new_numbers.append(numbers[i])\n counter += 1\n\n return new_numbers", "def resampleParticles(self, gameState):\n self.particles = []\n for i in range(self.numParticles):\n self.particles.append(tuple(util.sample(self.uniformPrior) for _ in\n self.ghostIndices))", "def samplingWithReplacement(m):\n return [ random.randrange(m) for i in range(m) ]", "def __setitem__(self, values, p):\n if isinstance(values, dict):\n values = [values[var] for var in self.variables]\n self.prob[values] = p\n for var,val in zip(self.variables, values):\n if val not in self.vals[var]:\n self.vals[var].append(val)", "def initPmf(self, values):\n for value, prob in values.items():\n self.set(value, prob)", "def weighted_values(values, probabilities, size):\n bins = np.add.accumulate(probabilities)\n indices = np.digitize(random_sample(size), bins)\n sample = []\n for ind in indices:\n sample.append(deepcopy(values[ind]))\n return sample", "def sample(a, p):\n if (len(a) != len(p)):\n raise Exception('a != p')\n p = np.array(p)\n p = p / p.sum()\n r = random.random()\n n = len(a)\n total = 0 # range: [0,1]\n for i in xrange(n):\n total += p[i]\n if total > r:\n return a[i]\n return a[i]", "def prob_choice(p):\n \n return np.random.random_sample() < p", "def sample(self, probabilities):\n return self.sample_bernoulli(probabilities)", "def sample_from_list(l, probs, max_n=None):\n assert len(l) == len(probs), 'given list l and probs must have same length'\n if max_n is None:\n max_n = len(l)\n sum_probs = sum(probs)\n if sum_probs == 0:\n return []\n probs_ = np.array(probs) / sum_probs\n # we draw max n or |probs_ > 0|\n # noinspection PyTypeChecker\n n = min(max_n, np.sum(probs_ > 0))\n # use idx approach as direct passing to np.random.choice would convert\n # items of l into str\n # noinspection PyUnresolvedReferences\n res = [\n l[idx] for idx in np.random.choice(len(l), n, replace=False, p=probs_)\n ]\n return res", "def resample_particles(self):\n # make sure the distribution is normalized\n self.normalize_particles()\n\n newParticles = []\n for i in range(len(self.particle_cloud)):\n # resample the same # of 
particles\n choice = random_sample()\n # all the particle weights sum to 1\n csum = 0 # cumulative sum\n for particle in self.particle_cloud:\n csum += particle.w\n if csum >= choice:\n # if the random choice fell within the particle's weight\n newParticles.append(deepcopy(particle))\n break\n self.particle_cloud = newParticles", "def bonferroni_correction(pvals):\r\n return (\r\n array(pvals, dtype=float) * len(pvals) # float conversion: Nones->nans\r\n )", "def correct_pvalues_for_multiple_testing(pvalues, correction_type = \"Benjamini-Hochberg\"):\n pvalues = array(pvalues)\n n = int(pvalues.shape[0])\n new_pvalues = empty(n)\n if correction_type == \"Bonferroni\":\n new_pvalues = n * pvalues\n elif correction_type == \"Bonferroni-Holm\":\n values = [ (pvalue, i) for i, pvalue in enumerate(pvalues) ]\n values.sort()\n for rank, vals in enumerate(values):\n pvalue, i = vals\n new_pvalues[i] = (n-rank) * pvalue\n elif correction_type == \"Benjamini-Hochberg\":\n values = [ (pvalue, i) for i, pvalue in enumerate(pvalues) ]\n values.sort()\n values.reverse()\n new_values = []\n for i, vals in enumerate(values):\n rank = n - i\n pvalue, index = vals\n new_values.append((n/rank) * pvalue)\n for i in range(0, int(n)-1):\n if new_values[i] < new_values[i+1]:\n new_values[i+1] = new_values[i]\n for i, vals in enumerate(values):\n pvalue, index = vals\n new_pvalues[index] = new_values[i]\n return new_pvalues", "def fdr_correction(pvals):\r\n tmp = array(pvals).astype(float) # this converts Nones to nans\r\n return tmp * tmp.size / (1. + argsort(argsort(tmp)).astype(float))", "def test_correct_p_values(self):\r\n exp = [0.003, 0.006, 0.003]\r\n obs = self.mc._correct_p_values([0.001, 0.002, 0.001])\r\n assert_almost_equal(obs, exp)", "def correct_pvalues_for_multiple_testing(pvalues, correction_type=\"Benjamini-Hochberg\"):\r\n from numpy import array, empty\r\n pvalues = array(pvalues)\r\n n = float(pvalues.shape[0])\r\n new_pvalues = empty(n)\r\n if correction_type == \"Bonferroni\":\r\n new_pvalues = n * pvalues\r\n elif correction_type == \"Bonferroni-Holm\":\r\n values = [(pvalue, i) for i, pvalue in enumerate(pvalues)]\r\n values.sort()\r\n for rank, vals in enumerate(values):\r\n pvalue, i = vals\r\n new_pvalues[i] = (n - rank) * pvalue\r\n elif correction_type == \"Benjamini-Hochberg\":\r\n values = [(pvalue, i) for i, pvalue in enumerate(pvalues)]\r\n values.sort()\r\n values.reverse()\r\n new_values = []\r\n for i, vals in enumerate(values):\r\n rank = n - i\r\n pvalue, index = vals\r\n new_values.append((n / rank) * pvalue)\r\n for i in range(0, int(n) - 1):\r\n if new_values[i] < new_values[i + 1]:\r\n new_values[i + 1] = new_values[i]\r\n for i, vals in enumerate(values):\r\n pvalue, index = vals\r\n new_pvalues[index] = new_values[i]\r\n return new_pvalues", "def categorical(pvals: np.ndarray) -> int:\n\n return sample_probabilities(pvals)() # faster than: np.argmax(np.random.multinomial(1, normalize(pvals)))", "def swapBetweenLists(values1, values2):\n\tp1 = randint(0, len(values1)-1)\n\tp2 = randint(0, len(values2)-1)\n\ttmp = values1[p1]\t\n\tvalues1[p1] = values2[p2]\n\tvalues2[p2] = tmp", "def sampleWithReplacement(population, choiceSize):\n\n n = len(population)\n _random, _int = random.random, int # speed hack\n return [_int(_random()*n) for _ in itertools.repeat(None, choiceSize)]", "def sample_discrete(probs):\r\n q = np.random.rand()\r\n i = 0\r\n p_sum = 0.0\r\n while p_sum < q:\r\n p_sum += probs[i]\r\n i += 1\r\n return i - 1", "def sample_gp_pred(self, nsamp, 
input_list, lv=None):\n x_pred = np.stack(input_list)\n if lv is None:\n if (\n self.params.model_str == 'optfixedsig'\n or self.params.model_str == 'opt'\n or self.params.model_str == 'fixedparam'\n ):\n lv = self.sample_list[0]\n elif (\n self.params.model_str == 'samp'\n or self.params.model_str == 'sampfixedsig'\n ):\n lv = self.sample_list[np.random.randint(len(self.sample_list))]\n postmu, postcov = gp_post(\n self.data.x,\n self.data.y,\n x_pred,\n lv.ls,\n lv.alpha,\n lv.sigma,\n self.params.kernel,\n )\n single_post_sample = sample_mvn(postmu, postcov, 1).reshape(-1)\n pred_list = [\n single_post_sample for _ in range(nsamp)\n ] #### TODO: instead of duplicating this TS, sample nsamp times from generative process (given/conditioned-on this TS)\n return list(np.stack(pred_list).T)", "def sample_from_probabilities(probabilities, topn=ALPHASIZE):\n p = np.squeeze(probabilities)\n p[np.argsort(p)[:-topn]] = 0\n p = p / np.sum(p)\n return np.random.choice(ALPHASIZE, 1, p=p)[0]", "def _mutate(self,arr,p_mut):\n mut = np.random.random_sample(arr.shape)<p_mut\n no_mut = ~mut\n mut_val = np.random.uniform(low=self.minval,high=self.maxval,size=arr.shape)\n return (no_mut*arr) + (mut*mut_val)", "def random_value(self, selected_vals):\n pass", "def sample_prob(probs):\n return tf.to_float(tf.random_uniform(tf.shape(probs)) <= probs)", "def sample_prob(probs):\n return tf.to_float(tf.random_uniform(tf.shape(probs)) <= probs)", "def initMapping(self, values):\n for value, prob in values.items():\n self.set(value, prob)", "def pval(trueVal,randomWinsList):\n pvaluelist=[]\n denom=len(randomWinsList)\n for num in randomWinsList:\n if num >= trueVal:\n pvaluelist.append(num) \n pvaluelistLen = len(plist)\n \n return pvaluelistLen*1.0/denom*1.0", "def sample_data(_,\n val,\n sampling_strategy=spec.SamplingStrategy.UNDERSAMPLE,\n side=0):\n\n if sampling_strategy == spec.SamplingStrategy.UNDERSAMPLE:\n random_sample_data = random.sample(val, side)\n elif sampling_strategy == spec.SamplingStrategy.OVERSAMPLE:\n random_sample_data = random.choices(val, k=side)\n else:\n raise ValueError(\"Invalid value for sampling_strategy variable!\")\n\n for item in random_sample_data:\n yield item", "def p_value_inflation_test(p_values):\n from scipy.stats import ks_2samp\n h_null = np.random.uniform(0, 1, size=int(1e6))\n d, p_value = ks_2samp(p_values, h_null)\n return p_value, d", "def mutate(self, probability, rate):\n for i in range(self.number_of_transitions):\n shape = np.shape(self.weights[i])\n size = self.weights[i].size\n weights = self.weights[i].flatten()\n for j in range(len(weights)):\n if np.random.uniform(0, 1) < probability:\n weights[j] = weights[j] + rate * np.random.normal(0, 1 / np.sqrt(shape[0]))\n self.weights[i] = weights.reshape(shape)\n for j in range(len(self.biases[i])):\n if np.random.uniform(0, 1) < probability:\n self.biases[i][j] = self.biases[i][j] + rate * np.random.normal(0, 1)", "def weighted_values(values, probabilities, size):\n bins = np.add.accumulate(probabilities)\n return values[np.digitize(random_sample(size), bins)]", "def subsample(self, proposals, targets):\n\n labels, regression_targets = self.prepare_targets(proposals, targets)\n sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)\n\n proposals = list(proposals)\n # add corresponding label and regression_targets information to the bounding boxes\n for labels_per_image, regression_targets_per_image, proposals_per_image in zip(\n labels, regression_targets, proposals\n ):\n 
proposals_per_image.add_field(\"labels\", labels_per_image)\n proposals_per_image.add_field(\n \"regression_targets\", regression_targets_per_image\n )\n\n # distributed sampled proposals, that were obtained on all feature maps\n # concatenated via the fg_bg_sampler, into individual feature map levels\n for img_idx, (pos_inds_img, neg_inds_img) in enumerate(\n zip(sampled_pos_inds, sampled_neg_inds)\n ):\n img_sampled_inds = torch.nonzero(pos_inds_img | neg_inds_img).squeeze(1)\n proposals_per_image = proposals[img_idx][img_sampled_inds]\n proposals[img_idx] = proposals_per_image\n\n self._proposals = proposals\n return proposals", "def int_with_probability(list_of_values):\n sum_of_values = sum(list_of_values)\n\n # pick a random value from 0 to sum\n r = random.randrange(0, sum_of_values)\n new_sum = 0\n\n for item in list_of_values:\n new_sum += item\n if new_sum >= r:\n return item", "def postgen_list(self, x_list, s, nsamp):\n x_list = self.transform_xin_list(x_list)\n pred_list = self.sample_gp_post_pred(\n nsamp, x_list, full_cov=True, nloop=np.min([50, nsamp])\n )\n pred_list = [self.dt.inv_transform_y_data(pr) for pr in pred_list]\n return pred_list", "def sample_control(Pi, t):\n\n uvec, pvec = zip(*[(pi[t], pval) for pi, pval in Pi.items()\n if len(pi) > t])\n pvec = np.array(pvec) / sum(pvec)\n u = np.random.choice(uvec, p=pvec)\n\n return u", "def get_replace_prob(self, all_words):\n cur_tf_idf = collections.defaultdict(int)\n for word in all_words:\n cur_tf_idf[word] += 1. / len(all_words) * self.idf.get(word, 0)\n replace_prob = []\n for word in all_words:\n replace_prob += [cur_tf_idf[word]]\n replace_prob = np.array(replace_prob)\n replace_prob = np.max(replace_prob) - replace_prob\n if replace_prob.sum() != 0.0:\n replace_prob = replace_prob / replace_prob.sum() * self.token_prob * len(all_words)\n return replace_prob", "def _sample_position(self, positions, current_belief):\n new_belief = np.copy(current_belief)\n\n # Threshold Belief and re-normalize\n # If we are very sure of one goal we do not care about the others\n for i in range(self._num_goals):\n if current_belief[i] < self._belief_threshold:\n new_belief[i] = 0.0\n\n # print \"probs belief before:\"\n # print new_belief\n\n # if we are very unsure about one goal we do not use it\n if np.max(new_belief) == 0.0:\n new_belief = np.copy(current_belief)\n for i in range(self._num_goals):\n if current_belief[i] < self._belief_threshold_min:\n new_belief[i] = 0.0\n print \"using old belief above min threshold\"\n\n # this should never happen I think unless we have super many goals\n if np.max(new_belief) == 0.0:\n new_belief = np.copy(current_belief)\n print \"using old belief, should not happen\"\n\n # print \"probs belief:\"\n # print new_belief\n new_belief = new_belief / np.sum(new_belief)\n\n\n idx = np.random.choice(a=np.arange(len(positions)), p=new_belief)\n return np.asarray(positions[idx]), self._goals[idx]", "def frequency_to_probability_distribution(\n index_num, node_indv_vals, prop_or_freq, test=False\n):\n\n # Checks for NaN values\n if np.isnan(node_indv_vals).any():\n raise Exception('NaN value encountered in {} array'.format(prop_or_freq))\n\n if prop_or_freq == 'propensity':\n node_indv_vals = np.array(range(1, (node_indv_vals.shape[0]+1)))\n\n # Converts fitness scores into probabilities\n total = node_indv_vals.sum()\n\n node_probabilities = np.full(node_indv_vals.shape, np.nan)\n for index, val in np.ndenumerate(node_indv_vals):\n index = index[0] # Numpy array indices are tuples\n 
node_probabilities[index] = val / total\n\n # Randomly shuffles probability array, in order to avoid smallest\n # probabilities being grouped together at the beginning of the range\n if test is False:\n (index_num, node_indv_vals, node_probabilities) = random_shuffle(\n index_num, node_indv_vals, node_probabilities\n )\n\n return index_num, node_indv_vals, node_probabilities", "def adjustPValues(p_values, method=\"fdr\"):\n\tadjusted_p_values = p_values[:]\n\tn = len(p_values)\n\tif method.lower() == \"bh\" or method.lower() == 'fdr':\n\t\tni = range(n,0,-1) # from n to 1\n\t\t# Sort the P values and keep track of the indices\n\t\tindexed_pv = sorted(zip(p_values, range(n)), reverse=True)\n\t\t(pvals,inds) = zip(*indexed_pv)\n\t\t# adjust\n\t\tnewp = [(float(n)/ni[xi])*pvals[xi] for xi in range(n)]\n\t\tcum_min_p = [min(newp[0:xi]) for xi in range(1,n+1)]\n\t\tadjp_sorted = [min(p,1.0) for p in cum_min_p]\n\t\t# re-sort\n\t\tadjusted_p_values = [-1]*n\n\t\tfor xi in range(n):\n\t\t\tadjusted_p_values[inds[xi]] = adjp_sorted[xi]\n\telif method.lower() == 'bonferroni':\n\t\tadjusted_p_values = [min(n*p,1.0) for p in p_values]\n\treturn adjusted_p_values", "def percent(values, p=0.5):\n m = min(values)\n interval = max(values) - m\n return m + p*interval", "def resample(self, samplePool, weights):\n # newSamples = random.choices(samplePool, weights, k=self.numParticles)\n # Python 3.6 can do the previous, but now we need to do it by hand.\n newSamples = []\n newWeights = []\n for i in range(len(samplePool)):\n randVal = random.random()\n sampIndex = 0\n total = weights[0]\n while randVal >= total:\n sampIndex += 1\n total += weights[sampIndex]\n newSamples.append(samplePool[sampIndex])\n newWeights.append(weights[sampIndex])\n return newSamples, newWeights", "def sample(prediction):\n p = np.zeros(shape=[1, vocabulary_size], dtype=np.float)\n p[0, sample_distribution(prediction[0])] = 1.0\n return p", "def sample(prediction):\n p = np.zeros(shape=[1, vocabulary_size], dtype=np.float)\n p[0, sample_distribution(prediction[0])] = 1.0\n return p", "def sample(prediction):\n p = np.zeros(shape=[1, vocabulary_size], dtype=np.float)\n p[0, sample_distribution(prediction[0])] = 1.0\n return p", "def sdd(events,probs):\n \n import random\n nprobs=[x*1000 for x in probs] #so, here i multiply each float in 'probs' by 1000 and store the products in 'nprobs'\n newlist=[]\n for a in range(len(events)) : #then, in this loop, i create a list (newlist), in which each event appears 1000*its probability times\n b=nprobs[a]\n b=int(b)\n for c in range(b) :\n newlist.append(events[a]) \n return (random.choice(newlist)) #and finally, i ramdonly sample ", "def sample_by_replacing_constants(self, sample_args, expressions):\n max_children = sample_args.num_modules - 1\n if max_children <= 0:\n return\n if isinstance(expressions, ops.Op):\n expressions = [expressions]\n constants = ops.number_constants(expressions)\n if not constants:\n raise ValueError('No constants to replace in {}'\n .format([str(expr) for expr in expressions]))\n\n sample_count = random.randint(1, min(max_children, len(constants)))\n constants = random.sample(constants, sample_count)\n\n values = [constant.value for constant in constants]\n entities = self.sample(sample_args, values)\n for constant, entity in zip(constants, entities):\n constant.value = entity.handle", "def sample(x, p=None):\n s = np.random.random_sample()\n if p is None:\n return x[int(s*len(x))]\n else:\n p = np.cumsum(p)\n p = p / float(p[-1])\n return x[sum(s >= p)]", "def 
_interpolate_ppsd(freqs, spectrogram, fmin, fmax):\r\n # frequencies at which ppsd is evaluated\r\n f_new = np.logspace(np.log10(fmin), np.log10(fmax), 7500)\r\n\r\n # interpolate ppsds (colums of spectrogram) at the new frequencies\r\n wins = spectrogram.shape[1]\r\n spec_new = np.zeros((f_new.size, wins))\r\n for i in range(wins):\r\n f = interp.interp1d(freqs, spectrogram[:,i], kind=\"cubic\")\r\n spec_new[:,i] = f(f_new)\r\n return f_new, spec_new", "def subsample(self, proposals, targets):\n\n labels, regression_targets = self.prepare_targets(proposals, targets)\n sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)\n\n # # proposals = list(proposals)\n # # add corresponding label and regression_targets information to the bounding boxes\n # for labels_per_image, regression_targets_per_image, proposals_per_image in zip(\n # labels, regression_targets, proposals\n # ):\n # proposals_per_image.add_field(\"labels\", labels_per_image)\n # proposals_per_image.add_field(\n # \"regression_targets\", regression_targets_per_image\n # )\n\n # distributed sampled proposals, that were obtained on all feature maps\n # concatenated via the fg_bg_sampler, into individual feature map levels\n for img_idx, (pos_inds_img, neg_inds_img) in enumerate(\n zip(sampled_pos_inds, sampled_neg_inds)\n ):\n img_sampled_inds = torch.nonzero(pos_inds_img | neg_inds_img).squeeze(1)\n proposals[img_idx] = proposals[img_idx][img_sampled_inds]\n labels[img_idx] = labels[img_idx][img_sampled_inds]\n regression_targets[img_idx] = regression_targets[img_idx][img_sampled_inds]\n\n self._proposals = {\"proposals\": proposals, \"labels\": labels, \"regression_targets\": regression_targets}\n return proposals", "def set_interpolation_xs(self, vals):\n vals = np.array(vals)\n self._interp_xs = vals\n self.is_changed =True\n # self._interp_ys = None\n self.get_interpolation()", "def sample_unsupervised(\n self,\n n: int,\n items: List[str],\n embeddings: np.ndarray,\n max_replaces: int = 10,\n ) -> List[Tuple[str, np.ndarray]]:\n self._centroids = {} # Reset centroids when model gets trained\n \n # Create sampling list\n all_items = list(set(items)) * max_replaces\n shuffle(all_items)\n \n def get_other_item(item: str) -> np.ndarray:\n \"\"\"Get the embedding of another item.\"\"\"\n i = choice(range(len(items)))\n while items[i] == item:\n i = choice(range(len(items)))\n return embeddings[i]\n \n # Sample with (limited) replacement, unweighted sampling\n result = []\n for sample in np.random.choice(all_items, size=min(n, len(all_items)), replace=False):\n result.append((sample, get_other_item(sample)))\n return result", "def random_replacement(random, population, parents, offspring, args):\r\n num_elites = args.setdefault('num_elites', 0)\r\n population.sort(reverse=True)\r\n num_to_replace = min(len(offspring), len(population) - num_elites) \r\n valid_indices = range(num_elites, len(population))\r\n rep_index = random.sample(valid_indices, num_to_replace)\r\n for i, repind in enumerate(rep_index):\r\n population[repind] = offspring[i]\r\n return population", "def sample(self, policy_params, **kwargs):\n return self.head.sample(policy_params, **kwargs)", "def custom_pdf(self, cum_probs, values):\n rnd_num = random()\n for p in range(len(cum_probs)):\n if rnd_num < cum_probs[p]:\n return values[p]", "def sample_gp_post_pred(self, nsamp, input_list, full_cov=False, nloop=None):\n if (\n self.params.model_str == 'optfixedsig'\n or self.params.model_str == 'opt'\n or self.params.model_str == 'fixedparam'\n ):\n nloop = 
1\n sampids = [0]\n elif self.params.model_str == 'samp' or self.params.model_str == 'sampfixedsig':\n if nloop is None:\n nloop = nsamp\n nsamp = int(nsamp / nloop)\n sampids = np.random.randint(len(self.sample_list), size=(nloop,))\n ppred_list = []\n for i in range(nloop):\n samp = self.sample_list[sampids[i]]\n postmu, postcov = gp_post(\n self.data.x,\n self.data.y,\n np.stack(input_list),\n samp.ls,\n samp.alpha,\n samp.sigma,\n self.params.kernel,\n full_cov,\n )\n if full_cov:\n ppred_list.extend(list(sample_mvn(postmu, postcov, nsamp)))\n else:\n ppred_list.extend(\n list(\n np.random.normal(\n postmu.reshape(-1,),\n postcov.reshape(-1,),\n size=(nsamp, len(input_list)),\n )\n )\n )\n return list(np.stack(ppred_list).T)", "def post(self, s):\n return np.random.choice(self.sample_list)", "def random_replacement(random, population, parents, offspring, args):\n num_elites = args.setdefault('num_elites', 0)\n population.sort(reverse=True)\n num_to_replace = min(len(offspring), len(population) - num_elites) \n valid_indices = range(num_elites, len(population))\n rep_index = random.sample(valid_indices, num_to_replace)\n for i, repind in enumerate(rep_index):\n population[repind] = offspring[i]\n return population", "def choice(some_list, probabilities, max_probability=1):\n x = random.uniform(0, max_probability)\n cumulative_probability = 0.0\n\n for item, item_probability in zip(some_list, probabilities):\n cumulative_probability += item_probability\n if x < cumulative_probability: break\n\n return item", "def fitness_proportional(population, scores, next_gen_number, random_seed=42):\n\n np.random.seed(random_seed)\n\n score_array = np.array(scores)\n score_array = -score_array + abs(np.max(score_array))\n\n probabilities = score_array / np.sum(score_array)\n\n indices = list(range(len(population)))\n indices_array = np.array(indices)\n\n selected_indices = np.random.choice(\n indices_array, size=next_gen_number, p=probabilities\n )\n\n selected = []\n for indx in selected_indices:\n selected.append(population[indx])\n\n return selected", "def probability(self, samples):\n pass", "def deterministic_sample(choices, n_to_sample, p): # pylint: disable=invalid-name\n\n sample_counts = np.ceil(n_to_sample * p).astype(int)\n\n n_to_remove = np.sum(sample_counts) - n_to_sample\n\n if n_to_remove == 0:\n return choices[counts_to_vector(sample_counts)]\n\n non_zero_mask = sample_counts > 0\n\n removal_indices = np.floor(np.linspace(0.0,\n np.sum(non_zero_mask),\n n_to_remove,\n endpoint=False)).astype(int)\n\n tmp = sample_counts[non_zero_mask]\n tmp[removal_indices] = tmp[removal_indices] - 1\n\n sample_counts[non_zero_mask] = tmp\n\n assert np.sum(sample_counts) == n_to_sample\n\n samples = choices[counts_to_vector(sample_counts)]\n\n return samples", "def test_correct_p_values_mixed(self):\r\n exp = [None, 0.008, 0.01, None]\r\n obs = self.mc._correct_p_values([None, 0.004, 0.005, None])\r\n self.assertEqual(obs, exp)", "def mutate(pop, mut_prob, kd_min, kd_max, kp_min, kp_max, ki_min, ki_max):\n pop_curr = pop\n for i in range(0, len(pop_curr)):\n for o in range(3) :\n if random.random() < mut_prob:\n if random.random() < 0.5:\n pop_curr[i][o] = round(pop_curr[i][o] * 0.95, 2) #Maintains 2 d.p\n else :\n pop_curr[i][o] = round(pop_curr[i][o] * 1.05, 2)\n if pop_curr[i][0] > kd_max :\n pop_curr[i][0] = float(kd_max) \n if pop_curr[i][1] > kp_max :\n pop_curr[i][1] = float(kp_max)\n if pop_curr[i][2] > ki_max :\n pop_curr[i][2] = float(ki_max)\n return pop_curr", "def sample(self, probs):\n 
all_abstain = (self.label_matrix == -1).sum(axis=1) == self.label_matrix.shape[1]\n self.is_in_pool = (self.ground_truth_labels == -1) & ~ all_abstain & (self.y_train != -1)\n self.valid_buckets = np.unique(self.unique_inverse[self.is_in_pool])\n self.is_valid_bucket = np.array([\n True if i in self.valid_buckets else False for i in range(len(self.unique_idx))])\n self.bucket_probs = probs.detach().numpy()[self.unique_idx]\n\n pick = random.uniform(0, 1)\n if pick < self.randomness:\n # Choose random bucket instead of following a specific query strategy\n chosen_bucket = np.random.choice(self.valid_buckets)\n else:\n chosen_bucket = np.random.choice(self.query())\n\n return random.choice(np.where((self.unique_inverse == chosen_bucket) & self.is_in_pool)[0])", "def __sample_policy_action(probs):\n # Subtract a tiny value from probabilities in order to avoid\n # \"ValueError: sum(pvals[:-1]) > 1.0\" in numpy.multinomial\n probs = probs - np.finfo(np.float32).epsneg\n\n action_indexes = [int(np.nonzero(np.random.multinomial(1, p))[0]) for p in probs]\n############################################################################################\n # action_indexes = [np.argmax(p) for p in probs] #select the action with the highest probability instead of randomly sampling\n # print(action_indexes)\n # print('++++++++++++++++++++++++')\n############################################################################################\n return action_indexes", "def test_correct_p_values_no_change(self):\r\n exp = [None, 0.008]\r\n obs = self.mc._correct_p_values([None, 0.008])\r\n self.assertEqual(obs, exp)\r\n exp = [0.007]\r\n obs = self.mc._correct_p_values([0.007])\r\n assert_almost_equal(obs, exp)", "def sample_from(self, p):\n return np.searchsorted(np.cumsum(p), np.random.rand())", "def sample(self,f,N,p=100):\n return [f(x) for x in np.linspace(0,N,p)]", "def __init__(self, vals, probTable):\n\t\t\n\t\tself.myVals = vals\n\t\tif isinstance(probTable, list) or isinstance(probTable, tuple):\n\t\t\tself.probTable = {(): probTable}\n\t\telse:\n\t\t\tself.probTable = probTable", "def distribution(vals):\n tot = 0\n rv = []\n for v in vals:\n tot += v\n for v in vals:\n rv.append(v * 100 // tot)\n # rv.extend(sorted(rv))\n return tuple(rv)", "def get_freq_rand_conversions(xp, seed=0, minval=0.0, maxval=1.0):\n np.random.seed(seed)\n yp = np.cumsum(np.random.poisson(size=xp.shape))\n yp = ((maxval - minval) * (yp - yp.min())) / (yp.max() - yp.min()) + minval\n freq2rand = lambda x : np.interp(x, xp, yp)\n rand2freq = lambda y : np.interp(y, yp, xp)\n return freq2rand, rand2freq", "def multinomial_pmf(sample, probabilities):\r\n # TODO\r\n a=[]\r\n b=[]\r\n i=0\r\n key_list=[]\r\n value_list=[]\r\n for key,value in sample.items():\r\n key_list.append(key)\r\n value_list.append(value)\r\n b=list(sample)\r\n while i< len(b):\r\n a.append(probabilities.keys()[probabilities.values().index(value_list[i])])\r\n\r\n\r\n return a", "def select_item_with_prob(items_prob, n_inst):\n\n items = []\n for i in range(n_inst):\n pick_prob = np.random.uniform()\n\n values, probs = zip(*cum_sum_prob(items_prob))\n idx = bisect_left(probs, pick_prob)\n\n items.append(values[idx])\n\n return items", "def sample_list_item(\n x: List[Any],\n probs: Optional[np.ndarray],\n random_state: RandomState\n) -> Any:\n\n if probs is None:\n probs = np.repeat(1 / len(x), len(x))\n\n cdf_y_rand = random_state.random_sample()\n\n cum_probs = probs.cumsum()\n final_cum_prob = cum_probs[-1]\n\n if abs(1.0 - final_cum_prob) > 0.0000001:\n 
raise ValueError(f'Expected cumulative probabilities to sum to 1, but got {final_cum_prob} instead.')\n\n x_i = next(\n i\n for i, cum_prob in enumerate(cum_probs)\n if cdf_y_rand < cum_prob\n )\n\n return x[x_i]", "def _randomize_one(p, v):\n if any(p.endswith(s) for s in ('_pd_n', '_pd_nsigma', '_pd_type')):\n return v\n else:\n return np.random.uniform(*parameter_range(p, v))", "def test_rejection_sampling():\n # Check that it works with a numpy array\n original_samples = np.random.uniform(0, 10, (n_samples, n_params))\n weights = np.random.uniform(0, 5, n_samples)\n new_samples = rejection_sampling(original_samples, weights)\n # new_samples should have less samples than what we started with originally\n assert len(new_samples) <= n_samples\n # Each sample should be in the original posterior table\n assert all(new_sample in original_samples for new_sample in new_samples)\n # Each sample should be unique\n unique = np.unique(new_samples, axis=0)\n assert len(unique) == len(new_samples)\n\n # Now check that it works as expected for the\n # pesummary.utils.samples_dict.SamplesDict object\n original_samples = SamplesDict(\n {param: np.random.uniform(0, 10, n_samples) for param in gw_parameters()}\n )\n weights = np.random.uniform(0, 5, n_samples)\n new_samples = rejection_sampling(original_samples, weights)\n assert new_samples.number_of_samples <= original_samples.number_of_samples\n assert new_samples.parameters == original_samples.parameters\n assert all(\n new_sample in original_samples.samples.T for new_sample in\n new_samples.samples.T\n )", "def measurement_update(particles, measured_marker_list, grid: CozGrid):\n\n # step 1 - set weights\n nresample, weighting = measurement_update_set_weights(particles, measured_marker_list, grid)\n\n # step 2a - equal weight in situations with no weights\n norm_factor = sum(weighting)\n if norm_factor == 0.0 or len(measured_marker_list) < 1:\n weighting = [(1.0 / float(len(particles)))] * len(particles)\n else:\n weighting = [w / norm_factor for w in weighting]\n\n # step 2b - probabilistic re-sampling\n NSAMPLE = min(500, min(len(particles), 50 + nresample))\n\n samples = np.random.choice(particles, size = (len(particles) - NSAMPLE), p = weighting).tolist()\n samples = [Particle(oldParticle.x, oldParticle.y, heading=oldParticle.h) for oldParticle in samples]\n\n # step 2c - pad array with random values sampled iid\n random_samples = [grid.random_free_place() for i in range(NSAMPLE)]\n for x, y in random_samples:\n samples.append(Particle(x, y))\n\n return samples", "def shuffle(values):\n num_values = len(values)\n for v in range(num_values):\n # Get a random, different index\n s = v + int(random() * (num_values - v))\n # Swap values\n values[s], values[v] = values[v], values[s]\n return values", "def mutate(chromosomes, gene_pool, mutation_probability, seq_to_fitness):\n mutated_chromosomes = []\n\n for chromosome in chromosomes:\n mutated_chromosome = list(chromosome)\n chromosome_size = len(mutated_chromosome)\n number_of_different_chromosomes = len(gene_pool) ** chromosome_size\n\n for i in range(chromosome_size):\n if random.randint(1, 100) <= mutation_probability:\n mutated_chromosome[i] = random.choice(gene_pool)\n\n num_seq = 0\n\n while str(\n mutated_chromosome) in seq_to_fitness and num_seq < \\\n number_of_different_chromosomes:\n mutated_chromosome[\n random.randint(0, chromosome_size - 1)] = random.choice(\n gene_pool)\n num_seq += 1\n\n mutated_chromosomes.append(mutated_chromosome)\n\n return mutated_chromosomes", "def sample(\n x: 
Union[IntType, Iterable[Any]],\n size: int = None,\n replace: bool = False,\n prob: Iterable[NumericType] = None,\n) -> Iterable[Any]:\n if isinstance(x, str):\n x = list(x)\n if size is None:\n size = len(x) if is_iterable(x) else x\n return numpy.random.choice(x, int(size), replace=replace, p=prob)", "def resample_population(ps, log_weights, rng):\n\n n_particles = ps.shape[0]\n idx = util.math.discrete_sample(np.exp(log_weights), n_particles, rng=rng)\n ps = ps[idx]\n\n return ps", "def __call__(self, *args):\n r = np.random.rand(*args)\n if type(r) is float:\n samples = self.values[(r < self.p).nonzero()[0][0]]\n elif type(r) is np.ndarray:\n samples = np.array(\n [self.values[np.nonzero(x < self.p)[0][0]] \n for x in r.flat]).reshape(r.shape)\n return samples", "def compute_power(pvals, SNPs):\n\tnsnps = len(pvals)\n\tall_snps = np.arange(0, nsnps)\n\tpos = SNPs\n\tnegs = list(set(all_snps) - set(SNPs))\n\n\tpvals_rank = rank_array(pvals)\n\n\trocr = np.zeros((nsnps, 2))\n\tfor i in all_snps:\n\t\tv = pvals_rank[0:i] # test positives\n\t\tz = list(set(all_snps) - set(v)) # test negatives\n\n\t\tTP = len(set(v) & set(pos))\n\t\tFP = len(set(v) & set(negs))\n\t\tTN = len(set(z) & set(negs))\n\t\tFN = len(set(z) & set(pos))\n\n\t\tTPR = 1.0*TP/(TP+FN); FPR = 1.0*FP/(FP+TN); #FDR = 1.0*FP/(FP+TP)\n\n\t\trocr[i, :] = [FPR, TPR]\n\n\treturn rocr", "def simPP(intensity,bound):\r\n\r\n N=np.random.poisson(bound)\r\n homPP=np.random.uniform(size=N)\r\n PP=np.array([s for s in homPP if bound*np.random.ranf()<=intensity(s)])\r\n\r\n return PP", "def sample_transitions(self, val) -> None:\n\n # get values\n states = val.states\n pi_conc = val.transitions_conc\n num_states = val.num_states\n\n # count the number of each transition that occurs\n counts = np.zeros((num_states + 1, num_states))\n for i in range(num_states):\n counts[-1, i] = np.sum(states[:, :, 0] == i)\n for j in range(num_states):\n counts[i, j] = np.sum((states[:, :, :-1] == i) * (states[:, :, 1:] == j))\n counts[-1, -1] = 0 # fluorophores starting photobleached are interpretted as load off only\n\n # sample from dirichlet distribution\n val.transitions = Dirichlet.sample(counts + pi_conc)\n val.P = self.posterior(val)\n\n return", "def benjamini_hochberg_step_down(pvals):\r\n tmp = fdr_correction(pvals)\r\n corrected_vals = empty(len(pvals))\r\n max_pval = 1.\r\n for i in argsort(pvals)[::-1]:\r\n if tmp[i] < max_pval:\r\n corrected_vals[i] = tmp[i]\r\n max_pval = tmp[i]\r\n else:\r\n corrected_vals[i] = max_pval\r\n return corrected_vals", "def _generate_p(self):\n self._values, weights = zip(*self._weights.items())\n cumsum = list(itertools.accumulate(weights))\n total = cumsum[-1]\n self._p = [i / total for i in cumsum]", "def _sample_proposals_mod(\n\t\tself, matched_idxs: torch.Tensor, matched_labels: torch.Tensor, gt_classes: torch.Tensor\n\t) -> Tuple[torch.Tensor, torch.Tensor]:\n\t\thas_gt = gt_classes.numel() > 0\n\t\t# Get the corresponding GT for each proposal\n\t\tif has_gt:\n\t\t\tgt_classes = gt_classes[matched_idxs]\n\t\t\t# Label unmatched proposals (0 label from matcher) as background (label=num_classes)\n\t\t\tgt_classes[matched_labels == 0] = self.num_classes\n\t\t\t# Label ignore proposals (-1 label)\n\t\t\tgt_classes[matched_labels == -1] = -1\n\t\telse:\n\t\t\tgt_classes = torch.zeros_like(matched_idxs) + self.num_classes\n\t\tN = matched_idxs.size()[0]\n\t\tsampled_idxs = torch.arange(N) \n\t\treturn sampled_idxs, gt_classes", "def sample_response(self, slate_p):\n slate_p[slate_p >= 0.5] = 1.0\n 
slate_p[slate_p < 0.5] = 0.0\n# m = Bernoulli(slate_p)\n# return m.sample()\n return slate_p", "def edit_probs(result):\n for i in range(TOP_E):\n p = result.data[i][1]\n p = round(p, 4)\n # p_str = str(p)[1:]\n result.data[i][1] = p\n\n return result", "def with_random_trials(\r\n cls, ranges: typing.Tuple[\r\n typing.Union[int, set, tuple]],\r\n points: int, trials: int, r_eps: float = 0.01) -> 'FixedSweep':\r\n\r\n # convert ints into the corresponding integer range, lists into tuples\r\n # for convenience (since lists usually can be substituted in where we\r\n # expect tuples), and then convert sets to lists. We will use the list\r\n # type to indicate treating it like a set instead of the set type, b/c\r\n # random.choice expects list-like not set-like.\r\n\r\n # We also ensure that if any element is a float than so is the first\r\n # one for ranges.\r\n\r\n ranges = list(ranges)\r\n for i in range(len(ranges)):\r\n if isinstance(ranges[i], int):\r\n ranges[i] = (0, ranges[i])\r\n elif isinstance(ranges[i], list):\r\n ranges[i] = tuple(ranges[i])\r\n elif isinstance(ranges[i], (set, frozenset)):\r\n ranges[i] = list(ranges[i])\r\n\r\n if isinstance(ranges[i], tuple):\r\n if isinstance(ranges[i][1], float):\r\n ranges[i] = (float(ranges[i][0]), ranges[i][1])\r\n\r\n\r\n\r\n results = []\r\n def should_reject(pms):\r\n for res in results:\r\n num_same = 0\r\n for (rge, v1, v2) in zip(ranges, res, pms):\r\n are_same = v1 == v2\r\n if (v1 == v2 or (\r\n isinstance(rge, tuple)\r\n and abs(v1 - v2) <= r_eps * (rge[1] - rge[0]))):\r\n num_same += 1\r\n if num_same == len(pms):\r\n return True\r\n return False\r\n\r\n n_rejections = 0\r\n while len(results) < points:\r\n params = []\r\n for rg in ranges:\r\n if isinstance(rg, list):\r\n params.append(random.choice(rg))\r\n elif isinstance(rg[0], float):\r\n params.append(random.uniform(rg[0], rg[1]))\r\n else:\r\n params.append(random.randint(rg[0], rg[1]))\r\n if not should_reject(params):\r\n results.append(tuple(params))\r\n else:\r\n n_rejections += 1\r\n if n_rejections > points * 2:\r\n raise ValueError('exceeded max number of rejections: '\r\n + str(n_rejections))\r\n\r\n return cls.with_fixed_trials(results, trials)", "def get_good_val(vals=good_val):\n\n # Return a value from the distribution randomly\n return vals[rand.randint(0, (vals.shape[0] - 1))]", "def _get_random_sample(self):\n p=np.zeros(len(self.dim_ranges))\n for i in range(len(self.dim_ranges)):\n temp=np.linspace(self.dim_ranges[i][0],self.dim_ranges[i][1],1000)\n p[i]=np.random.choice(temp,1,True,None)\n\n return p", "def beta_gen_nonneg(p):\n return np.clip(np.random.normal(0, 3, p), 0, None)", "def __mutate(self, chromosomes, mutation_probability):\n\n for chromosome in chromosomes:\n for i in range(self.chromosome_size):\n if random.randint(1, 100) <= mutation_probability:\n logging.getLogger().debug(\n \"---> Mutation in Chromosome \" + str(\n chromosome.chromosome_id) + \"in gene \" + str(i)\n + \" <---\")\n chromosome.genes[i] = random.choice(self.gene_pool)" ]
[ "0.62992084", "0.5945291", "0.57297397", "0.55846256", "0.5577901", "0.5562105", "0.54901296", "0.539993", "0.5361088", "0.5338176", "0.5308725", "0.53033537", "0.5288212", "0.52389336", "0.52308893", "0.52133656", "0.520034", "0.5190075", "0.5185366", "0.5180876", "0.51693255", "0.5142584", "0.5121982", "0.5107681", "0.50763744", "0.50434035", "0.5041478", "0.50366807", "0.5031274", "0.5031274", "0.5017597", "0.5003785", "0.49881947", "0.49806157", "0.49759483", "0.49679238", "0.49641722", "0.4956856", "0.49530888", "0.49300623", "0.49176362", "0.4907002", "0.4893376", "0.4891962", "0.48882005", "0.48867676", "0.48836368", "0.48836368", "0.48836368", "0.48807877", "0.48804212", "0.48776534", "0.48763922", "0.4875008", "0.48743346", "0.48719904", "0.4863384", "0.48572412", "0.48560888", "0.48500195", "0.4842672", "0.48386216", "0.48346555", "0.48269033", "0.48192978", "0.48152885", "0.48088628", "0.48042938", "0.47968078", "0.47863793", "0.47748905", "0.4767546", "0.47641003", "0.47519112", "0.47455746", "0.47383273", "0.47317395", "0.47313705", "0.4721713", "0.47201115", "0.47143826", "0.47132617", "0.47110847", "0.47074702", "0.46981817", "0.46963084", "0.46940035", "0.46889868", "0.46747383", "0.46686387", "0.4665554", "0.4665281", "0.46544114", "0.46468338", "0.46446833", "0.4632915", "0.46314383", "0.46282342", "0.46253002", "0.46178073" ]
0.6719965
0
Sample from the categorical distribution using `pvals`.
def categorical(pvals: np.ndarray) -> int:
    return sample_probabilities(pvals)()  # faster than: np.argmax(np.random.multinomial(1, normalize(pvals)))
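The `categorical` document above relies on a `sample_probabilities` helper that is not defined in this record (one of the negatives below shows it returning a `Sampler` built from `np.cumsum(pvals)`). As a rough, hedged illustration only — the `Sampler` class is not available here, so a lambda over a cumulative-sum lookup stands in for it — a minimal self-contained sketch might look like:

import numpy as np
from typing import Callable

def sample_probabilities(pvals: np.ndarray) -> Callable[[], int]:
    # Assumed helper (not part of this record): precompute the cumulative
    # distribution once, then draw an index i with probability pvals[i]
    # by inverting the CDF with a binary search.
    cumulative = np.cumsum(pvals)
    return lambda: int(np.searchsorted(cumulative, np.random.random_sample() * cumulative[-1]))

def categorical(pvals: np.ndarray) -> int:
    return sample_probabilities(pvals)()

# Usage sketch: sampled indices should appear roughly in proportion to the weights.
counts = np.bincount([categorical(np.array([0.2, 0.3, 0.5])) for _ in range(10_000)], minlength=3)
print(counts / counts.sum())  # approximately [0.2, 0.3, 0.5]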
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sample_probabilities(pvals: np.ndarray) -> Callable[[], int]:\n\n return Sampler(np.cumsum(pvals))", "def sample_categorical(distribution):\n sample = random.random()\n for event, prob in distribution.items():\n if sample < prob:\n return event\n sample -= prob\n raise ValueError('sum of distribution less than one')", "def get_Sample(self, values, probabilities):\r\n return choices(values,probabilities)\r\n # return np.random.choice(values,p=probabilities)\r", "def __call__(self, *args):\n r = np.random.rand(*args)\n if type(r) is float:\n samples = self.values[(r < self.p).nonzero()[0][0]]\n elif type(r) is np.ndarray:\n samples = np.array(\n [self.values[np.nonzero(x < self.p)[0][0]] \n for x in r.flat]).reshape(r.shape)\n return samples", "def categorical_sample(prob_n, np_random):\n prob_n = np.asarray(prob_n)\n csprob_n = np.cumsum(prob_n)\n return (csprob_n > np_random.rand()).argmax()", "def categorical_sample(prob_n, np_random):\n prob_n = np.asarray(prob_n)\n csprob_n = np.cumsum(prob_n)\n return (csprob_n > np_random.rand()).argmax()", "def sample(probs):\n\n probs = probs / probs.sum()\n return np.random.choice(np.arange(len(probs)), p=probs.flatten())", "def sample(self):\n\n # pick sample type according to probability\n samplers = [\"unif\", \"geo\", \"diverse\"]\n sample_idx = np.random.multinomial(\n 1, [self.unif_prob, self.geo_prob, self.diverse_prob])\n idx = np.argmax(sample_idx)\n sampler = samplers[idx]\n\n if sampler == \"unif\":\n return self.unif_sampler()\n if sampler == \"geo\":\n return self.geo_sampler()\n if sampler == \"diverse\":\n return self.diverse_sampler()", "def categorical_sample(prob_n, np_random = None):\n prob_n = np.asarray(prob_n)\n csprob_n = np.cumsum(prob_n)\n return (csprob_n > np.random.rand()).argmax()", "def random_choice(p, size):\n k = p.shape[-1]\n\n if p.ndim > 1:\n # If p is an nd-array, the last axis is interpreted as the class\n # probability. 
We must iterate over the elements of all the other\n # dimensions.\n # We first ensure that p is broadcasted to the output's shape\n size = to_tuple(size) + (1,)\n p = np.broadcast_arrays(p, np.empty(size))[0]\n out_shape = p.shape[:-1]\n # np.random.choice accepts 1D p arrays, so we semiflatten p to\n # iterate calls using the last axis as the category probabilities\n p = np.reshape(p, (-1, p.shape[-1]))\n samples = np.array([np.random.choice(k, p=p_) for p_ in p])\n # We reshape to the desired output shape\n samples = np.reshape(samples, out_shape)\n else:\n samples = np.random.choice(k, p=p, size=size)\n return samples", "def sample(prediction):\n p = np.zeros(shape=[1, vocabulary_size], dtype=np.float)\n p[0, sample_distribution(prediction[0])] = 1.0\n return p", "def sample(prediction):\n p = np.zeros(shape=[1, vocabulary_size], dtype=np.float)\n p[0, sample_distribution(prediction[0])] = 1.0\n return p", "def sample(prediction):\n p = np.zeros(shape=[1, vocabulary_size], dtype=np.float)\n p[0, sample_distribution(prediction[0])] = 1.0\n return p", "def sample(self, policy_params, **kwargs):\n return self.head.sample(policy_params, **kwargs)", "def discrete_sample(p, n_samples=None, rng=np.random):\n\n # check distribution\n # assert isdistribution(p), 'Probabilities must be non-negative and sum to one.'\n\n one_sample = n_samples is None\n\n # cumulative distribution\n c = np.cumsum(p[:-1])[np.newaxis, :]\n\n # get the samples\n r = rng.rand(1 if one_sample else n_samples, 1)\n samples = np.sum((r > c).astype(int), axis=1)\n\n return samples[0] if one_sample else samples", "def sample_discrete(probs):\r\n q = np.random.rand()\r\n i = 0\r\n p_sum = 0.0\r\n while p_sum < q:\r\n p_sum += probs[i]\r\n i += 1\r\n return i - 1", "def sample(self, probabilities):\n return self.sample_bernoulli(probabilities)", "def sample(pi, sigma, mu):\n # print(\"sample: pi:\", pi.size(), pi)\n categorical = Categorical(pi)\n pis = list(categorical.sample().data)\n sample = Variable(sigma.data.new(sigma.size(0), sigma.size(2)).normal_())\n for i, idx in enumerate(pis):\n sample[i] = sample[i].mul(sigma[i,idx]).add(mu[i,idx])\n return sample", "def select_five_categories(prob_dist_dict):\n # For clarity, save keys as labels and values as probabilities.\n labels = list( prob_dist_dict.keys() )\n probs = list( prob_dist_dict.values() )\n\n # Use numpy's .choice() to return a label based on the given weight.\n return list( np.random.choice(labels, 5, p=probs) )", "def prob_choice(p):\n \n return np.random.random_sample() < p", "def sample_data(_,\n val,\n sampling_strategy=spec.SamplingStrategy.UNDERSAMPLE,\n side=0):\n\n if sampling_strategy == spec.SamplingStrategy.UNDERSAMPLE:\n random_sample_data = random.sample(val, side)\n elif sampling_strategy == spec.SamplingStrategy.OVERSAMPLE:\n random_sample_data = random.choices(val, k=side)\n else:\n raise ValueError(\"Invalid value for sampling_strategy variable!\")\n\n for item in random_sample_data:\n yield item", "def _sample(preds, temperature=1.0):\n preds = np.asarray(preds).astype('float64')\n preds = np.log(preds) / temperature\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n probas = np.random.multinomial(1, preds, 1)\n return np.argmax(probas)", "def discrete_rv(p):\n u = np.random.uniform()\n cdf = np.cumsum(p)\n j = np.searchsorted(cdf, u)\n return j", "def sample_from(self, p):\n return np.searchsorted(np.cumsum(p), np.random.rand())", "def _get_sample(self, p: float) -> np.ndarray:\n return np.where(self.rand_array >= p, 0, 
1)", "def sample(self, s):\n rng = np.random.default_rng()\n return rng.choice(np.arange(self.n_actions), p=self.eval(s))", "def _get_random_sample(self):\n p=np.zeros(len(self.dim_ranges))\n for i in range(len(self.dim_ranges)):\n temp=np.linspace(self.dim_ranges[i][0],self.dim_ranges[i][1],1000)\n p[i]=np.random.choice(temp,1,True,None)\n\n return p", "def sample(x, p=None):\n s = np.random.random_sample()\n if p is None:\n return x[int(s*len(x))]\n else:\n p = np.cumsum(p)\n p = p / float(p[-1])\n return x[sum(s >= p)]", "def discrete_uniform_sampler(upper_value):\n return int(np.random.random() * upper_value)", "def sample_transitions(self, val) -> None:\n\n # get values\n states = val.states\n pi_conc = val.transitions_conc\n num_states = val.num_states\n\n # count the number of each transition that occurs\n counts = np.zeros((num_states + 1, num_states))\n for i in range(num_states):\n counts[-1, i] = np.sum(states[:, :, 0] == i)\n for j in range(num_states):\n counts[i, j] = np.sum((states[:, :, :-1] == i) * (states[:, :, 1:] == j))\n counts[-1, -1] = 0 # fluorophores starting photobleached are interpretted as load off only\n\n # sample from dirichlet distribution\n val.transitions = Dirichlet.sample(counts + pi_conc)\n val.P = self.posterior(val)\n\n return", "def random_bins(num_classes, dist):\n N = dist.shape[0]\n bins = np.empty([N,1], dtype=np.int32)\n \n for i in range(N):\n smpl = np.random.choice(num_classes, p=dist[i,:]/np.sum(dist[i,:]))\n bins[i,0] = smpl\n \n return bins", "def sample(self):\n return gc.rand_state.choice(self.domain)", "def sample_filter(val, count=None):\n if count is None:\n # Return a single value\n try:\n return random.sample(list(val), 1)[0]\n except ValueError:\n return None\n else:\n # Return a list\n try:\n return random.sample(list(val), count)\n except ValueError:\n return []", "def test_categorical():\n # assert the distribution of the samples is close to the distribution of the data\n # using cstest:\n # - uniform (assert p-value > 0.05)\n # - very skewed / biased? 
(assert p-value > 0.05)\n # - inversely correlated (assert correlation < 0)", "def sample(self, shape):\n ps = np.random.random(shape)\n return self.values(ps)", "def create_random_sample(idx_bins,count_array):\n idxs=[]\n for i,x in enumerate(count_array):\n if x > 0:\n idxs.extend(np.random.choice(idx_bins[i],size=x,replace=False))\n return idxs", "def sample(a, p):\n if (len(a) != len(p)):\n raise Exception('a != p')\n p = np.array(p)\n p = p / p.sum()\n r = random.random()\n n = len(a)\n total = 0 # range: [0,1]\n for i in xrange(n):\n total += p[i]\n if total > r:\n return a[i]\n return a[i]", "def _sample_using_random(\n self,\n p: float = 0.1,\n ):\n return sa.func.random() < p", "def sample(self, probs):\n all_abstain = (self.label_matrix == -1).sum(axis=1) == self.label_matrix.shape[1]\n self.is_in_pool = (self.ground_truth_labels == -1) & ~ all_abstain & (self.y_train != -1)\n self.valid_buckets = np.unique(self.unique_inverse[self.is_in_pool])\n self.is_valid_bucket = np.array([\n True if i in self.valid_buckets else False for i in range(len(self.unique_idx))])\n self.bucket_probs = probs.detach().numpy()[self.unique_idx]\n\n pick = random.uniform(0, 1)\n if pick < self.randomness:\n # Choose random bucket instead of following a specific query strategy\n chosen_bucket = np.random.choice(self.valid_buckets)\n else:\n chosen_bucket = np.random.choice(self.query())\n\n return random.choice(np.where((self.unique_inverse == chosen_bucket) & self.is_in_pool)[0])", "def sample(self):\n # return [v.sample() for v in self.variables]\n return self.domain[gc.rand_state.choice(len(self.domain))]", "def sample_control(Pi, t):\n\n uvec, pvec = zip(*[(pi[t], pval) for pi, pval in Pi.items()\n if len(pi) > t])\n pvec = np.array(pvec) / sum(pvec)\n u = np.random.choice(uvec, p=pvec)\n\n return u", "def sample(self, i_episode, action_values):\n sigma = max(self.max_sigma + (self.min_sigma - self.max_sigma)/self.end_episode * i_episode, self.min_sigma) \n return np.random.normal(action_values, sigma)", "def post(self, s):\n return np.random.choice(self.sample_list)", "def sample(preds, temperature=1.0):\n preds = np.asarray(preds).astype('float64')\n preds = np.log(preds) / temperature\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n probas = np.random.multinomial(1, preds, 1)\n return(np.argmax(probas))", "def sample_action(policy, state):\n nS, nA = policy.shape\n all_actions = np.arange(nA)\n return np.random.choice(all_actions, p=policy[state])", "def random_samples(bins, dist):\n N = dist.shape[0]\n samples = np.empty([N,1], dtype=np.float32)\n \n for i in range(N):\n smpl = np.random.choice(bins, p=dist[i,:]/np.sum(dist[i,:]))\n samples[i,0] = smpl.astype(np.float32)\n \n return samples", "def ppf(self,x):\n return self.categoricalDist.ppf(x)", "def conditional_sample(p, y, temperature, key):\n tol = 1e-7\n p = np.clip(p, tol, 1 - tol)\n\n v = random.uniform(key, shape=y.shape)\n v_prime = (v * p + (1 - p)) * y + (v * (1 - p)) * (1 - y)\n v_prime = np.clip(v_prime, tol, 1 - tol)\n\n logit_v = logit(v_prime)\n logit_p = logit(p)\n return nn.sigmoid((logit_p + logit_v) / (temperature + tol))", "def __sample_policy_action(probs):\n # Subtract a tiny value from probabilities in order to avoid\n # \"ValueError: sum(pvals[:-1]) > 1.0\" in numpy.multinomial\n probs = probs - np.finfo(np.float32).epsneg\n\n action_indexes = [int(np.nonzero(np.random.multinomial(1, p))[0]) for p in 
probs]\n############################################################################################\n # action_indexes = [np.argmax(p) for p in probs] #select the action with the highest probability instead of randomly sampling\n # print(action_indexes)\n # print('++++++++++++++++++++++++')\n############################################################################################\n return action_indexes", "def probability_categorical(feature, label):\n assert feature.nunique()>2, 'feature category nums must be greater than 2.'\n t = pd.DataFrame({'feature':feature, 'label':label})\n cat = label.unique()\n cat = [(cat[i], cat[i+1]) for i in range(len(cat)-1)]\n prob = label.value_counts(1).to_dict()\n slope = [prob.get(i[0], 0)-prob.get(i[1], 0) for i in cat]\n \n slope_dict = t.feature.value_counts(1).to_dict()\n prob = t.groupby([ 'feature']).label.value_counts(1).to_dict()\n slope_dict = {i:{'category_rate':slope_dict[i], 'slope':[prob.get((i,j[0]), 0)-prob.get((i,j[1]), 0) for j in cat]} for i in slope_dict}\n for i in slope_dict:\n slope_dict[i]['slope_diff'] = sum([abs(slope[j]-slope_dict[i]['slope'][j]) for j in range(len(slope))])\n value1 = sorted([[[i], slope_dict[i]['slope_diff'], slope_dict[i]['category_rate']] for i in slope_dict], key=lambda x:x[1], reverse=1)\n distance = sorted([value1[i][1]-value1[i+1][1] for i in range(len(value1)-1)])\n std = pd.Series([i[1] for i in value1]).std()\n coupe = value1\n dis = distance[0]\n for k in distance:\n value = value1\n while 1:\n for i in range(len(value)-1):\n if value[i][1]-k<value[i+1][1]:\n value[i+1][0] = value[i][0]+value[i+1][0]\n value[i+1][1] = value[i][1]*value[i][2]/(value[i][2]+value[i+1][2])+value[i+1][1]*value[i+1][2]/(value[i][2]+value[i+1][2])\n value[i+1][2] = value[i][2]+value[i+1][2]\n value.remove(value[i])\n break\n if i==len(value)-2:\n break\n if pd.Series([i[1] for i in value]).std()>std:\n coupe = value\n std = pd.Series([i[1] for i in value]).std()\n dis = k\n return {'group':{k:i for i,j in enumerate(coupe) for k in j[0]}, 'data':coupe, \n 'distance':dis, 'distance_index':f'{distance.index(dis)+1}/{len(distance)}', 'std':std}", "def random_times(p):\n while True:\n if sum(p.values()) != 1:\n raise ValueError('Probabilities must sum to unity')\n r = random.random()\n remaining = 1\n for category, probability in p.items():\n remaining -= probability\n if remaining <= r:\n yield category\n break", "def subsampling(dataset, class_column_index, class_max_count, class_dict):\n out = []\n for row in dataset:\n cls = row[class_column_index]\n rInt = np.random.randint(0, class_dict[cls])\n if rInt <= class_max_count:\n out.append(row)\n ss_data = np.array(out)\n\n return ss_data", "def sample(params):\n\n config = {}\n\n for param, value in params.items():\n if hasattr(value, 'rvs'):\n # this is a scipy.stats distribution\n config[param] = value.rvs()\n else:\n # this is a tuple\n config[param] = random.choice(value)\n\n return config", "def sample_from_mixture(x, pred_weights, pred_means, pred_std, amount):\n samples = np.zeros((amount, 2))\n n_mix = len(pred_weights[0])\n to_choose_from = np.arange(n_mix)\n for j, (weights, means, std_devs) in enumerate(\n zip(pred_weights, pred_means, pred_std)):\n index = np.random.choice(to_choose_from, p=weights)\n samples[j, 1] = np.random.normal(means[index], std_devs[index], size=1)\n samples[j, 0] = x[j]\n\n if j == amount - 1:\n break\n return samples", "def _sample_with_replacement(logits, n_samples):\n if hasattr(K.tf, \"random\") and hasattr(K.tf.random, 
\"categorical\"):\n return K.tf.random.categorical(logits, n_samples, dtype=\"int32\")\n else:\n return K.tf.multinomial(logits, n_samples, output_dtype=\"int32\")", "def df_categorical_column(category_values, num_rows=100, probabilities=None):\n splitter = np.random.choice(range(len(category_values)), num_rows, p=probabilities)\n return pd.Series(pd.Categorical.from_codes(splitter, categories=category_values))", "def sample_prob(probs):\n return tf.to_float(tf.random_uniform(tf.shape(probs)) <= probs)", "def sample_prob(probs):\n return tf.to_float(tf.random_uniform(tf.shape(probs)) <= probs)", "def test_get_category_value_to_sample_ids(self):\r\n test_data = get_test_data()\r\n actual = get_category_value_to_sample_ids(\r\n test_data['map'],\r\n 'SampleType')\r\n expected = {'feces': ['f1', 'f2', 'f3', 'f4', 'f5', 'f6'],\r\n 'L_palm': ['p1', 'p2'],\r\n 'Tongue': ['t1', 't2'],\r\n 'Other': ['not16S.1']}\r\n self.assertEqual(actual, expected)\r\n\r\n actual = get_category_value_to_sample_ids(test_data['map'], 'year')\r\n expected = {'2008': ['f1', 'f2', 'f3', 'f4', 'f5', 'f6',\r\n 'p1', 'p2', 't1', 't2', 'not16S.1']}\r\n self.assertEqual(actual, expected)\r\n\r\n self.assertRaises(ValueError,\r\n get_category_value_to_sample_ids,\r\n test_data['map'],\r\n 'not.a.real.category')", "def sample(self):\n return self.items[self.np_random.choice(len(self.items))]", "def uniform_sample(x):\n return np.random.choice(x)", "def sample_bool(p=.5):\n return bool(np.random.choice([True, False], p=[p, 1-p]))", "def sample(cdf):\n p = rand()\n #this line is for rounding errors which will cause binary_search to return\n #an index that is out of bounds\n if p == 1.0:\n return cdf[-1]\n else:\n return binary_search(cdf, p)", "def weighted_values(values, probabilities, size):\n bins = np.add.accumulate(probabilities)\n indices = np.digitize(random_sample(size), bins)\n sample = []\n for ind in indices:\n sample.append(deepcopy(values[ind]))\n return sample", "def get_sample(config, n_sample=1):\n if config['distribution'] == 'binary':\n data = np.random.choice([0, 1], size=n_sample, replace=True, p=config['pmf'])\n\n elif config['distribution'] == 'discrete':\n data = np.random.choice(config['category'], size=n_sample, replace=True, p=config['pmf'])\n\n elif config['distribution'] == 'uniform':\n assert float(config['min']) < float(config['max'])\n data=np.random.uniform(low=float(config['min']),high=float(config['max']),size=n_sample)\n\n elif config['distribution'] == 'gaussian':\n data=np.random.normal(loc=float(config['mean']),scale=float(config['std']),size=n_sample)\n data = np.maximum(data, float(config['min']))\n data = np.minimum(data, float(config['max']))\n\n elif config['distribution'] == 'uniform_int':\n if int(config['min'])==int(config['max']):\n data=int(config['min'])*np.ones((n_sample,),dtype='int32')\n else:\n data=np.random.randint(int(config['min']),high=int(config['max']),size=n_sample)\n\n else:\n log.warning('Warning: unknown distribution type: %s' % config['distribution'])\n data = []\n\n return data", "def sample_response(self, slate_p):\n slate_p[slate_p >= 0.5] = 1.0\n slate_p[slate_p < 0.5] = 0.0\n# m = Bernoulli(slate_p)\n# return m.sample()\n return slate_p", "def pr2sample(self, df_prval):\n try:\n var_rand = list(self.marginals.keys())\n except AttributeError:\n var_rand = []\n\n ## Empty case\n if len(var_rand) == 0:\n return DataFrame()\n\n ## Variables to convert\n var_comp = list(set(var_rand).intersection(set(df_prval.columns)))\n if len(var_comp) == 0:\n raise 
ValueError(\n \"Intersection of df_prval.columns and var_rand must be nonempty\"\n )\n\n samples = zeros(df_prval[var_comp].shape)\n ## Ensure correct column ordering\n prval = df_prval[var_comp].values\n\n ## Apply appropriate marginal\n for ind in range(len(var_comp)):\n ## Map with inverse density\n var = var_comp[ind]\n samples[:, ind] = self.marginals[var].q(prval[:, ind])\n\n return DataFrame(data=samples, columns=var_comp)", "def sample(self, state):\n state = torch.FloatTensor(state)\n\n action_prob = self.network(state)\n action_distribution = Categorical(action_prob)\n action = action_distribution.sample()\n\n return action.cpu().item()", "def _get_sample(self):\n p = self._get_mean()\n u = self.random.random_sample(p.shape)\n sample = u < p\n return sample", "def random_sample(prob):\n def _random_sample_xducer(step):\n def _random_sample_step(r=Missing, x=Missing):\n if r is Missing: return step()\n if x is Missing:\n return step(r)\n return step(r, x) if random() < prob else r\n return _random_sample_step\n return _random_sample_xducer", "def sample(p, temperature, key, num_samples=1):\n tol = 1e-7\n p = np.clip(p, tol, 1 - tol)\n logit_p = logit(p)\n base_randomness = random.logistic(key, shape=(num_samples, *p.shape))\n return nn.sigmoid((logit_p + base_randomness) / (temperature + tol))", "def test_categorical_log_frequency():\n # assert the distribution of the samples is close to the distribution of the data\n # using cstest:\n # - uniform (assert p-value > 0.05)\n # - very skewed / biased? (assert p-value > 0.05)\n # - inversely correlated (assert correlation < 0)", "def sample (self, n):\n y = self.bins\n x = np.r_[0, self.values.cumsum ()] / self.sum\n # interpolate inverse CDF\n out = np.interp (np.random.random (n), x, y)\n if n == 1:\n return out[0]\n else:\n return out.reshape ((n,))", "def sample(self):\n # For each row: round(random .* (max - min) + min, 0)\n random_array = prng.np_random.rand(self.num_discrete_space)\n return [int(x) for x in np.floor(np.multiply((self.high - self.low + 1.), random_array) + self.low)]", "def weighted_values(values, probabilities, size):\n bins = np.add.accumulate(probabilities)\n return values[np.digitize(random_sample(size), bins)]", "def classify_samples(nd, sample_list, cat_spec):\n tmp = []\n\n for key in cat_spec:\n tmp.append([PSD_sym_KL(psd, cat_spec[key]) for psd in nd])\n\n KL = np.array(tmp).T\n\n # This is a confusing formula\n # amounts to: for a given sample, prob of belonging to class k is:\n # (1 / KL_k) / sum_k(KL_i) = sum_k\\i(KL_i) / sum_k(KL_i)\n prob = ((1 / KL).T / (1 / KL).sum(axis=1)).T\n\n row_masks = np.array([row == row.max() for row in prob])\n cats = [cat_spec.columns[mask][0] for mask in row_masks]\n\n items = [('label', cats)] + [('P({})'.format(lab), p) for lab, p in zip(cat_spec.columns, prob.T)]\n df = pd.DataFrame.from_items(items)\n df.index = sample_list\n\n return df", "def sample(self):\n sampleIndices = self.random_state.choice(len(self.X), int(len(self.X)*self.sample_ratio), replace=False)\n\n return self.X[sampleIndices]\n pass", "def initializeDistribution(self):\n if self.nPoints is None:\n self.xArray = np.arange(self.lowerBound,self.upperBound+1)\n else:\n self.xArray = np.linspace(self.lowerBound,self.upperBound,self.nPoints)\n\n # Here the actual calculation of discrete distribution parameters is performed\n self.pdfArray = 1.0/self.xArray.size * np.ones(self.xArray.size)\n paramsDict={}\n paramsDict['outcome'] = self.xArray\n paramsDict['state'] = self.pdfArray\n\n 
self.categoricalDist = Categorical()\n self.categoricalDist.initializeFromDict(paramsDict)\n initialPerm = randomUtils.randomPermutation(self.xArray.tolist(),self)\n self.pot = np.asarray(initialPerm)", "def choice(population,weights):\r\n\tassert len(population) == len(weights)\r\n\tcdf_vals=cdf(weights)\r\n\treturn population[bisect.bisect(cdf_vals, random.random())]", "def sample(self, n_samples: int, random_state: Optional[int] = None) -> np.ndarray:\n\n if self.prob.ndim == 1:\n samples = from_categorical(\n stats.multinomial.rvs(\n n=1, p=self.prob, size=n_samples, random_state=random_state\n )\n )\n else:\n n_classes = len(self.prob) # type: ignore\n samples = np.zeros((n_samples, n_classes))\n\n for cls in range(n_classes):\n samples[:, cls] = from_categorical(stats.multinomial.rvs(n=1, p=self.prob[cls], size=n_samples, random_state=random_state)) # type: ignore\n\n return samples", "def sample_from_probabilities(probabilities, topn=ALPHASIZE):\n p = np.squeeze(probabilities)\n p[np.argsort(p)[:-topn]] = 0\n p = p / np.sum(p)\n return np.random.choice(ALPHASIZE, 1, p=p)[0]", "def test_probabilities_are_ok(self, seed):\n bins = defaultdict(int)\n probs = (0.1, 0.2, 0.3, 0.4)\n categories = (\"asdfa\", \"2\", \"3\", \"4\")\n categories = OrderedDict(zip(categories, probs))\n dim = Categorical(\"yolo\", categories)\n for _ in range(500):\n sample = dim.sample(seed=seed)[0]\n bins[sample] += 1\n for keys in bins.keys():\n bins[keys] /= float(500)\n for key, value in categories.items():\n assert abs(bins[key] - value) < 0.01", "def random_value(self, selected_vals):\n pass", "def sample_from(space):\n distrs = {\n 'choice': choice,\n 'randint': randint,\n 'uniform': uniform,\n 'normal': normal,\n }\n s = space[0]\n\n np.random.seed(int(time.time() + np.random.randint(0, 300)))\n\n log = s.startswith('log_')\n s = s[len('log_'):] if log else s\n\n quantized = s.startswith('q')\n s = s[1:] if quantized else s\n\n distr = distrs[s]\n if s == 'choice':\n return distr(space[1])\n samp = distr(space[1], space[2])\n if log:\n samp = np.exp(samp)\n if quantized:\n samp = round((samp / space[3]) * space[3])\n return samp", "def p_value_inflation_test(p_values):\n from scipy.stats import ks_2samp\n h_null = np.random.uniform(0, 1, size=int(1e6))\n d, p_value = ks_2samp(p_values, h_null)\n return p_value, d", "def direct_sample(self, trial_count):\n count = 0\n\n for i in xrange(trial_count):\n values = {}\n\n for letter in self.letters:\n prob = self.variables[letter].get_prob(values)\n values[letter] = self.sample(prob)\n\n if values[self.query.variable]:\n count += 1\n\n return float(count) / trial_count", "def sample_observations(self):\n sample_dict={}\n for obs in self.observations:\n domain = obs.properties['domain']\n #If probabilistic, sample from the true distribution\n if obs.type=='probabilistic':\n probabilities = obs.properties['probability']\n #If uncontrollable, sample uniformly from the domain\n else:\n probabilities = [1.0/len(domain)]*len(domain)\n\n #Samples a value from the discrete probability distribution\n u_sample =random.random()\n acc_prob=0.0\n for val,prob in zip(domain,probabilities):\n acc_prob+=prob\n if u_sample<= acc_prob:\n sample_dict[obs]=val\n break\n\n return sample_dict", "def test_discrete_distribution():\n rng = utils.RandomState(0)\n distribution = dist.DiscreteDistribution(rng)\n with pytest.raises(NotImplementedError):\n distribution.sample([])\n with pytest.raises(NotImplementedError):\n distribution.log_probability([], None)\n with 
pytest.raises(NotImplementedError):\n distribution.support([])", "def _gen_pert(self, count, **kwargs):\n self._check_pert(**kwargs)\n pert = FairBetaPert(**kwargs)\n rvs = pert.random_variates(count)\n return rvs", "def sample(*arrays, **options):\n \n n_arrays = len(arrays)\n if n_arrays == 0:\n raise ValueError(\"At least one array required as input\")\n\n random_state = options.pop('random_state', None)\n\n frac = options.pop('frac', 0.5)\n\n categorytosample = options.pop('categorytosample', None)\n categories = options.pop('categories', None)\n if not categorytosample is None and categories is None:\n raise ValueError(\"Categories have to be provided if sampling by category is requested.\")\n\n if options:\n raise TypeError(\"Invalid parameters passed: %s\" % str(options))\n\n rng = check_random_state(random_state)\n if categorytosample is None:\n maxidx=arrays[0].shape[0]\n nrows=int(maxidx*frac)\n indicestosample=np.linspace(0,maxidx-1,num=maxidx)\n indicesnottosample=np.array([])\n else:\n cat=categories.copy().reset_index(drop=True)\n indicestosample=np.array(cat[cat==categorytosample].index)\n indicesnottosample=np.array(cat[cat!=categorytosample].index)\n nrows=int(indicestosample.shape[0]*frac)\n \n rng.shuffle(indicestosample)\n indices=np.sort(np.concatenate((indicestosample[:nrows],indicesnottosample)))\n\n return list(safe_indexing(a, indices) for a in arrays)", "def sample_distribution(numbers, probabilities, num_samples):\n intervals = []\n intervals.append(probabilities[0])\n new_interval = probabilities[0]\n\n for i in range(1, len(probabilities)):\n new_interval += probabilities[i]\n intervals.append(new_interval)\n\n counter = 0\n new_numbers = []\n while counter <= num_samples:\n for i in range(len(intervals)):\n # Generate a random num between 0 - 1\n # i.e. 
flip a coin.\n rand_prob = np.random.random_sample((1,))\n if rand_prob <= [intervals[i]]:\n new_numbers.append(numbers[i])\n counter += 1\n\n return new_numbers", "def get_good_val(vals=good_val):\n\n # Return a value from the distribution randomly\n return vals[rand.randint(0, (vals.shape[0] - 1))]", "def probability(self, samples):\n pass", "def test_with_dict(self, seed):\n probs = (0.1, 0.2, 0.3, 0.4)\n categories = (\"asdfa\", 2, 3, 4)\n dim = Categorical(\"yolo\", OrderedDict(zip(categories, probs)))\n samples = dim.sample(seed=seed)\n assert len(samples) == 1\n assert samples[0] == 2\n assert dim._probs == probs\n\n assert categories == dim.categories\n\n assert 2 in dim\n assert 0 not in dim\n\n assert dim.name == \"yolo\"\n assert dim.type == \"categorical\"\n assert dim.shape == ()", "def sample(self):\n if self._intervals is None:\n self._intervals = Intervals(self._table)\n\n if self._intervals.is_empty():\n raise ValueError()\n\n return self._intervals.sample()", "def test_with_tuple(self, seed):\n categories = (\"asdfa\", 2)\n dim = Categorical(\"yolo\", categories)\n samples = dim.sample(seed=seed)\n assert len(samples) == 1\n assert samples[0] == \"asdfa\"\n assert dim._probs == (0.5, 0.5)\n\n assert categories == dim.categories\n\n assert 2 in dim\n assert 3 not in dim\n\n assert (\n str(dim) == \"Categorical(name=yolo, prior={asdfa: 0.50, 2: 0.50}, \"\n \"shape=(), default value=None)\"\n )\n\n assert dim.name == \"yolo\"\n assert dim.type == \"categorical\"\n assert dim.shape == ()", "def sample(self, n):\n # Est probs from priority weights\n summed = sum(self.weight) + self.eps\n self.probs = [w / summed for w in self.priority]\n\n # Wieghted sample\n return np.random.choice(self.memory, size=n, p=self.probs).tolist()", "def custom_pdf(self, cum_probs, values):\n rnd_num = random()\n for p in range(len(cum_probs)):\n if rnd_num < cum_probs[p]:\n return values[p]", "def samplePopulation(self, k) -> \"Population\":\n sampledPopulation = Population(self.population_size, self.map)\n sampledPopulation.population = random.choices(self.population, k=k)\n sampledPopulation.sortPopulation()\n return sampledPopulation", "def sample_from_softmax_policy(self, batch_state):\n batch_q_values = self.forward(batch_state)\n batch_pi = F.softmax(batch_q_values, dim=1)\n batch_size = batch_pi.shape[0]\n batch_actions = torch.empty(batch_size, 1)\n for i in range(batch_size):\n pi = batch_pi[i, :]\n dist = torch.distributions.Categorical(pi)\n # Subtract 1, so batch_actions is in {-1, 0, 1}\n batch_actions[i, 0] = dist.sample().view(1, 1) - 1\n if use_cuda:\n batch_actions = batch_actions.to(batch_state.get_device())\n return batch_actions.long()" ]
[ "0.6776044", "0.650533", "0.64588654", "0.6282057", "0.6240135", "0.6240135", "0.62277555", "0.6160603", "0.6149961", "0.60994315", "0.60755867", "0.60755867", "0.60755867", "0.6050657", "0.6038272", "0.6014659", "0.5976859", "0.5965328", "0.59274614", "0.5909927", "0.58288866", "0.5760419", "0.57574266", "0.5740209", "0.5732299", "0.5724458", "0.5709076", "0.56702936", "0.56483847", "0.5647799", "0.5647297", "0.564259", "0.55911636", "0.55812883", "0.5580305", "0.55613154", "0.55488515", "0.55466765", "0.5539292", "0.5538431", "0.55269635", "0.54907006", "0.54890925", "0.54786885", "0.54782575", "0.54767096", "0.5472664", "0.5467969", "0.5448161", "0.5446919", "0.54427135", "0.54297984", "0.54290813", "0.54236645", "0.5422926", "0.5399749", "0.5398042", "0.5398042", "0.5367623", "0.53648955", "0.5360447", "0.53578985", "0.535398", "0.5353865", "0.53533584", "0.5351607", "0.5348879", "0.5347139", "0.5340668", "0.53368247", "0.53285515", "0.53110874", "0.531009", "0.53037", "0.5303273", "0.53019327", "0.5294467", "0.5291775", "0.52826285", "0.5270225", "0.52702075", "0.5263909", "0.52638835", "0.5260291", "0.5254312", "0.5251027", "0.520861", "0.51984626", "0.5198092", "0.51849115", "0.51845044", "0.5177869", "0.5171909", "0.51705223", "0.51698697", "0.5165266", "0.51649237", "0.5161326", "0.51613206", "0.51546216" ]
0.7592371
0
Convert a population (list of observations) to a CDF.
def population2cdf(population: np.ndarray) -> np.ndarray:
    population = np.sort(population)
    return np.searchsorted(population, population, side="right") / len(population)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cdf(self,x):\n if hasattr(x,'__len__'):\n returnCdf = np.array([self.cdf(i) for i in x])\n else:\n returnCdf = self._distribution.cdf(x)\n return returnCdf", "def cdf(self,x):\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n cdfValue = self._distribution.cdf(coordinate)\n return cdfValue", "def cdf(self,x):\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n cdfValue = self._distribution.cdf(coordinate)\n return cdfValue", "def cdf(self,x):\n return self.categoricalDist.cdf(x)", "def cdf(self,x):\n if self.method == 'spline':\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n cdfValue = self._distribution.cdf(coordinate)\n else:\n self.raiseAnError(NotImplementedError,'cdf not yet implemented for ' + self.method + ' method')\n return cdfValue", "def cdf(self, X, Y):\n assert self.fitted, \"model must be fitted to compute likelihood score\"\n X, Y = self._handle_input_dimensionality(X, Y, fitting=False)\n p = self.sess.run(self.cdf_, feed_dict={self.X_ph: X, self.Y_ph: Y})\n assert p.ndim == 1 and p.shape[0] == X.shape[0]\n return p", "def cdf(self, data, survival=False, **kwargs):\n\n data = np.array(data)\n if not data.any():\n return array([np.nan]), array([np.nan])\n\n #data = trim_to_range(data, xmin=xmin, xmax=xmax)\n\n n = float(len(data))\n\n data = np.sort(data)\n all_unique = not( any( data[:-1]==data[1:] ) )\n\n if all_unique:\n CDF = np.arange(n)/n\n else:\n #This clever bit is a way of using searchsorted to rapidly calculate the\n #CDF of data with repeated values comes from Adam Ginsburg's plfit code,\n #specifically https://github.com/keflavich/plfit/commit/453edc36e4eb35f35a34b6c792a6d8c7e848d3b5#plfit/plfit.py\n CDF = np.searchsorted(data, data,side='left') / n\n unique_data, unique_indices = np.unique(data, return_index=True)\n data=unique_data\n CDF = CDF[unique_indices]\n\n if survival:\n CDF = 1-CDF\n return data, CDF", "def cdf(self, value=None):\n if value is None:\n value = self.value\n return self.rv.cdf(\n value, *self._pymc_dists_to_value(self.args), **self.kwds\n )", "def cdf(weights):\r\n\treturn np.cumsum(weights) / sum(weights)", "def cdfFunction(f, x, N):\r\n return ssstats.binom.cdf(x, N, f)", "def cdf(self, value):\n return self._normal.cdf(value)", "def cdf(self,x):\n if self.functionType == 'cdf':\n cdfValue = self.cdfFunc(x)\n else:\n cdfValue = self.pdfFunc.integral(self.data[0][0],x)\n return cdfValue", "def test_cdf(log_prob_coo):\n\n offset_dict = log_prob_coo['offsets']\n\n # the input\n print(log_prob_coo)\n print('input log probs')\n print(log_prob_sparse_to_dense(log_prob_coo['coo']))\n\n # with this shape converter, we get one row, where each value is one m\n converter = IndexConverter(total_n_cells=1,\n total_n_genes=log_prob_coo['coo'].shape[0])\n\n # set up and estimate\n estimator = ThresholdCDF(index_converter=converter)\n noise_csr = estimator.estimate_noise(noise_log_prob_coo=log_prob_coo['coo'],\n noise_offsets=offset_dict,\n q=0.5)\n\n # output\n print('dense noise count estimate, per m')\n out_per_m = np.array(noise_csr.todense()).squeeze()\n print(out_per_m)\n print('truth')\n print(log_prob_coo['cdfs'])\n\n # test\n np.testing.assert_array_equal(out_per_m, log_prob_coo['cdfs'])", "def icdf(self, value):\n return self._normal.icdf(value)", "def ecdf(data):\n x = np.sort(data)\n cdf = np.linspace(0, 1, len(x))\n return cdf, x", "def cdf_to_pdf(cdf):\n pdf = deepcopy(cdf)\n pdf[1:] 
-= pdf[:-1].copy()\n return pdf", "def _convertDistrPointsToCdf(self,pts):\n try:\n return self.cdf(pts.real)\n except TypeError:\n return list(self.cdf(x) for x in pts)", "def cdf(self, value):\n cdf = torch.where(\n value < 1., \n self.base.cdf(value), \n torch.ones_like(value) # all of the mass\n )\n cdf = torch.where(value < 0., torch.zeros_like(cdf), cdf)\n return cdf", "def ca_to_coils_second_df(agent_df):", "def getCDF(self):\n return self.cdfSample", "def csv_to_cdf(metadata):\n\n basefile = metadata[\"basefile\"]\n\n try:\n ds = read_exo(basefile + \".csv\", skiprows=metadata[\"skiprows\"])\n except UnicodeDecodeError:\n # try reading as Mac OS Western for old versions of Mac Excel\n ds = read_exo(\n basefile + \".csv\", skiprows=metadata[\"skiprows\"], encoding=\"mac-roman\"\n )\n\n metadata.pop(\"skiprows\")\n\n # write out metadata first, then deal exclusively with xarray attrs\n ds = utils.write_metadata(ds, metadata)\n\n del metadata\n\n ds = utils.ensure_cf(ds)\n\n ds = utils.shift_time(ds, 0)\n\n # configure file\n cdf_filename = ds.attrs[\"filename\"] + \"-raw.cdf\"\n\n ds.to_netcdf(cdf_filename, unlimited_dims=[\"time\"])\n\n print(\"Finished writing data to %s\" % cdf_filename)\n\n return ds", "def cdf(data, args):\n return Plot._dist(data, args)", "def cdf(self, points):\n if self._y_cdf is not None:\n x = points[:, 0]\n y = points[:, 1]\n\n # map the y coordinate first.\n y_out = self._y_cdf(y)\n\n # select which x quantile curve to use.\n x_curve = (y_out - self.y_min) * self.y_res / (self.y_max - self.y_min)\n x_curve = np.floor(x_curve).astype(\"int\")\n\n # map the x coordinate.\n x_range = np.arange(x.shape[0])\n x_out = np.zeros_like(x)\n for i in range(self.y_res):\n mask = x_curve == i\n x_out[x_range[mask]] = self._x_cdfs[i](x[mask])\n\n x_out = tf.cast(x_out, dtype=points.dtype)\n y_out = tf.cast(y_out, dtype=points.dtype)\n return np.column_stack((x_out, y_out))\n else:\n raise RuntimeError(\n \"CumulativeDensityFunction: Must call compute() with the correct \"\n \"direction before evaluation.\"\n )", "def cdf(x, point):\n raise NotImplementedError(\"The cdf method has not yet been implemented.\")", "def pmf2cdf(pdf: np.ndarray) -> np.ndarray:\n\n cdf = np.cumsum(pdf)\n return cdf / cdf[-1]", "def build_cdf(self,\n label: str, \n weights : str = None) -> np.array :\n\n if not weights == None:\n table = self.sample.sort_values(label)\n w = table[weights].values\n return np.array([table[label].values, np.cumsum(w) / np.sum(w)])\n else:\n return np.array([self.sample.sort_values(label)[label].values, \n (np.arange(len(self.sample)) + 1) / len(self.sample)])", "def get_counties(popdf: pd.DataFrame, jhucov: str, regionf: str):\n\n\tdfcov = pd.read_csv(jhucov, usecols=JHUC_COLNUM, dtype=JHUC_DTYPE, parse_dates=[3],\n\t\tdayfirst=False, infer_datetime_format=True)\n\tdfcov['Last_Update']= pd.to_datetime(dfcov['Last_Update'], format='%m/%d/%y', exact=True)\n\tdfcov = dfcov.rename(JHUC_RENAM)\n\t# deal with blank county FIPS, primarily in UT, do_regions handles these:\n\tdfcov = do_regions(dfcov, regionf)\n\tdfcov.dropna(inplace=True, subset=['FIPS'])\n\tdfcov.set_index('FIPS', drop=False, inplace=True, verify_integrity=True)\n\tdfcov.sort_index(inplace=True)\n\n\tdf = dfcov.combine_first(popdf)\n\tdf['DeathstoPop'] = 100*(df['Deaths'] / df['Pop'])\n\tdf['CasestoPop'] = 100*(df['Confirmed'] / df['Pop'])\n\t# cleanup on aisle 'floats with NaN's'\n\tdf['DeathstoPop'].fillna(value=0, inplace=True)\n\tdf['CasestoPop'].fillna(value=0, inplace=True)\n\tdf['DeathstoPop'] = 
pd.to_numeric(df['DeathstoPop'])\n\tdf['CasestoPop'] = pd.to_numeric(df['CasestoPop'])\n\treturn df", "def test_cumulative_distribution_fit_df_call_np_array(self):\n # Setup\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data)\n X = np.array([2000., 200., 1.])\n expected_result = 0.4550595153746892\n\n # Run\n result = copula.cumulative_distribution(X)\n\n # Check\n assert np.isclose(result, expected_result, atol=1e-5).all().all()", "def cdf(array, figsize, color, label, xlabel, ylabel, title, textsize, xsize, ysize, loc):\r\n fig, ax = plt.subplots(figsize=figsize)\r\n x = np.sort(array)\r\n y = np.array(range(len(array)))/float(len(array))*100 \r\n ax.plot(x, y, color = color, label = label) # plot the CDF\r\n ax.set_title(title, weight = 'bold', size = textsize)\r\n ax.set_xlabel(xlabel, weight = 'bold', size = textsize)\r\n ax.set_ylabel(ylabel, weight = 'bold', size = textsize)\r\n plt.xticks(fontsize = xsize)\r\n plt.yticks(fontsize = ysize)\r\n plt.legend(loc = loc)", "def gen_population(self):\n df = self.get_df()\n idd_list = list(set(df['idd'].to_list()))\n date_list = df['ft_data_dt'].to_list()\n st_data_dt = min(date_list)\n end_data_dt = max(date_list)\n self.set_st_data_dt(st_data_dt)\n self.set_end_data_dt(end_data_dt)\n date_series = pd.date_range(*(pd.to_datetime([st_data_dt, end_data_dt]) + pd.offsets.MonthEnd()), freq='M', name='ft_data_dt')\n date_frame = date_series.to_frame()\n idd_series = pd.Series(data=idd_list, name='idd')\n idd_frame = idd_series.to_frame()\n date_frame['key'] = 0\n idd_frame['key'] = 0\n population = idd_frame.merge(date_frame, on='key', how='outer').drop(columns=['key'])\n self.set_population(population)", "def cov(x: DataFrame, y: Optional[Iterable] = None, ddof: int = 1) -> DataFrame:\n # TODO: support na_rm, use, method. 
see `?cov` in R\n return x.cov(ddof=ddof)", "def usa_covid_cases():\n output_df = pd.DataFrame()\n covid_df = pd.read_csv(r'https://covid.ourworldindata.org/data/owid-covid-data.csv')\n usa_df = covid_df[covid_df[\"iso_code\"] == \"USA\"]\n # print(usa_df.columns)\n output_df[\"Date\"] = usa_df[\"date\"]\n output_df[\"New Cases\"] = usa_df[\"new_cases\"]\n output_df[\"Fully Vaccinated / 100\"] = usa_df[\"people_fully_vaccinated_per_hundred\"]\n print(output_df)\n output_df.to_csv(\"database/usa_covid.csv\", index=False)", "def cif_df(cif_object) -> DataFrame:\n if cif_object is None:\n return DataFrame()\n row_list = cif_object.row_list\n attr_list = cif_object.attribute_list\n return DataFrame(data=row_list, columns=attr_list)", "def population():\n pop_csv = pd.read_csv(csv_path(\"Population.csv\"),\n index_col=0,\n dtype={\"Population\": np.uint32})\n pop_csv[\"Population\"] = (pop_csv[\"Population\"] * POPULATION_SCALE).astype(\"uint32\")\n return pop_csv", "def convertDF(fcs, sample_number=0):\r\n # Create an FCSdata object holding data from the FCS file\r\n FCSdata = convertFCS(fcs, sample_number)\r\n # Convert the cleaned FCS data into a PANDAS dataframe\r\n # Clean up the df by converting to int since values after decimal won't change the analysis\r\n df = pd.DataFrame(FCSdata.data, columns=FCSdata.channel_names).astype(int)\r\n return df", "def get_covariates_df(dataset_name: str) -> pd.DataFrame:\n path = Path(dataset_name) / COVARIATES_FILE\n return get_dataframe(path)", "def cdf(X, parameters):\n check_data_type_column_data(X)\n check_model_params_dict(parameters)\n\n sigma = (1.0/parameters['rho'])**.5\n\n return norm.cdf(X,parameters['mu'],sigma)", "def _(x: Iterable, y: Iterable, ddof: int = 1) -> DataFrame:\n # ddof: numpy v1.5+\n return numpy.cov(x, y, ddof=ddof)[0][1]", "def dataframe(countries):\n\n\t# read in data from dictionary\n\tdf = pd.DataFrame.from_dict(countries)\n\n\tinfant_mortality = df['Infant mortality (per 1000 births)']\n\t# convert sting into float with dot instead of comma and put it back in data frame\n\tdf['Infant mortality (per 1000 births)'] = df['Infant mortality (per 1000 births)'].str.replace(',','.').astype(float)\n\tdf['Pop. Density (per sq. mi.)'] = df['Pop. Density (per sq. mi.)'].str.replace(',','.').astype(float)\n\tdf['GDP ($ per capita) dollars'] = df['GDP ($ per capita) dollars'].astype(int)\n\n\treturn df", "def civic_cancer_genes():\n\n civic_genes_location = os.path.join(data_location, 'gene_catalog', 'civic_gene_summaries.tsv')\n civic_genes_data = pd.read_csv(civic_genes_location, skipinitialspace=True, usecols=['name'], delimiter='\\t')\n civic_genes = list(civic_genes_data['name'])\n\n return civic_genes", "def cov(self) -> 'DataFrame':\n if self._is_string():\n raise TypeError('DataFrame consists only of strings. 
Must have int, float, '\n 'or bool columns')\n\n x: ndarray = self._values_number()\n if x.dtype.kind == 'i':\n x0: ndarray = x[0]\n x_diff: ndarray = x - x0\n Exy: ndarray = (x_diff.T @ x_diff)\n Ex: ndarray = x_diff.sum(0)[np.newaxis, :]\n ExEy: ndarray = Ex.T @ Ex\n counts: Union[int, ndarray] = len(x)\n else:\n x0 = _math.get_first_non_nan(x)\n x_diff = x - x0\n x_not_nan: ndarray = (~np.isnan(x)).astype(int)\n\n x_diff_0: ndarray = np.nan_to_num(x_diff)\n counts = (x_not_nan.T @ x_not_nan)\n Exy = (x_diff_0.T @ x_diff_0)\n Ex = (x_diff_0.T @ x_not_nan)\n ExEy = Ex * Ex.T\n\n with np.errstate(invalid='ignore'):\n cov: ndarray = (Exy - ExEy / counts) / (counts - 1)\n\n new_data: Dict[str, ndarray] = {'f': np.asfortranarray(cov)}\n new_column_info: ColInfoT = {'Column Name': utils.Column('S', 0, 0)}\n new_columns: ndarray = np.empty(x.shape[1] + 1, dtype='O')\n new_columns[0] = 'Column Name'\n\n i: int = 0\n for col, dtype, loc in self._col_info_iter(): # type: str, str, int\n if dtype not in 'ifb':\n continue\n new_column_info[col] = utils.Column('f', i, i + 1)\n new_columns[i + 1] = col\n i += 1\n new_data['S'] = np.asfortranarray(new_columns[1:])[:, np.newaxis]\n return self._construct_from_new(new_data, new_column_info,\n np.asarray(new_columns, dtype='O'))", "def get_ccdf(degseq):\n uniques, counts = np.unique(degseq, return_counts=True)\n cumprob = np.cumsum(counts).astype(np.double) / (degseq.size)\n return uniques[::-1], (1. - cumprob)[::-1]", "def d_dbias(self, ):\n out = self.gen_df_db(self.params, self.output_shape)(self.current_input)\n return out", "def point_to_cdf(self, point):\n geomstats.errors.check_belongs(point, self)\n point = gs.to_ndarray(point, to_ndim=2)\n return lambda x: self.cdf(x, point)", "def compute_cdf(ordered_weights):\n return numpy.cumsum(ordered_weights) - 0.5 * ordered_weights", "def shapely_to_cf(geometries: xr.DataArray | Sequence, grid_mapping: str | None = None):\n # Get all types to call the appropriate translation function.\n types = {\n geom.item().geom_type if isinstance(geom, xr.DataArray) else geom.geom_type\n for geom in geometries\n }\n if types.issubset({\"Point\", \"MultiPoint\"}):\n ds = points_to_cf(geometries)\n elif types.issubset({\"Polygon\", \"MultiPolygon\"}) or types.issubset(\n {\"LineString\", \"MultiLineString\"}\n ):\n raise NotImplementedError(\"Only point geometries conversion is implemented.\")\n else:\n raise ValueError(\n f\"Mixed geometry types are not supported in CF-compliant datasets. Got {types}\"\n )\n\n # Special treatment of selected grid mappings\n if grid_mapping == \"longitude_latitude\":\n # Special case for longitude_latitude grid mapping\n ds = ds.rename(crd_x=\"lon\", crd_y=\"lat\")\n ds.lon.attrs.update(units=\"degrees_east\", standard_name=\"longitude\")\n ds.lat.attrs.update(units=\"degrees_north\", standard_name=\"latitude\")\n ds.geometry_container.attrs.update(coordinates=\"lon lat\")\n ds.x.attrs.update(units=\"degrees_east\", standard_name=\"longitude\")\n ds.y.attrs.update(units=\"degrees_north\", standard_name=\"latitude\")\n elif grid_mapping is not None:\n raise NotImplementedError(\n f\"Only grid mapping longitude_latitude is implemented. 
Got {grid_mapping}.\"\n )\n\n return ds", "def read_population() -> pd.DataFrame:\n\n pop_df = pd.read_csv(\"data/API_SP.POP.TOTL_DS2_en_csv_v2_988606.csv\",\n header=2, usecols=[0,62], names=[\"Country\", \"Population\"])\n\n index = pop_df[pop_df[\"Country\"]==\"Iran, Islamic Rep.\"].index.values[0]\n pop_df.loc[index, \"Country\"] = \"Iran\"\n index = pop_df[pop_df[\"Country\"] == \"United States\"].index.values[0]\n pop_df.loc[index, \"Country\"] = \"US\"\n index = pop_df[pop_df[\"Country\"] == \"Russian Federation\"].index.values[0]\n pop_df.loc[index, \"Country\"] = \"Russia\"\n\n pop_df = pop_df.dropna()\n\n return pop_df", "def data_with_fips(self) -> pd.DataFrame:\n return self.data", "def makecldf(args):\n with_dataset(args, Dataset._install)", "def _empirical_cdf(self, x):\n\n return CDF(self.passage_times.tolist()).cdf(x)", "def cdf(sample, location=0, scale=1):\n location = T.cast(location, theano.config.floatX)\n scale = T.cast(scale, theano.config.floatX)\n\n div = T.sqrt(2 * scale ** 2 + epsilon)\n div = T.cast(div, theano.config.floatX)\n\n erf_arg = (sample - location) / div\n return .5 * (1 + T.erf(erf_arg + epsilon))", "def get_cdf(dist):\n cdf = []\n total = 0\n for i in range(len(dist)):\n total += dist[i]\n cdf.append(total)\n return cdf", "def marginalCdf(self, x):\n if self.method == 'pca':\n marginalCdfForPCA = self._distribution.marginalCdfForPCA(x)\n else:\n self.raiseAnError(NotImplementedError,'marginalCdf not yet implemented for ' + self.method + ' method')\n return marginalCdfForPCA", "def _convertCdfPointsToDistr(self,pts):\n try:\n return self.ppf(pts.real)\n except TypeError:\n return list(self.ppf(x) for x in pts)", "def cdf_discretize(self,variables=[]):\n #the errors in the code are due to the deleted files that require packages to be installed on the computer\n for i in variables:\n x=unique(self.data[:,i])\n m=max(x)-min(x)\n f=lambda x0,y0: array([m*(x0+y0)/(1+m**2), (x0*m+y0)/(1+m**2)])\n cdf=array([np.sum(self.data[:,i]<=t) for t in x])\n d=array([norm(array([x0,cdf[k]])-f(x0,cdf[k])) for k,x0 in\\\n enumerate(x)])", "def get_countypop(popfile: str, excludefile: str):\n\tdfpop = pd.read_csv(popfile, usecols={0,3}, dtype={'FIPS': str,'Pop': int})\n\tdfpop.set_index('FIPS', drop=True, inplace=True, verify_integrity=True) # df has only fips index and Pop field\n\tdfpop.sort_index(inplace=True)\n\texcl = pd.read_csv(excludefile, usecols={7,8,9,10,11,12}, dtype={'fips0': str, 'fips1': str, 'fips2': str,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t'fips3': str, 'fips4': str, 'fips5': str})\n\tfor x in range(len(excl)):\n\t\tdfpop.drop(index=excl.iat[x, 0], inplace=True)\n\t\tdfpop.drop(index=excl.iat[x, 1], inplace=True)\n\t\tif pd.notnull(excl.iat[x, 2]):\n\t\t\tdfpop.drop(index=excl.iat[x, 2], inplace=True)\n\t\t\tif pd.notnull(excl.iat[x, 3]):\n\t\t\t\tdfpop.drop(index=excl.iat[x, 3], inplace=True)\n\t\t\t\tif pd.notnull(excl.iat[x, 4]):\n\t\t\t\t\tdfpop.drop(index=excl.iat[x, 4], inplace=True)\n\t\t\t\t\tif pd.notnull(excl.iat[x, 5]):\n\t\t\t\t\t\tdfpop.drop(index=excl.iat[x, 5], inplace=True)\n\n\treturn dfpop", "def process_cgc(path, return_dataframe=False, fusions=False):\n # read in data\n df = pd.read_table(path)\n\n # keep small somatic variants\n if not fusions:\n s = df['Mutation Types']\n is_small = s.str.contains('Mis|F|N|S').fillna(False)\n is_somatic = ~df['Tumour Types(Somatic)'].isnull()\n df = df[is_small & is_somatic].copy()\n\n # label oncogenes / TSG\n df['Is Oncogene (CGC)'] = 'No'\n df.loc[df['Role in 
Cancer'].fillna('').str.contains('oncogene'), 'Is Oncogene'] = 'Yes'\n df['Is Tumor Suppressor Gene (CGC)'] = 'No'\n df.loc[df['Role in Cancer'].fillna('').str.contains('TSG'), 'Is Tumor Suppressor Gene'] = 'Yes'\n df['Is Driver Gene (CGC)'] = 'Yes'\n\n # rename columns\n df = df.rename(columns={'Entrez GeneId': 'Entrez Gene ID', 'Gene Symbol': 'Hugo Symbol'})\n\n # get gene names\n if not return_dataframe:\n cgc_genes = df['Gene Symbol'].tolist()\n else:\n cgc_genes = df\n\n return cgc_genes\n else:\n # return fusion gene information\n has_fus_partner = ~df['Translocation Partner'].isnull()\n output_list = []\n for ix, row in df[has_fus_partner].iterrows():\n g1 = row[\"Gene Symbol\"]\n for g2 in row['Translocation Partner'].split(', '):\n output_list.append([g1, g2])\n output_df = pd.DataFrame(output_list, columns=[\"Gene1\", \"Gene2\"])\n output_df['GENE_ID'] = output_df['Gene1'] + '--' + output_df['Gene2']\n\n if not return_dataframe:\n cgc_genes = list(set(output_df[\"Gene1\"].unique()) | set(output_df[\"Gene2\"]))\n else:\n cgc_genes = output_df\n\n return cgc_genes", "def getCDFValue(self, value):\n cdfValue = self.cdfSample.get(value, None)\n if cdfValue != None:\n return cdfValue\n \n cdfValue = self.cdfFunction(value)\n self.cdfSample[value] = cdfValue\n return cdfValue", "def get_cdf(self, points=None):\n if points is not None:\n return self.parent.cdf(points)\n else:\n raise ValueError( 'Please digit an input for getCDF method')", "def test_cumulative_distribution_fit_call_pd(self):\n # Setup\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data.to_numpy())\n X = np.array([2000., 200., 1.])\n expected_result = 0.4550595153746892\n\n # Run\n result = copula.cumulative_distribution(X)\n\n # Check\n assert np.isclose(result, expected_result, atol=1e-5).all().all()", "def get_coco_dataset():\n ds = AttrDict()\n classes = [\n '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',\n 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',\n 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',\n 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',\n 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',\n 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',\n 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',\n 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',\n 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',\n 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',\n 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',\n 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',\n 'scissors', 'teddy bear', 'hair drier', 'toothbrush'\n ]\n ds.classes = {i: name for i, name in enumerate(classes)}\n return ds", "def pdf_from_cdf(data, idx, what):\n\n cdf = data[what + '_sum'].cumsum() / data[what + '_sum'].sum()\n cdfi = scipy.interpolate.interp1d(cdf.index, cdf, 'linear', bounds_error=False)(idx)\n pdfi = np.hstack((cdfi[0], np.diff(cdfi) / np.diff(idx)))\n return pdfi", "def pccovt(dt, dc, namet, genes, condcov=True):\n\timport numpy as np\n\tnt, ns = dt.shape\n\tnc = dc.shape[0]\n\tif nt == 0 or ns == 0:\n\t\traise ValueError('Empty normalized expression.')\n\tif len(namet) != nt or dc.shape[1] != ns:\n\t\traise ValueError('Incompatible input shapes.')\n\tt1 = set(genes) - set(namet)\n\tif len(t1) > 0:\n\t\traise ValueError('Genes not found: 
{},...'.format(','.join(list(t1)[:3])))\n\n\tif condcov and nc > 0:\n\t\t# Remove covariates\n\t\tfrom sklearn.preprocessing import StandardScaler as ss0\n\t\tfrom sklearn.linear_model import LinearRegression as lr0\n\t\tss = ss0()\n\t\tlr = lr0(fit_intercept=True)\n\t\tt1 = ss.fit_transform(dc.T)\n\t\tlr.fit(t1, dt.T)\n\t\tdt = dt - lr.predict(t1).T\n\n\tnametd = dict(zip(namet, range(len(namet))))\n\tans = pc1(dt[[nametd[x] for x in genes]])\n\n\tdcn = np.concatenate([dc, ans.reshape(1, dc.shape[1])], axis=0)\n\tassert dcn.shape == (nc + 1, ns)\n\treturn dcn", "def generate_csd_1D(csd_profile, csd_seed, start_x=0., end_x=1., res_x=50):\n chrg_pos_x = np.linspace(start_x, end_x, res_x)\n f = csd_profile(chrg_pos_x, seed=csd_seed)\n return chrg_pos_x, f", "def cdf(self, alpha): #Plot empirical cfd with confidence interval\n x = self.x\n n = len(x)\n y = np.arange(1, n+1)/n\n \n #Computing confidence interval with the Dvoretzky–Kiefer–Wolfowitz method based on the empirical points\n F1 = []\n F2 = []\n for i in range(0, n):\n e = (((mt.log(2/alpha))/(2*n))**0.5) \n F1.append(y[i] - e)\n F2.append(y[i] + e) \n plt.plot(sorted(x), y, label='Empirical CDF')\n plt.plot(sorted(x), F1, linestyle='--', color='red', alpha = 0.8, lw = 0.9, label = 'Dvoretzky–Kiefer–Wolfowitz Confidence Bands')\n plt.plot(sorted(x), F2, linestyle='--', color='red', alpha = 0.8, lw = 0.9)\n plt.ylabel('Cumulative Distribution Function')\n plt.xlabel('Observed Data')\n plt.legend()\n plt.show()\n \n return(y)", "def points_to_cf(pts: xr.DataArray | Sequence):\n from shapely.geometry import MultiPoint\n\n if isinstance(pts, xr.DataArray):\n dim = pts.dims[0]\n coord = pts[dim] if dim in pts.coords else None\n pts_ = pts.values.tolist()\n else:\n dim = \"features\"\n coord = None\n pts_ = pts\n\n x, y, node_count, crdX, crdY = [], [], [], [], []\n for pt in pts_:\n if isinstance(pt, MultiPoint):\n xy = np.concatenate([p.coords for p in pt.geoms])\n else:\n xy = np.atleast_2d(pt.coords)\n x.extend(xy[:, 0])\n y.extend(xy[:, 1])\n node_count.append(xy.shape[0])\n crdX.append(xy[0, 0])\n crdY.append(xy[0, 1])\n\n ds = xr.Dataset(\n data_vars={\n \"node_count\": xr.DataArray(node_count, dims=(dim,)),\n \"geometry_container\": xr.DataArray(\n attrs={\n \"geometry_type\": \"point\",\n \"node_count\": \"node_count\",\n \"node_coordinates\": \"x y\",\n \"coordinates\": \"crd_x crd_y\",\n }\n ),\n },\n coords={\n \"x\": xr.DataArray(x, dims=(\"node\",), attrs={\"axis\": \"X\"}),\n \"y\": xr.DataArray(y, dims=(\"node\",), attrs={\"axis\": \"Y\"}),\n \"crd_x\": xr.DataArray(crdX, dims=(dim,), attrs={\"nodes\": \"x\"}),\n \"crd_y\": xr.DataArray(crdY, dims=(dim,), attrs={\"nodes\": \"y\"}),\n },\n )\n\n if coord is not None:\n ds = ds.assign_coords({dim: coord})\n\n # Special case when we have no MultiPoints\n if (ds.node_count == 1).all():\n ds = ds.drop_vars(\"node_count\")\n del ds.geometry_container.attrs[\"node_count\"]\n return ds", "def d(self, df):\n # Get variable names\n var = [key for key, _ in self.marginals.items()]\n df_u = self.sample2pr(df)[var]\n # Evaluate copula density\n l_copula = self.copula.d(df_u.values)\n # Evaluate marginal densities\n L_marginals = zeros((df.shape[0], len(var)))\n for i, v in enumerate(var):\n L_marginals[:, i] = self.marginals[v].d(df[v])\n l_marginals = prod(L_marginals, axis=1)\n\n return l_copula * l_marginals", "def convertToDiscreteFunction(doubleArray: typing.List[float]) -> cern.japc.value.DiscreteFunction:\n ...", "def build_cov_dataset(self):\n return self.ini_eeg_f[:, :, 
self.mask_tri].copy()", "def cdf(self, x):\n\n if type(x) is np.float64:\n x = np.array([x])\n\n ndx = [np.argmin(np.abs(self.xs - x[i])) for i in range(x.size)]\n\n return self.ys[ndx]", "def cdf(self,x):\n sortedMapping = sorted(self.mapping.items(), key=operator.itemgetter(0))\n if x == sortedMapping[-1][0]:\n return 1.0\n if x in self.values:\n cumulative=0.0\n for element in sortedMapping:\n cumulative += element[1]\n if x == ( float(element[0]) if self.isFloat else element[0] ):\n return cumulative\n else:\n if self.isFloat:\n cumulative=0.0\n for element in sortedMapping:\n cumulative += element[1]\n if x >= element[0]:\n return cumulative\n # if we reach this point we must error out\n self.raiseAnError(IOError,'Categorical distribution cannot calculate cdf for ' + str(x))", "def CDFconvertToDistr(self,pts):\n return self._convertCdfPointsToDistr(self._convertStdPointsToCdf(pts))", "def cdf(data_r, data_f, xlabel: str = 'Values', ylabel: str = 'Cumulative Sum', ax=None):\n x1 = np.sort(data_r)\n x2 = np.sort(data_f)\n y = np.arange(1, len(data_r) + 1) / len(data_r)\n\n ax = ax if ax else plt.subplots()[1]\n\n axis_font = {'size': '14'}\n ax.set_xlabel(xlabel, **axis_font)\n ax.set_ylabel(ylabel, **axis_font)\n\n ax.grid()\n ax.plot(x1, y, marker='o', linestyle='none', label='Real', ms=8)\n ax.plot(x2, y, marker='o', linestyle='none', label='Synthetic', alpha=0.5)\n ax.tick_params(axis='both', which='major', labelsize=8)\n ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.1), ncol=3)\n\n # If labels are strings, rotate them vertical\n if isinstance(data_r, pd.Series) and data_r.dtypes == 'object':\n ax.set_xticklabels(data_r.value_counts().sort_index().index, rotation='vertical')\n\n if ax is None:\n plt.show()", "def get_codon_arr(chromosome: Chromosome) -> np.ndarray:\n\n seq_len = len(chromosome.sequence)\n arr = np.zeros((seq_len - 2,), dtype=np.int)\n\n for f in chromosome.features:\n\n if f.type != 'CDS':\n continue\n if f.strand == '-':\n continue\n\n protein_len = (f.end - f.start) // 3\n for aa in range(protein_len):\n pos = f.start + (aa * 3) - 1 # -1 to 0-based\n arr[pos] = 1\n\n return arr", "def _icdf(\n op: Op,\n value: TensorVariable,\n *inputs: TensorVariable,\n **kwargs,\n):\n raise NotImplementedError(f\"Inverse CDF method not implemented for {op}\")", "def ecdf(dat, xi):\n dat = np.array(dat)\n\n b_flatten = False\n if len(dat.shape) == 1:\n dat = np.array([dat]).transpose()\n b_flatten = True\n\n n = dat.shape[1]\n m = len(xi)\n\n res = np.zeros((m, n))\n\n for i in range(n):\n func = ecdf_func(dat[:, i])\n res[:, i] = func(xi)\n\n if b_flatten:\n res = res[:, 0]\n\n return res", "def get_cod_freq(gene):\r\n header = gene.iloc[:,0].values[0].split(' ')\r\n geneID=header[0][1:]\r\n\r\n\r\n #get coding sequence\r\n cds = gene.iloc[:,1].values[0].upper().replace('T','U')\r\n codon_count=dict() \r\n \r\n #build dictionary to accumulate codon counts; ignore with stop codons\r\n for codon in list(codon_aa.keys()):\r\n if codon not in [ \"UAA\",\"UAG\", \"UGA\" ]:\r\n codon_count[codon]=0\r\n \r\n ##count codons in cds\r\n codons = []\r\n for c in range(0,len(cds),3): #O(len cds)\r\n cod=cds[c:c+3]\r\n try:\r\n codon_count[cod]+=1\r\n except KeyError:\r\n continue\r\n \r\n #store the fractional freq of each codon in the codon dictionary\r\n total_cod=len(cds)/3 #total number of codons in the cds\r\n for c in list(codon_count.keys()): #O(len codondict)\r\n codon_count[c]/=total_cod\r\n \r\n df_codcnt=pd.DataFrame(list(codon_count.items()) )\r\n 
df_codcnt.columns=['Codon', 'Fractional_Freq']\r\n df_codcnt=df_codcnt.set_index('Codon').T.reset_index(drop=True)\r\n \r\n df_codcnt['GeneID']=geneID\r\n\t#reorder columns\r\n cols2=[df_codcnt.columns[-1]]+sorted(df_codcnt.columns[:61])\r\n df_codcnt=df_codcnt[cols2]\r\n return df_codcnt", "def read_covid_data() -> pd.DataFrame:\n\n #confirmed_dict = read_covid_csv(\"data/time_series_covid19_confirmed_global.csv\")\n #death_dict = read_covid_csv(\"data/time_series_covid19_deaths_global.csv\")\n\n pool = Pool(12)\n with open(\"data/time_series_covid19_confirmed_global.csv\") as source_file:\n title = source_file.readline() #get rid of the header\n #print(source_file.readline())\n confirmed_lst = pool.map(process, source_file, 4) #parallel processing the data\n\n with open(\"data/time_series_covid19_deaths_global.csv\") as source_file:\n title = source_file.readline()\n death_lst = pool.map(process, source_file, 4)\n\n confirmed_dict = {}\n for d in confirmed_lst:\n confirmed_dict.update(d)\n\n death_dict = {}\n for d in death_lst:\n death_dict.update(d)\n\n confirmed = pd.Series(confirmed_dict)\n death = pd.Series(death_dict)\n covid_data = pd.DataFrame(list(zip(confirmed, death)), columns=[\"Confirmed\", \"Death\"])\n covid_data.insert(0, \"Country\", confirmed_dict.keys())\n\n return covid_data", "def domodfit(profile, fdatc, wmat, xmat):\n # 2010-03-04 14:31 IJC: Created.\n # 2010-03-15 16:42 IJC: Added coefficient-covariance matrix calculation\n\n xmat = array(xmat,copy=True)\n\n if (profile==0).all():\n xmatc = xmat.copy()\n # Add constraint of flux conservation.\n xmatc = hstack((xmat, array([0,1,0]*14+[0]).reshape(43,1))) \n else:\n xmat = vstack((profile, xmat))\n # Add constraint of flux conservation.\n xmatc = hstack((xmat, array([0]+[0,1,0]*14+[0]).reshape(44,1))) \n\n\n xmatc = xmatc.transpose()\n xtw = dot(xmatc.transpose(), wmat)\n coef = dot(linalg.inv(dot(xtw,xmatc)),dot(xtw,fdatc))\n ccov = linalg.inv(dot(xtw,xmatc))\n \n \n return coef, xmat.transpose(), ccov", "def _logcdf(\n op: Op,\n value: TensorVariable,\n *inputs: TensorVariable,\n **kwargs,\n):\n raise NotImplementedError(f\"LogCDF method not implemented for {op}\")", "def CCDF(X, steps=100, log_steps=False, normed=False):\n X = np.array(X)\n \n # Adjust parameters\n if normed:\n norm = float(np.size(X))\n else:\n norm = 1\n \n if log_steps:\n if X.min() <= 0:\n print(\"Can't use log steps for negative numbers. Translating to postive with 0.1 as minumum.\")\n X += X.min() + 0.1\n args_CCDF = np.logspace(X.min(), X.max(), steps)\n else:\n args_CCDF = np.linspace(X.min(), X.max(), steps)\n\n # Calculate CCDF\n vals_CCDF = np.array([np.size(X[X >= i]) / norm for i in args_CCDF])\n plt.plot(args_CCDF, vals_CCDF)\n return args_CCDF, vals_CCDF", "def cmd_makecldf(self, args):\n wl = lingpy.Wordlist(self.raw_dir.joinpath(\"GEM-CNL.csv\").as_posix())\n concepts = args.writer.add_concepts(\n id_factory=lambda x: x.id.split(\"-\")[-1] + \"_\" + slug(x.english), lookup_factory=\"Name\"\n )\n for concept in self.conceptlists[0].concepts.values():\n for cis in concept.attributes[\"lexibank_gloss\"]:\n if cis not in concepts:\n concepts[cis] = concepts[concept.english]\n\n languages = args.writer.add_languages(lookup_factory=\"STEDT_Name\")\n args.writer.add_sources()\n\n for idx, language, concept, value, pos in wl.iter_rows(\n \"doculect\", \"concept\", \"reflex\", \"gfn\"\n ):\n # Fix for 251479\n if concept == \"top (i.e. highest point\":\n concept = \"top (i.e. 
highest point)\"\n\n if concept not in concepts:\n args.log.warning(concept)\n else:\n args.writer.add_forms_from_value(\n Language_ID=languages[language],\n Parameter_ID=concepts[concept],\n Value=value,\n Source=[\"Marrison1967\"],\n )", "def to_geojson(gdf):\r\n covid_json = gdf.to_json()\r\n return covid_json", "def ecdf(data):\n x = np.sort(data)\n y = np.arange(1,len(x)+1)/len(x)\n\n return x,y", "def _calc_ecdf(self):\n for numerator, vals in self.lift.items():\n for denominator, lift in vals.items():\n raw_data = np.array(lift)\n cdfx = np.sort(np.unique(lift))\n x_values = np.linspace(start=min(cdfx),\n stop=max(cdfx),\n num=len(cdfx))\n size_data = raw_data.size\n y_values = []\n for i in x_values:\n temp = raw_data[raw_data <= i]\n value = temp.size / size_data\n y_values.append(value)\n temp = {}\n temp['x'] = x_values\n temp['y'] = y_values\n if numerator not in self.ecdf.keys():\n self.ecdf[numerator] = {}\n self.ecdf[numerator][denominator] = temp\n else:\n self.ecdf[numerator][denominator] = temp", "def cdf(self, x, mu, **kwargs):\n if not hasattr(self, \"_cdfsample\"):\n # - just get it once\n self._cdfsample = self.rvs(mu, size=1e4, nsample=1e4)\n if is_arraylike(x):\n return np.asarray([float(len(self._cdfsample[self._cdfsample<x_]))/ 1e4\n for x_ in x])\n return float(len(self._cdfsample[self._cdfsample<x]))/ 1e4", "def county_assignments_ferc714(\n fipsified_respondents_ferc714,\n) -> pd.DataFrame:\n df = fipsified_respondents_ferc714[\n [\"respondent_id_ferc714\", \"county_id_fips\", \"report_date\"]\n ]\n # Drop rows where county is blank or a duplicate\n df = df[~df[\"county_id_fips\"].isnull()].drop_duplicates()\n # Convert date to year\n df[\"year\"] = df[\"report_date\"].dt.year\n df.drop(columns=[\"report_date\"], inplace=True)\n return df", "def populate_countries(self):\n # For each country in population.\n for name, pop in self.population.iterrows():\n p = pop['Population']\n # Get all relevant time series based on country name.\n c = self.raw_confirmed.loc[self.raw_confirmed['Country/Region'] == name].sum(numeric_only=True)\n d = self.raw_deceased.loc[self.raw_deceased['Country/Region'] == name].sum(numeric_only=True)\n r = self.raw_recovered.loc[self.raw_recovered['Country/Region'] == name].sum(numeric_only=True)\n # Create new country object.\n self.countries.append(country.Country(name, p, c, d, r))", "def __call__(self, points):\n return self.cdf(points)", "def cdf(self, x):\n\n pi = 3.1415926536\n mean = self.mean\n stddev = self.stddev\n\n x1 = (x - mean) / (stddev * (2 ** 0.5))\n\n erf1 = (2/pi**0.5)\n erf2 = (x1-((x1**3)/3)+((x1**5)/10)-((x1**7)/42)+((x1**9)/216))\n erf = erf1 * erf2\n cdf = (1/2)*(1+erf)\n\n return cdf", "def make_prog(self):\r\n\r\n self.cnv.clear()\r\n cdf = self.df[self.df.L != 0]\r\n c0 = cdf['C0'].value_counts().idxmax()\r\n c1 = cdf['C1'].value_counts().idxmax()\r\n c2 = cdf['C2'].value_counts().idxmax()\r\n c3 = cdf['C3'].value_counts().idxmax()\r\n self.cnv.extend([c0, c1, c2, c3])", "def to_cif(self, container):\n raise NotImplementedError(\"BaseRecord does not implement to_cif.\")", "def pca(self):\n return DataFramePCA(self.subset_)", "def _constructor(self):\n return dnpdata_collection", "def from_cudf_adjlist(\n self,\n offset_col,\n index_col,\n value_col=None,\n renumber=True,\n store_transposed=False,\n ):\n if self._Impl is None:\n self._Impl = simpleGraphImpl(self.graph_properties)\n elif type(self._Impl) is not simpleGraphImpl:\n raise RuntimeError(\"Graph is already initialized\")\n elif self._Impl.edgelist is 
not None or self._Impl.adjlist is not None:\n raise RuntimeError(\"Graph already has values\")\n self._Impl._simpleGraphImpl__from_adjlist(offset_col, index_col, value_col)", "def cancer_gene_census():\n\n gene_census_location = os.path.join(data_location, 'gene_catalog', 'cancer_gene_census.csv')\n gene_census_data = pd.read_csv(gene_census_location, skipinitialspace=True, usecols=['Gene Symbol', 'Synonyms'])\n gene_census = list(gene_census_data['Gene Symbol'])\n\n logger.info('Number of Cancer Gene Census: {0}\\n'.format(len(gene_census)))\n\n for synonynm in gene_census_data['Synonyms']:\n if type(synonynm) is str:\n gene_census.extend(synonynm.split(','))\n\n return gene_census", "def _compute_covariance_matrix_pd(self, list_obs_1, list_obs_2, pd_dim):\n\n assert isinstance(list_obs_1, list)\n assert isinstance(list_obs_2, list)\n\n fction = partial(self.covariance.compute_pd, i=pd_dim)\n\n cov_matrix = np.zeros((len(list_obs_1), len(list_obs_2)))\n cov_matrix_flat = [\n (i, j, fction(xi, yj))\n for (i, xi) in enumerate(list_obs_1)\n for (j, yj) in enumerate(list_obs_2)\n ]\n for coord_value in cov_matrix_flat:\n cov_matrix[coord_value[:2]] = coord_value[2]\n\n return cov_matrix", "def dfdb(x: np.array) -> np.array:\n return x", "def cdf(self, x):\n from scipy.special import betainc\n sq_x = x * x\n return np.where(\n sq_x < 1., betainc(self.m / 2.0, self.n / 2.0, sq_x),\n np.ones_like(x))", "def generate_train_data(comps: List[pd.DataFrame], concen_upper_bound=1000, num_per_combination=1000):\n cps = [to_int_index(c) for c in comps]\n cps = [zero_end_interpolation(c) for c in comps]\n cps = alignment(cps)\n cps = [scale_dataframe(c) for c in cps]\n\n samples = []\n for n_class in range(1, len(cps) + 1):\n comps_roller = ComponentRoller(cps, n_class)\n concen_roller = ConcentrationRoller(1, concen_upper_bound, n_class)\n for i in range(num_per_combination):\n picked_comps, label = comps_roller.roll()\n concen_vector = concen_roller.roll_unique(label)\n the_sample = pd.Series(name=label, data=np.sum(picked_comps * concen_vector, axis=1))\n samples.append(the_sample)\n if i % 100 == 0:\n print('组合数{}: 第{}个样本 --- 标签{},浓度比{}'.format(n_class, i, label, concen_vector))\n df = pd.concat(samples, axis=1)\n return df.values.T, np.array(_to_vectors(df.columns.tolist()))" ]
[ "0.6000258", "0.56762284", "0.56762284", "0.55965334", "0.54399467", "0.54305595", "0.5391821", "0.5380843", "0.53685206", "0.5361804", "0.53427804", "0.5326272", "0.52933216", "0.5286579", "0.5269808", "0.5198982", "0.5191929", "0.5165977", "0.5149848", "0.51354104", "0.5111973", "0.5103631", "0.5088575", "0.5079822", "0.5057541", "0.5044703", "0.503962", "0.5026708", "0.50149685", "0.5001102", "0.49931955", "0.49701577", "0.49678722", "0.49540147", "0.49428198", "0.48771575", "0.4865609", "0.48618135", "0.4859726", "0.4821807", "0.481243", "0.48108605", "0.480629", "0.4801108", "0.47851115", "0.4780113", "0.47728455", "0.47516915", "0.47485596", "0.47385815", "0.4733974", "0.47334322", "0.4730322", "0.4726125", "0.4712412", "0.46979672", "0.46919554", "0.46917692", "0.46884272", "0.4685311", "0.4679616", "0.4674461", "0.4646806", "0.4644895", "0.46429965", "0.4642535", "0.463722", "0.46134314", "0.46130037", "0.46108103", "0.46097714", "0.46077675", "0.4598502", "0.45833522", "0.4575449", "0.4571933", "0.45707572", "0.45688882", "0.45667025", "0.45584595", "0.45445967", "0.45410895", "0.45288804", "0.4526708", "0.45243093", "0.45217183", "0.45202073", "0.45159668", "0.45115772", "0.44989476", "0.44878617", "0.44861823", "0.44751632", "0.44586343", "0.44512695", "0.44485974", "0.44443297", "0.44421118", "0.44385096", "0.44336095" ]
0.62257254
0
Convert a discrete PDF into a discrete CDF.
def pmf2cdf(pdf: np.ndarray) -> np.ndarray:
    cdf = np.cumsum(pdf)
    return cdf / cdf[-1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cdf_to_pdf(cdf):\n pdf = deepcopy(cdf)\n pdf[1:] -= pdf[:-1].copy()\n return pdf", "def pdf(self,x):\n return self.categoricalDist.pdf(x)", "def pdf(self,x):\n if x in self.values:\n pdfValue = self.mapping[x]\n else:\n if self.isFloat:\n vals = sorted(list(self.values))\n idx = bisect.bisect(vals, x)\n pdfValue = self.mapping[list(vals)[idx]]\n else:\n self.raiseAnError(IOError,'Categorical distribution cannot calculate pdf for ' + str(x))\n return pdfValue", "def contrast_pdf(contdc, contdc_sigma, dc_tru, dc_sigma, contrast_axis, npts=8000, display=False):\n\n dc_axis = np.linspace(dc_tru - 8 * dc_sigma, dc_tru + 8 * dc_sigma, npts)\n dc_mesh, contrast_mesh = np.meshgrid(dc_axis, contrast_axis)\n contdc_mesh = dc_mesh * contrast_mesh\n\n pdf_contdc = scipy.stats.rice.pdf(contdc_mesh, contdc / contdc_sigma, scale=contdc_sigma, loc=0.)\n pdf_dc, _ = norm_pdf(dc_tru, dc_sigma, x=dc_mesh)\n joint_pdf = pdf_contdc * pdf_dc\n\n # normalise joint PDF\n area = np.trapz(np.trapz(joint_pdf, contdc_mesh, axis=0), dc_axis)\n joint_pdf /= area\n\n # calculate the ratio pdf\n integrand = abs(dc_mesh) * joint_pdf\n contrast_pdf = np.trapz(integrand, dc_mesh, axis=1)\n\n if display:\n plt.figure()\n plt.imshow(pdf_contdc)\n plt.colorbar()\n\n plt.figure()\n plt.imshow(pdf_dc)\n plt.colorbar()\n\n plt.figure()\n plt.imshow(joint_pdf)\n plt.colorbar()\n\n plt.figure()\n plt.imshow(integrand)\n plt.colorbar()\n\n plt.figure()\n plt.plot(contrast_axis, contrast_pdf)\n\n plt.show()\n\n return contrast_pdf", "def pdf_from_cdf(data, idx, what):\n\n cdf = data[what + '_sum'].cumsum() / data[what + '_sum'].sum()\n cdfi = scipy.interpolate.interp1d(cdf.index, cdf, 'linear', bounds_error=False)(idx)\n pdfi = np.hstack((cdfi[0], np.diff(cdfi) / np.diff(idx)))\n return pdfi", "def _convertDistrPointsToCdf(self,pts):\n try:\n return self.cdf(pts.real)\n except TypeError:\n return list(self.cdf(x) for x in pts)", "def cdf(self,x):\n if self.functionType == 'cdf':\n cdfValue = self.cdfFunc(x)\n else:\n cdfValue = self.pdfFunc.integral(self.data[0][0],x)\n return cdfValue", "def cdf(self,x):\n return self.categoricalDist.cdf(x)", "def c_pdf(self, x):\n\n assert x > 0\n\n # shortcut\n shape = self.shape\n loc = self.loc\n scale = self.scale\n xn = np.subtract(x, loc) / scale\n\n # update x\n ft = shape * xn ** (shape - 1) * np.exp(-xn ** shape)\n return ft / scale", "def cdf(x, point):\n raise NotImplementedError(\"The cdf method has not yet been implemented.\")", "def CDFconvertToDistr(self,pts):\n return self._convertCdfPointsToDistr(self._convertStdPointsToCdf(pts))", "def convert_pdf(pdf_path):\n with Image(filename=pdf_path, resolution=300, format=\"pdf\") as pdf:\n pdf.convert('tiff')\n pdf.save(filename='./data/raw/full.tiff')", "def get_ccdf(degseq):\n uniques, counts = np.unique(degseq, return_counts=True)\n cumprob = np.cumsum(counts).astype(np.double) / (degseq.size)\n return uniques[::-1], (1. 
- cumprob)[::-1]", "def _pdf(self):\n # LOG: processing_type property\n self.set_property('processing_type', 'pdf')\n xmlDoc = PDFiD(self.src_path)\n oPDFiD = cPDFiD(xmlDoc, True)\n # TODO: are there other characteristics which should be dangerous?\n if oPDFiD.encrypt.count > 0:\n self.make_dangerous('encrypted pdf')\n if oPDFiD.js.count > 0 or oPDFiD.javascript.count > 0:\n self.make_dangerous('pdf with javascript')\n if oPDFiD.aa.count > 0 or oPDFiD.openaction.count > 0:\n self.make_dangerous('openaction')\n if oPDFiD.richmedia.count > 0:\n self.make_dangerous('flash')\n if oPDFiD.launch.count > 0:\n self.make_dangerous('launch')", "def pdf_comp(self, x, cid, log = False):\n if self.mode == 'diag':\n va = self.va[cid]\n elif self.mode == 'full':\n va = self.va[cid*self.d:(cid+1)*self.d]\n else:\n raise GmParamError(\"\"\"var mode %s not supported\"\"\" % self.mode)\n\n if log:\n return D.gauss_den(x, self.mu[cid], va, log = True) \\\n + N.log(self.w[cid])\n else:\n return D.multiple_gauss_den(x, self.mu[cid], va) * self.w[cid]", "def pdf(self,x):\n if self.transformation:\n pdfValue = self.pdfInTransformedSpace(x)\n else:\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n pdfValue = self._distribution.pdf(coordinate)\n return pdfValue", "def cdf(self,x):\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n cdfValue = self._distribution.cdf(coordinate)\n return cdfValue", "def cdf(self,x):\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n cdfValue = self._distribution.cdf(coordinate)\n return cdfValue", "def pdf(self, x):\n raise NotImplementedError", "def icdf(self, value):\n return self._normal.icdf(value)", "def _convertCdfPointsToDistr(self,pts):\n try:\n return self.ppf(pts.real)\n except TypeError:\n return list(self.ppf(x) for x in pts)", "def _compute_single_pdf(self, **kwargs):\n raise NotImplementedError", "def pdf(self,x):\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n pdfValue = self._distribution.pdf(coordinate)\n return pdfValue", "def pdf(self,x):\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n pdfValue = self._distribution.pdf(coordinate)\n return pdfValue", "def pdf(self,x):\n returnPdf = self._distribution.pdf(x)\n return returnPdf", "def _dncb_pdf(x, a1, a2, mu1, mu2):\n out = st.beta.pdf(x, a1, a2, loc=0)\n out *= np.exp(-mu1-mu2)\n out *= hchg(x, a1, a2, mu1, mu2)\n return out", "def cdf(self, value):\n cdf = torch.where(\n value < 1., \n self.base.cdf(value), \n torch.ones_like(value) # all of the mass\n )\n cdf = torch.where(value < 0., torch.zeros_like(cdf), cdf)\n return cdf", "def cdf(self,x):\n if self.method == 'spline':\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n cdfValue = self._distribution.cdf(coordinate)\n else:\n self.raiseAnError(NotImplementedError,'cdf not yet implemented for ' + self.method + ' method')\n return cdfValue", "def _convert(self):\n logger.info(\"Converting conformers to density\")\n logger.debug(\"Masking\")\n self._transformer.reset(full=True)\n for n, coor in enumerate(self._coor_set):\n self.conformer.coor = coor\n self._transformer.mask(self._rmask)\n mask = self._transformer.xmap.array > 0\n self._transformer.reset(full=True)\n\n nvalues = mask.sum()\n self._target = self.xmap.array[mask]\n logger.debug(\"Density\")\n nmodels 
= len(self._coor_set)\n self._models = np.zeros((nmodels, nvalues), float)\n for n, coor in enumerate(self._coor_set):\n self.conformer.coor = coor\n self.conformer.b = self._bs[n]\n self._transformer.density()\n model = self._models[n]\n model[:] = self._transformer.xmap.array[mask]\n np.maximum(model, self.options.bulk_solvent_level, out=model)\n self._transformer.reset(full=True)", "def pdf(self):\n\n pdf = PDF(self.valuesArray)\n return pdf.axes[0], pdf.pdf", "def convertToDiscreteFunction(boolean: bool) -> cern.japc.value.DiscreteFunction:\n ...", "def encode_pdf(pdf):\n count = len(pdf)\n pdf = map(lambda x: '(' + str(x[0]) + ', ' + str(x[1]) + ')', pdf)\n pdf = '[' + ', '.join(pdf) + ']'\n return pdf", "def cdf(self, value):\n return self._normal.cdf(value)", "def check_cdfIntegrity(self, step):\n # Selecting bins automatically:\n x_max = self.onpower_train.max().values[0]\n x_min = 0\n step = 1\n x_onpower = np.arange(x_min, x_max, step).reshape(-1, 1)\n\n x_max = 0\n x_min = self.offpower_train.min().values[0]\n step = 1\n x_offpower = np.arange(x_min, x_max, step).reshape(-1, 1)\n\n x_max = self.duration_train.max().values[0]\n x_min = 0\n step = 1\n x_duration = np.arange(x_min, x_max, step).reshape(-1, 1)\n\n # Evaluating score for:\n # Onpower\n y_onpower = self.__pdf2(self.onpower, x_onpower)\n print(\"Onpower cdf: \" + str(y_onpower.sum()))\n\n # Offpower\n y_offpower = self.__pdf2(self.offpower, x_offpower)\n print(\"Offpower cdf: \" + str(y_offpower.sum()))\n\n # duration\n y_duration = self.__pdf2(self.duration, x_duration)\n print(\"Duration cdf: \" + str(y_duration.sum()))\n\n # Plots:\n # fig1 = plt.figure()\n # ax1 = fig1.add_subplot(311)\n # ax2 = fig1.add_subplot(312)\n # ax3 = fig1.add_subplot(313)\n\n # ax1.plot(x_onpower, y_onpower)\n # ax1.set_title(\"PDF CDF: Onpower\")\n # ax1.set_ylabel(\"density\")\n # ax1.set_xlabel(\"Watts\")\n\n # ax2.plot(x_offpower, y_offpower)\n # ax2.set_title(\" PDF CDF: Offpower\")\n # ax2.set_ylabel(\"denisty\")\n # ax2.set_xlabel(\"Watts\")\n\n # ax3.plot(x_duration, y_duration)\n # ax3.set_title(\"PDF CDF: Duration\")\n # ax3.set_ylabel(\"density\")\n # ax3.set_xlabel(\"Seconds\")", "def pdf(self, value=None):\n if value is None:\n value = self.value\n return self.rv.pdf(\n value, *self._pymc_dists_to_value(self.args), **self.kwds\n )", "def do_single_file_preprocess(pdf_file):", "def process_pdf(pdf):\n\n if os.path.exists(legend_images_dir):\n subprocess.call([\"rm\", \"-rf\", legend_images_dir])\n os.makedirs(legend_images_dir)\n\n if os.path.exists(plot_images_dir):\n subprocess.call([\"rm\", \"-rf\", plot_images_dir])\n os.makedirs(plot_images_dir)\n\n if os.path.exists(csv_output_dir):\n subprocess.call([\"rm\", \"-rf\", csv_output_dir])\n os.makedirs(csv_output_dir)\n\n if os.path.exists(pdf_output_dir):\n subprocess.call([\"rm\", \"-rf\", pdf_output_dir])\n os.makedirs(pdf_output_dir)\n\n genImages(pdf)", "def pdf(self,x):\n pdfValue = self.pdfFunc(x)\n return pdfValue", "def pdf(self, grid, dataSegment):\n return self.density(dataSegment[0], *grid)", "def extract(self, pdf_path, output_directory):\n figure_extraction = FigureExtraction(\n pdf_path=pdf_path,\n parent_directory=output_directory)\n\n # create the extraction results directory\n os.makedirs(figure_extraction.paths['BASE'])\n\n # copy the PDF into the extraction results directory\n shutil.copy(pdf_path, figure_extraction.paths['PDF_PATH'])\n\n pdf_renderer = settings_utils.import_setting(\n settings.DEEPFIGURES_PDF_RENDERER)()\n\n # render the PDF into low-res 
images\n figure_extraction.low_res_rendering_paths = \\\n pdf_renderer.render(\n pdf_path=figure_extraction.paths['PDF_PATH'],\n output_dir=figure_extraction.paths['BASE'],\n dpi=settings.DEFAULT_INFERENCE_DPI)\n\n # render the PDF into hi-res images\n figure_extraction.hi_res_rendering_paths = \\\n pdf_renderer.render(\n pdf_path=figure_extraction.paths['PDF_PATH'],\n output_dir=figure_extraction.paths['BASE'],\n dpi=settings.DEFAULT_CROPPED_IMG_DPI)\n\n # extract captions from PDF using pdffigures2\n figure_extraction.pdffigures_output_path = \\\n pdffigures_wrapper.pdffigures_extractor.extract(\n pdf_path=figure_extraction.paths['PDF_PATH'],\n output_dir=figure_extraction.paths['BASE'])\n\n # run deepfigures / neural networks on the PDF images\n figure_extraction.deepfigures_json_path = \\\n detection.extract_figures_json(\n pdf_path=figure_extraction.paths['PDF_PATH'],\n page_image_paths=figure_extraction.low_res_rendering_paths,\n pdffigures_output=figure_extraction.pdffigures_output_path,\n output_directory=figure_extraction.paths['BASE'])\n\n return figure_extraction", "def cdf(self,x):\n if hasattr(x,'__len__'):\n returnCdf = np.array([self.cdf(i) for i in x])\n else:\n returnCdf = self._distribution.cdf(x)\n return returnCdf", "def pdfInTransformedSpace(self,x):\n if self.method == 'pca':\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n pdfInTransformedSpace = self._distribution.pdfInTransformedSpace(coordinate)\n else:\n self.raiseAnError(NotImplementedError,'ppfTransformedSpace not yet implemented for ' + self.method + ' method')\n return pdfInTransformedSpace", "def convertPDF(pdf_path, codec='ascii'):\n \n if pdf_path[:4] == 'http':\n print 'first downloading %s ...' % (pdf_path,)\n urllib.urlretrieve(pdf_path, 'temp.pdf')\n pdf_path = 'temp.pdf'\n \n rsrcmgr = PDFResourceManager()\n retstr = StringIO()\n laparams = LAParams()\n device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)\n \n fp = file(pdf_path, 'rb')\n process_pdf(rsrcmgr, device, fp)\n fp.close()\n device.close()\n \n str = retstr.getvalue()\n retstr.close()\n \n return str", "def pdf(self, grid, dataSegment):\n temp = grid[0][:] # make copy of parameter grid\n temp[temp > 1.] = 0. # p < 1\n temp[temp < 0.] = 0. # p > 0\n\n if dataSegment[0]:\n pass # pdf = p\n else:\n temp = 1. - temp # pdf = 1 - p\n\n return temp", "def readPDF(infile, width, grayscale=True): \n\n #To open a pdf file.\n imgAllPages = convert_from_path(infile, dpi=100)\n img = imgAllPages[0] #pick first page up\n img = np.asarray(img)\n img = img.take([1,2,0], axis=2) #change color ch. (GBR -> RGB)\n \n #To scale image to designated width.\n if img.shape[1] != width:\n height = int(round(img.shape[0] / img.shape[1] * width))\n img = cv2.resize(img, (width, height), \n interpolation = cv2.INTER_CUBIC)\n\n #To convert image in grayscale. 
\n if grayscale:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n return img \n #}}}", "def crp_to_dcm(q):\n s = q @ q\n return (1/(1 + s))*((1 - s)*np.identity(3) + 2*np.outer(q, q) - 2*ut.cross_product_operator(q))", "def cdf(self,x):\n if self.base == 'natural':\n cdfValue = (math.log(x)-self.lowerBound)/(self.upperBound-self.lowerBound)\n else:\n cdfValue = (math.log10(x)-self.lowerBound)/(self.upperBound-self.lowerBound)\n return cdfValue", "def pdf(self):\n\n pdf = PDF(self.valuesArray1, self.valuesArray2)\n\n return np.transpose(\n [*(lambda axes: [axes[:, -1], axes[:, -2]])( # invert axes order\n (pdf.extended_axes.reshape(np.prod(pdf.pdf.shape), 2))),\n pdf.pdf.flatten()])", "def _build_ppdf(self,pdf_dset,renormalize):\n\n if (not hasattr(self,'u')) or (not hasattr(self,'w')) or (not hasattr(self,'sfr')):\n raise AttributeError(\"axes are not set. Call set_axes() first\")\n\n dbinsq = self.dlogcs*self.dlogvout\n\n # Momentum flux PDF\n etaM = pdf_dset['etaM'] # in Msun/kpc^2/yr\n etap = self._etap(self.sfr) # in (Msun*km/s)/kpc^2/yr\n pdf_dset['etap'] = etap\n\n pfact = (self.vout**2+self.cs**2)/(self.vp*self.vout)\n ppdfc = etaM/etap*pdf_dset['Mpdf-cool']*pfact\n ppdfh = etaM/etap*pdf_dset['Mpdf-hot']*pfact\n ppdf = ppdfc + ppdfh\n\n if renormalize:\n renorm = ppdf.sum(dim=['logcs','logvout'])*dbinsq\n ppdfc = ppdfc/renorm\n ppdfh = ppdfh/renorm\n ppdf = ppdf/renorm\n pdf_dset['p_renorm'] = renorm\n\n pdf_dset['ppdf-cool'] = ppdfc\n pdf_dset['ppdf-hot'] = ppdfh\n pdf_dset['etap-cool'] = pdf_dset['etap']*ppdfc.sum(dim=['logcs','logvout'])*dbinsq\n pdf_dset['etap-hot'] = pdf_dset['etap']*ppdfh.sum(dim=['logcs','logvout'])*dbinsq\n pdf_dset['ppdf'] = ppdf", "def cdf(self,x):\n sortedMapping = sorted(self.mapping.items(), key=operator.itemgetter(0))\n if x == sortedMapping[-1][0]:\n return 1.0\n if x in self.values:\n cumulative=0.0\n for element in sortedMapping:\n cumulative += element[1]\n if x == ( float(element[0]) if self.isFloat else element[0] ):\n return cumulative\n else:\n if self.isFloat:\n cumulative=0.0\n for element in sortedMapping:\n cumulative += element[1]\n if x >= element[0]:\n return cumulative\n # if we reach this point we must error out\n self.raiseAnError(IOError,'Categorical distribution cannot calculate cdf for ' + str(x))", "def convert_pdf2image(pdf_path_obj):\n pdf = convert_from_path(str(pdf_path_obj))\n\n for index, page in enumerate(pdf):\n image_path = image_dir / Path(\n pdf_path_obj.stem + \"_\" + \"{:0>4}\".format(index) + \".png\"\n )\n page.save(str(image_path), \"png\")", "def generate_ctf(p):\n\tfrom EMAN2 import EMAN2Ctf\n\n\tdefocus = p[0]\n\tcs = p[1]\n\tvoltage = p[2]\n\tpixel_size = p[3]\n\tbfactor = p[4]\n\tamp_contrast = p[5]\n\t\n\tif defocus > 100: # which means it is very likely in Angstrom, therefore we are using the old convention\n\t\tdefocus *= 1e-4\n\t\n\tif amp_contrast < 1.0:\n\t\tfrom math import sqrt\n\t\tamp_contrast = amp_contrast*100/sqrt(2*amp_contrast**2-2*amp_contrast+1)\n\n\tctf = EMAN2Ctf()\n\tif(len(p) == 6):\n\t\tctf.from_dict({\"defocus\":defocus, \"cs\":cs, \"voltage\":voltage, \"apix\":pixel_size, \"bfactor\":bfactor, \"ampcont\":amp_contrast})\n\telse:\n\t\tctf.from_dict({\"defocus\":defocus, \"cs\":cs, \"voltage\":voltage, \"apix\":pixel_size, \"bfactor\":bfactor, \"ampcont\":amp_contrast,'dfdiff':p[6],'dfang':p[7]})\n\t\t\n\treturn ctf", "def convertToDiscreteFunction(doubleArray: typing.List[float]) -> cern.japc.value.DiscreteFunction:\n ...", "def cdf(self, points):\n if self._y_cdf is not None:\n x = 
points[:, 0]\n y = points[:, 1]\n\n # map the y coordinate first.\n y_out = self._y_cdf(y)\n\n # select which x quantile curve to use.\n x_curve = (y_out - self.y_min) * self.y_res / (self.y_max - self.y_min)\n x_curve = np.floor(x_curve).astype(\"int\")\n\n # map the x coordinate.\n x_range = np.arange(x.shape[0])\n x_out = np.zeros_like(x)\n for i in range(self.y_res):\n mask = x_curve == i\n x_out[x_range[mask]] = self._x_cdfs[i](x[mask])\n\n x_out = tf.cast(x_out, dtype=points.dtype)\n y_out = tf.cast(y_out, dtype=points.dtype)\n return np.column_stack((x_out, y_out))\n else:\n raise RuntimeError(\n \"CumulativeDensityFunction: Must call compute() with the correct \"\n \"direction before evaluation.\"\n )", "def cdf_discretize(self,variables=[]):\n #the errors in the code are due to the deleted files that require packages to be installed on the computer\n for i in variables:\n x=unique(self.data[:,i])\n m=max(x)-min(x)\n f=lambda x0,y0: array([m*(x0+y0)/(1+m**2), (x0*m+y0)/(1+m**2)])\n cdf=array([np.sum(self.data[:,i]<=t) for t in x])\n d=array([norm(array([x0,cdf[k]])-f(x0,cdf[k])) for k,x0 in\\\n enumerate(x)])", "def point_to_cdf(self, point):\n geomstats.errors.check_belongs(point, self)\n point = gs.to_ndarray(point, to_ndim=2)\n return lambda x: self.cdf(x, point)", "def compute_clade_probabilities():\n command = \"phyltr cat -b 10 indoeuropean.nex | phyltr clades > clades.txt\"\n subprocess.call(command, shell=True)", "def generate_pdf(pdf_data):\n\n html = HTML(string=pdf_data)\n f = html.write_pdf()\n\n return f", "def pdf(self, grid, dataSegment):\n return (grid[0] ** dataSegment[0]) * (np.exp(-grid[0])) / (np.math.factorial(dataSegment[0]))", "def processedPdf(self, grid, dataSegment):\n # if self.multipyLikelihoods == True, multi-dimensional data is processed one dimension at a time;\n # likelihoods are then multiplied\n if len(dataSegment.shape) == 2 and self.multiplyLikelihoods:\n return np.prod(np.array([self.processedPdf(grid, d) for d in dataSegment.T]), axis=0)\n\n # check for missing data\n if np.isnan(dataSegment).any():\n return np.ones_like(grid[0]) # grid of ones does not alter the current prior distribution\n\n return self.pdf(grid, dataSegment)", "def pdf(self,x):\n if self.base == 'natural':\n pdfValue = 1./(self.upperBound-self.lowerBound) * 1./x\n else:\n pdfValue = 1./(self.upperBound-self.lowerBound) * 1./x * 1./math.log(10.)\n return pdfValue", "def convertDF(fcs, sample_number=0):\r\n # Create an FCSdata object holding data from the FCS file\r\n FCSdata = convertFCS(fcs, sample_number)\r\n # Convert the cleaned FCS data into a PANDAS dataframe\r\n # Clean up the df by converting to int since values after decimal won't change the analysis\r\n df = pd.DataFrame(FCSdata.data, columns=FCSdata.channel_names).astype(int)\r\n return df", "def distributions(\n data,\n filename: str,\n continuous_kind: str = \"count\",\n nrows: int = 4,\n ncols: int = 3,\n quality: str = \"medium\",\n variables: Optional[List[str]] = None,\n sort: bool = True,\n):\n # Limit variables\n if variables is not None:\n data = data[variables]\n\n # Check filename, adding \".pdf\" if needed\n if type(filename) == str:\n filename = Path(filename)\n if filename.suffix != \"pdf\":\n filename = Path(str(filename) + \".pdf\")\n\n # Set DPI\n dpi_dict = {\"low\": 150, \"medium\": 300, \"high\": 1200}\n dpi = dpi_dict.get(quality, None)\n if dpi is None:\n raise ValueError(f\"quality was set to '{quality}' which is not a valid value\")\n\n # Make sure file is writeable\n try:\n with 
PdfPages(filename) as pdf:\n pass\n except OSError:\n raise OSError(f\"Unable to write to '{filename}'\")\n\n with PdfPages(filename) as pdf:\n # Determine the number of pages\n plots_per_page = nrows * ncols\n total_pages = (len(data.columns) + (plots_per_page - 1)) // plots_per_page\n click.echo(\n f\"Generating a {total_pages} page PDF for {len(data.columns):,} variables\"\n )\n # Starting plot space\n page_num = 1\n row_idx = 0\n col_idx = 0\n # Loop through all variables\n if sort:\n variables = sorted(list(data))\n else:\n variables = list(data)\n for variable in variables:\n if row_idx == 0 and col_idx == 0:\n # New Page\n _ = plt.subplots(squeeze=False, figsize=(8.5, 11), dpi=dpi)\n plt.suptitle(f\"Page {page_num}\")\n # Plot non-NA values and record the number of those separately (otherwise they can cause issues with generating a KDE)\n ax = plt.subplot2grid((nrows, ncols), (row_idx, col_idx))\n if str(data.dtypes[variable]) == \"category\": # binary and categorical\n sns.countplot(x=data.loc[~data[variable].isna(), variable], ax=ax)\n else:\n if continuous_kind == \"count\":\n sns.distplot(\n x=data.loc[~data[variable].isna(), variable],\n kde=False,\n norm_hist=False,\n hist_kws={\"alpha\": 1},\n ax=ax,\n )\n elif continuous_kind == \"box\":\n sns.boxplot(x=data.loc[~data[variable].isna(), variable], ax=ax)\n elif continuous_kind == \"violin\":\n sns.violinplot(x=data.loc[~data[variable].isna(), variable], ax=ax)\n elif continuous_kind == \"qq\":\n # QQ plots have to be sub-sampled otherwise there are too many points and the pdf is blank\n d = data.loc[~data[variable].isna(), variable]\n if len(d) > 400:\n d = d.sample(n=400, random_state=1)\n qqplot(d, line=\"s\", fit=True, ax=ax, color=\"steelblue\", alpha=0.7)\n else:\n raise ValueError(\n \"Unknown value for 'continuous_kind': must be one of {'count', 'box', 'violin', 'qq'}\"\n )\n # Update xlabel with NA information\n na_count = data[variable].isna().sum()\n ax.set_xlabel(\n f\"{variable}\\n{na_count:,} of {len(data[variable]):,} are NA ({na_count / len(data[variable]):.2%})\"\n )\n # Move to next plot space\n col_idx += 1\n if col_idx == ncols: # Wrap to next row\n col_idx = 0\n row_idx += 1\n if row_idx == nrows: # Wrap to next page\n row_idx = 0\n page_num += 1\n # Save the current page\n plt.tight_layout(rect=[0, 0.03, 1, 0.95])\n pdf.savefig()\n plt.close()\n # Save final page, unless a full page was finished and the page_num is now more than total_pages\n if page_num == total_pages:\n plt.tight_layout(rect=[0, 0.03, 1, 0.95])\n pdf.savefig()\n plt.close()\n # Add metadata\n d = pdf.infodict()\n d[\"Title\"] = \"Multipage PDF Example\"\n d[\"Author\"] = f\"CLARITE {clarite_version}\"\n d[\"Subject\"] = \"Distribution plots\"\n d[\"CreationDate\"] = datetime.datetime.today()\n d[\"ModDate\"] = datetime.datetime.today()", "def dcm_to_crp(dcm):\n c = np.trace(dcm) + 1\n return (1/c)*np.array([dcm[1, 2] - dcm[2, 1], dcm[2, 0] - dcm[0, 2], dcm[0, 1] - dcm[1, 0]])", "def pdf_preprocess(pdf):\n path = Path(pdf)\n if path.exists():\n # a filepath is provided, read and encode\n with path.open(\"rb\") as f:\n return base64.b64encode(f.read()).decode(\"utf-8\")\n else:\n # assume pdf is already b64 encoded\n return pdf", "def convert_pdf_to_image(source: str, dpi: int, output_folder: str) -> None:\n logger.info('Starting conversion')\n pages = convert_from_path(source, dpi)\n number = 0\n for page in pages:\n filename = os.path.join(output_folder, ''.join([str(number), '.jpg']))\n page.save(filename)\n logger.info(f'Processed 
{number} of {len(pages)}')\n number += 1\n logger.info('Finished conversion')", "def pdf(x):\n return lambda point: self.information_manifold.point_to_pdf(point)(x)", "def p2cd(p, N):\n a = p2a(p, N) # OK\n (r, c) = a2rc(a, N) # OK\n return rc2cd(r, c, N)", "def point_to_pdf(self, point):\n geomstats.errors.check_belongs(point, self)\n point = gs.to_ndarray(point, to_ndim=2)\n return lambda x: self.pdf(x, point)", "def make_kde_plot(x, pdf):\n\n fig = plt.figure(figsize=(768/96, 400/96), dpi=9)\n ax = plt.gca()\n ax.plot(x, pdf)\n ax.fill_between(x, pdf, alpha=.5)\n\n # Formatting\n plt.xlabel('Hourly rate ($)', fontsize=18)\n plt.xticks(fontsize=12)\n plt.ylabel('Number of tutors', fontsize=18)\n plt.yticks(fontsize=12)\n plt.title(\"Pricing distribution for similar tutors\", fontsize=24)\n plt.tight_layout()\n plt.show()\n\n # Save file to variable instead of writing to disk.\n img_io = StringIO()\n plt.savefig(img_io, dpi=96, format='png')\n img_io.seek(0)\n\n return img_io", "def convert_pdf_to_text(pdf_path):\n process_id = os.getpid()\n resource_manager = PDFResourceManager()\n output = StringIO.StringIO()\n laparams = LAParams(detect_vertical=True)\n device = TextConverter(\n resource_manager,\n output,\n codec='utf-8',\n laparams=laparams\n )\n interpreter = PDFPageInterpreter(resource_manager, device)\n file_handler = file(pdf_path, 'rb')\n pages = PDFPage.get_pages(file_handler)\n\n for idx, page in enumerate(pages):\n print(\"Page \" + str(idx + 1), end='\\r')\n sys.stdout.flush()\n interpreter.process_page(page)\n print()\n\n data = output.getvalue()\n data = data.replace('\\n', ' ')\n data = data.replace('\\t', ' ')\n data = data.replace('\\r', ' ')\n data = data.replace('\\x0c', ' ')\n\n return data", "def CDFconvertToQuad(self,pts):\n return self._convertCdfPointsToStd(self._convertDistrPointsToCdf(pts))", "def tabulate_pdf(self):\n\n from mitsuba.core import Float, Vector2f, ScalarVector2f\n\n extents = self.bounds.extents()\n endpoint = self.bounds.max - extents / ScalarVector2f(self.res)\n\n # Compute a set of nodes where the PDF should be evaluated\n x, y = ek.meshgrid(\n ek.linspace(Float, self.bounds.min.x, endpoint.x, self.res.x),\n ek.linspace(Float, self.bounds.min.y, endpoint.y, self.res.y)\n )\n\n endpoint = extents / ScalarVector2f(self.res)\n eps = 1e-4\n nx = ek.linspace(Float, eps, endpoint.x * (1 - eps), self.ires)\n ny = ek.linspace(Float, eps, endpoint.y * (1 - eps), self.ires)\n wx = [1 / (self.ires - 1)] * self.ires\n wy = [1 / (self.ires - 1)] * self.ires\n wx[0] = wx[-1] = wx[0] * .5\n wy[0] = wy[-1] = wy[0] * .5\n\n integral = 0\n\n self.histogram_start = time.time()\n for yi, dy in enumerate(ny):\n for xi, dx in enumerate(nx):\n xy = self.domain.map_forward(Vector2f(x + dx, y + dy))\n pdf = self.pdf_func(xy)\n integral = ek.fmadd(pdf, wx[xi] * wy[yi], integral)\n self.histogram_end = time.time()\n\n self.pdf = integral * (ek.hprod(extents / ScalarVector2f(self.res))\n * self.sample_count)\n\n # A few sanity checks\n pdf_min = ek.hmin(self.pdf) / self.sample_count\n if not pdf_min >= 0:\n self._log('Failure: Encountered a cell with a '\n 'negative PDF value: %f' % pdf_min)\n self.fail = True\n\n self.pdf_sum = ek.hsum(self.pdf) / self.sample_count\n if self.pdf_sum > 1.1:\n self._log('Failure: PDF integrates to a value greater '\n 'than 1.0: %f' % self.pdf_sum)\n self.fail = True", "def _densityctr(self, rangex, rangey, dim = misc.DEF_VIS_DIM):\n gr = N.meshgrid(rangex, rangey)\n x = gr[0].flatten()\n y = gr[1].flatten()\n xdata = N.concatenate((x[:, 
N.newaxis], y[:, N.newaxis]), axis = 1)\n dmu = self.mu[:, dim]\n dva = self._get_va(dim)\n den = GM.fromvalues(self.w, dmu, dva).pdf(xdata, log = True)\n den = den.reshape(len(rangey), len(rangex))\n\n return gr[0], gr[1], den", "def cohensd2problarger(d):\n\n return stats.norm.cdf(d / np.sqrt(2))", "def pdf(self, grid, dataSegment):\n return np.exp(-((dataSegment[1] - grid[0] * dataSegment[0]) ** 2.) / (2. * grid[1] ** 2.) - .5 * np.log(\n 2. * np.pi * grid[1] ** 2.))", "def cdf(self, x):\n\n pi = 3.1415926536\n mean = self.mean\n stddev = self.stddev\n\n x1 = (x - mean) / (stddev * (2 ** 0.5))\n\n erf1 = (2/pi**0.5)\n erf2 = (x1-((x1**3)/3)+((x1**5)/10)-((x1**7)/42)+((x1**9)/216))\n erf = erf1 * erf2\n cdf = (1/2)*(1+erf)\n\n return cdf", "def create_pdf(self):\n\n my_datetime = datetime.now()\n self.pdf_name = (\n self.pdf_name + \"_\" + my_datetime.strftime(\"%H%M_%d%m%Y\") + \".pdf\"\n )\n fig_width = aW * self.column_ratio[0]\n\n clm_width_meta = (aW * self.column_ratio[1]) / len(self.fields)\n\n c = canvas.Canvas(os.path.join(self.pdf_folder, self.pdf_name), pagesize=A4)\n\n for qc_run_id, fig_file in sorted(self._files.items()):\n (param_values, feature_values) = get_param_values(\n qc_run_id, self.db_name, return_meta_add_on=True\n )\n\n comment = self.subject + \"<br/>\"\n # c.saveState()\n title = \"Dataset \" + qc_run_id\n\n # Prepare header\n header = Paragraph(title, title_style)\n h_w, h_h = header.wrap(aW, aH)\n\n # Prepare image\n img = ImageReader(fig_file)\n im_width, im_height = img.getSize()\n aspect = im_height / float(im_width)\n fig_height = fig_width * aspect\n\n # Prepare metadata section\n\n meta_table = Table(\n param_values,\n colWidths=[clm_width_meta] * len(self.fields),\n hAlign=\"CENTER\",\n rowHeights=0.22 * inch,\n )\n meta_table.setStyle(\n TableStyle(\n [\n (\"FONT\", (0, 0), (-1, 0), \"Helvetica-Bold\"),\n (\"FONT\", (0, 1), (-1, -1), \"Helvetica\"),\n (\"LINEBELOW\", (0, 0), (1, 0), 0.08, colors.black),\n (\"SIZE\", (0, 0), (-1, -1), 8),\n (\"VALIGN\", (0, 0), (-1, -1), \"BOTTOM\"),\n # ('ALIGN', (0, 0), (-1, 0), 'CENTER'),\n (\"ALIGN\", (0, 0), (0, -1), \"LEFT\"),\n (\"ALIGN\", (1, 1), (1, -1), \"LEFT\"),\n (\"INNERGRID\", (0, 0), (-1, -1), 0.08, colors.beige),\n # ('BOX', (0,0), (-1,-1), 0.25, colors.grey),\n ]\n )\n )\n\n meta_width, meta_height = meta_table.wrap(aW - im_width, aH / 2)\n\n # Prepare comments header\n comments_header = Paragraph(\"Comments:\", title_style)\n avail_height = aH - fig_height - v_padding\n comm_h_width, comm_h_height = comments_header.wrap(\n im_width, avail_height # aW - meta_width,\n )\n # Prepare comments\n my_datetime = datetime.now()\n ts = \"Printed on \" + my_datetime.strftime(\"%c\")\n\n try:\n data_specific_comment = self.comments[int(qc_run_id)]\n comment += data_specific_comment + \"<br/>\"\n comment += self.comments[\"general\"] + \"<br/>\"\n\n comment += self.smalltalk + \"<br/>\"\n except Exception:\n logger.warning(\n \"Unable to summarize result of \" + \"dataset {}\".format(qc_run_id)\n )\n comment_ts = comment + ts\n comment_ts = textwrap.fill(comment_ts, 70)\n comment_ts = comment_ts.replace(\"\\n\", \"<br/>\")\n\n comments_p = Paragraph(comment_ts, body_style)\n\n avail_height = aH - fig_height - v_padding - comm_h_height\n\n comm_width, comm_height = comments_p.wrap(im_width, avail_height) # aW,\n\n line_widths = comments_p.getActualLineWidths0()\n number_of_lines = len(line_widths)\n if number_of_lines > 1:\n pass\n if number_of_lines == 1:\n min(line_widths)\n comm_width, comm_height = 
comments_p.wrap(im_width, avail_height)\n\n # Prepare features\n feat_table = Table(\n feature_values,\n colWidths=[clm_width_meta] * len(self.fields),\n hAlign=\"CENTER\",\n rowHeights=0.22 * inch,\n )\n feat_table.setStyle(\n TableStyle(\n [\n (\"FONT\", (0, 0), (-1, 0), \"Helvetica-Bold\"),\n (\"FONT\", (0, 1), (-1, -1), \"Helvetica\"),\n (\"LINEBELOW\", (0, 0), (1, 0), 0.08, colors.black),\n (\"SIZE\", (0, 0), (-1, -1), 8),\n (\"VALIGN\", (0, 0), (-1, -1), \"BOTTOM\"),\n # ('ALIGN', (0, 0), (-1, 0), 'CENTER'),\n (\"ALIGN\", (0, 0), (0, -1), \"LEFT\"),\n (\"ALIGN\", (1, 1), (1, -1), \"LEFT\"),\n (\"INNERGRID\", (0, 0), (-1, -1), 0.08, colors.beige),\n # ('BOX', (0,0), (-1,-1), 0.25, colors.grey),\n ]\n )\n )\n avail_height = aH - meta_height # fig_height - v_padding - comm_h_height\n avail_height -= comm_height\n feat_width, feat_height = feat_table.wrap(aW - im_width, avail_height)\n\n # Draw everyting on canvas\n\n header.drawOn(c, left_margin, aH - top_margin)\n\n c.drawImage(\n img,\n left_margin,\n aH - top_margin - fig_height - v_padding,\n width=fig_width * 1.1,\n height=fig_height * 1.1,\n mask=\"auto\",\n )\n\n meta_table.drawOn(\n c,\n left_margin + fig_width + h_padding,\n aH - meta_height - top_margin / 2, # - v_padding\n )\n\n comments_header.drawOn(\n c,\n left_margin,\n aH\n - top_margin\n - comm_h_height\n - fig_height\n - 2 * v_padding, # - add_on_height\n )\n\n comments_p.drawOn(\n c,\n left_margin,\n aH\n - top_margin\n - comm_h_height\n - comm_height\n - fig_height\n - 2 * v_padding\n - comm_h_height, # - add_on_height\n )\n\n feat_table.drawOn(\n c,\n left_margin + fig_width + h_padding,\n aH - meta_height - top_margin / 2 - feat_height - v_padding,\n # top_margin - fig_height - 2*v_padding - feat_height\n )\n\n # new page\n c.showPage()\n c.saveState()\n\n c.save()", "def pdf(self):\n norm_hist = self.complexity_histogram / self.complexity_histogram.sum()\n # Convert the Complexity pdf to an neo.AnalogSignal\n pdf = neo.AnalogSignal(\n np.expand_dims(norm_hist, axis=1),\n units=pq.dimensionless,\n t_start=0 * pq.dimensionless,\n sampling_period=1 * pq.dimensionless)\n return pdf", "def pdf(self, grid, dataSegment):\n return np.exp(-((dataSegment[0, 0] - grid[0]) ** 2.) / (2. * dataSegment[0, 1] ** 2.) -\n .5 * np.log(2. * np.pi * dataSegment[0, 1] ** 2.))", "def pdf(x, point):\n raise NotImplementedError(\"The pdf method has not yet been implemented.\")", "def from_pdf(path):\n raw_regexes = [\n r\"\"\"<prism:doi>(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)</prism:doi>\"\"\",\n r\"\"\"[\"'](?:doi|DOI):(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)[\"']\"\"\",\n r\"\"\"URI\\s*\\(https?://doi.org/(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)\\)\\s*>\"\"\",\n r\"\"\"URI\\s*\\((?:https?://)?www.nature.com/doifinder/(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)\\)\\s*>\"\"\",\n # This one works for some ACIE papers, but is too risky. It matches\n # against DOIs of cited papers too. 
Better to use WPS-ARTICLEDOI.\n # r\"\"\"/URI\\(https?://(?:dx)?.doi.org/(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)\\)\"\"\",\n r\"\"\"/WPS-ARTICLEDOI\\s*\\((10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)\\)\"\"\",\n r\"\"\"\\((?:doi|DOI):\\s*(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)\\)\"\"\",\n r\"\"\"<rdf:li.+>(?:doi|DOI):(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)</rdf:li>\"\"\",\n ]\n regexes = [re.compile(regex) for regex in raw_regexes]\n class _DOIFound(Exception):\n pass\n\n p = Path(path)\n if not (p.exists() or p.is_file()):\n return _error(f\"from_pdf: invalid path '{p}' given\")\n\n strings = subprocess.Popen([\"strings\", p], stdout=subprocess.PIPE)\n grep = subprocess.Popen([\"grep\", \"-i\", \"doi\"], stdin=strings.stdout, stdout=subprocess.PIPE)\n try:\n for line in grep.stdout:\n line = line.decode(_g.gpe).strip()\n for regex in regexes:\n match = regex.search(line)\n if match:\n raise _DOIFound(match.group(1))\n except _DOIFound as e:\n doi = e.args[0]\n # Prune away any extra parentheses at the end.\n nopen = doi.count('(')\n nclose = doi.count(')')\n if nopen != nclose:\n doi = doi.rsplit(')', maxsplit=(nclose - nopen))[0]\n # Report success.\n return DOI(doi)\n else:\n return _error(f\"from_pdf: could not find DOI from '{p}'\")", "def cdfFunction(f, x, N):\r\n return ssstats.binom.cdf(x, N, f)", "def pdf(self, grid, dataSegment):\n return np.exp(-(dataSegment[0] ** 2.) / (2. * grid[0] ** 2.) - .5 * np.log(2. * np.pi * grid[0] ** 2.))", "def pdf_page_to_png(src_pdf, pagenum=0, resolution=154):\n\n #check_dependencies(__optional_dependencies__['pdf'])\n # Import libraries within this function so as to avoid import-time dependence\n\n dst_pdf = PyPDF2.PdfFileWriter()\n src_pdf = w(filename=src_pdf,resolution=300)\n dst_pdf.addPage(src_pdf.getPage(pagenum))\n\n pdf_bytes = io.BytesIO()\n dst_pdf.write(pdf_bytes)\n pdf_bytes.seek(0)\n\n img = Image(file=pdf_bytes, resolution=resolution)\n \n with img.convert('png') as converted:\n converted.save(filename='converted.png')\n return img", "def test_cdf(log_prob_coo):\n\n offset_dict = log_prob_coo['offsets']\n\n # the input\n print(log_prob_coo)\n print('input log probs')\n print(log_prob_sparse_to_dense(log_prob_coo['coo']))\n\n # with this shape converter, we get one row, where each value is one m\n converter = IndexConverter(total_n_cells=1,\n total_n_genes=log_prob_coo['coo'].shape[0])\n\n # set up and estimate\n estimator = ThresholdCDF(index_converter=converter)\n noise_csr = estimator.estimate_noise(noise_log_prob_coo=log_prob_coo['coo'],\n noise_offsets=offset_dict,\n q=0.5)\n\n # output\n print('dense noise count estimate, per m')\n out_per_m = np.array(noise_csr.todense()).squeeze()\n print(out_per_m)\n print('truth')\n print(log_prob_coo['cdfs'])\n\n # test\n np.testing.assert_array_equal(out_per_m, log_prob_coo['cdfs'])", "def processPdf(self, pdf_path: str) -> (list, list):\n hocr_list = []\n images = []\n numPages = self.getNumberPages(pdf_path)\n for initalpage in range(1, numPages+self.batch, self.batch):\n pages = pdf2image.convert_from_path(pdf_path,\n first_page=initalpage,\n last_page=min(\n initalpage+self.batch-1, numPages),\n output_folder=self.images_path,\n grayscale='true',\n fmt='tif')\n for page in pages:\n hocr_bytes = pytesseract.image_to_pdf_or_hocr(page, \n lang='por',\n extension='hocr',\n config='--psm 1')\n hocr_list.append(hocr_bytes)\n images.append(page.filename)\n page.close()\n return hocr_list, images", "def _create_pdf(self, survey, response):\n pdf_transformer = PDFTransformer(survey, response)\n self._pdf, 
self._page_count = pdf_transformer.render_pages()\n return self._pdf", "def cdf(array, figsize, color, label, xlabel, ylabel, title, textsize, xsize, ysize, loc):\r\n fig, ax = plt.subplots(figsize=figsize)\r\n x = np.sort(array)\r\n y = np.array(range(len(array)))/float(len(array))*100 \r\n ax.plot(x, y, color = color, label = label) # plot the CDF\r\n ax.set_title(title, weight = 'bold', size = textsize)\r\n ax.set_xlabel(xlabel, weight = 'bold', size = textsize)\r\n ax.set_ylabel(ylabel, weight = 'bold', size = textsize)\r\n plt.xticks(fontsize = xsize)\r\n plt.yticks(fontsize = ysize)\r\n plt.legend(loc = loc)", "def convert_doc_count_to_idf(self, df_of_dc_to_make_into_idf):\n num_transcripts = df_of_dc_to_make_into_idf.loc[self.__str_cheeky_document_counter]\n # in our case, because of the way we are constructing the set of terms\n # there should never be a term that has a document frequency of zero.\n # however, in general, if querying a new phrase using existing data,\n # in theory a term could have a document frequency of zero, so the general\n # practice is to add 1 to the document frequency, so that in the next\n # set, division by zero does not happen.\n df_of_dc_to_make_into_idf = df_of_dc_to_make_into_idf + 1\n # then we find the IDF (inverse document frequency)\n df_of_dc_to_make_into_idf = num_transcripts / df_of_dc_to_make_into_idf\n # then we find the log of that\n df_of_dc_to_make_into_idf = log(df_of_dc_to_make_into_idf)\n return df_of_dc_to_make_into_idf", "def process_pdf(filename, qualies_only=False):\n if filename.endswith('.txt'):\n f = open(filename)\n text = f.read()\n f.close()\n else:\n text = subprocess.check_output([\"pdftotext\", \"-layout\",\n filename, \"-\"]).decode('utf-8')\n\n print(\"Processing {}...\".format(filename))\n\n pages = text.split(chr(12))\n print (\"{} Pages\".format(len(pages)))\n md = []\n qd = []\n for p in pages:\n if ('MAIN DRAW SINGLES' in p or 'Singles Championship' in p\n or 'Ladies\\' Singles' in p):\n md += [p]\n elif ('QUALIFYING SINGLES' in p or 'Qualifying Singles' in p\n or 'Qualifying Ladies\\' Singles' in p):\n qd += [p]\n elif ('Qualifiers' in p and not 'Doubles' in p):\n qd += [p]\n\n md_result = None\n qd_result = None\n\n meta = None\n if md and not qualies_only:\n md_result = drawsheet_process(chr(12).join(md))\n meta = md_result[2]\n\n # copy the metadata to the quaily draw if possible\n if qd:\n qd_result = drawsheet_process(chr(12).join(qd), meta, True)\n\n return (md_result, qd_result)", "def make_joint_pdf(self, benchmark) :\n \n #distortion case 1 -- taxes/subsidy uncorrelated with firm size or benchmark case where no tax/subsidy at all\n if self.distortion_case == 1 or benchmark == 1 : \n self.joint_pdf = self.prod_pdf_matrix * self.policy_pdf \n \n #distortion case 2 -- tax/subsidy negatively correlated with firm size, subsidize only fraction self.subsidy_frac of lowest prod plants\n if self.distortion_case == 2:\n \n self.joint_pdf = np.zeros((self.Ns,self.ntau))\n prod_cdf = np.cumsum(self.prod_pdf) # cdf over the idiosyncratic draws of s\n I=np.where(prod_cdf <= self.subsidy_frac)\n self.joint_pdf[I,0]=self.prod_pdf[I] #take the lower part of the pdf over idiosyncratic draws of s\n \n #if there is excempt firms\n if self.excempt_frac>0:\n #take the indices of pdf for s for the interval sub and sub+nosub. 
\n I=np.where((prod_cdf > self.subsidy_frac) & (prod_cdf <= self.subsidy_frac + self.excempt_frac))\n self.joint_pdf[I,1] = self.prod_pdf[I]\n \n J=np.where(prod_cdf > self.excempt_frac + self.subsidy_frac)\n self.joint_pdf[J,2]=self.prod_pdf[J]\n \n \n #distortion case 3 -- tax/subsidy positively correlated with firm size, subsidize only fraction self.subsidy_frac of highest prod plants\n elif self.distortion_case == 3:\n \n self.joint_pdf = np.zeros((self.Ns,self.ntau))\n prod_cdf = np.cumsum(self.prod_pdf) # cdf over the idiosyncratic draws of s\n I=np.where(prod_cdf <= 1-self.subsidy_frac - self.excempt_frac)\n self.joint_pdf[I,2]=self.prod_pdf[I] #take the lower part of the pdf over idiosyncratic draws of s to tax\n \n #if there is excempt firms\n if self.excempt_frac>0:\n #take the indices of pdf for s for the interval sub and sub+nosub. \n I = np.where((prod_cdf > 1 - self.subsidy_frac - self.excempt_frac) & (prod_cdf <= 1 - self.subsidy_frac))\n self.joint_pdf [I,1] = self.prod_pdf[I]\n \n J=np.where(prod_cdf > 1 - self.subsidy_frac)\n self.joint_pdf[J,0] = self.prod_pdf[J]", "def pdf(self, grid, dataSegment):\n return np.exp(\n -((dataSegment[0] - grid[0]) ** 2.) / (2. * grid[1] ** 2.) - .5 * np.log(2. * np.pi * grid[1] ** 2.))", "def draw_pdf_contours(self, ax, dist, label=None, nlevels=200, subdiv=8, **kwargs):\r\n # Subdivide the triangle into a triangular mesh\r\n refiner = tri.UniformTriRefiner(self._triangle)\r\n trimesh = refiner.refine_triangulation(subdiv=subdiv)\r\n \r\n # convert to barycentric coordinates and compute probabilities of the given distribution \r\n pvals = [dist.pdf(self.xy2bc(xy)) for xy in zip(trimesh.x, trimesh.y)]\r\n \r\n ax.tricontourf(trimesh, pvals, nlevels, **kwargs)\r\n #plt.axis('equal')\r\n ax.set_xlim(0, 1)\r\n ax.set_ylim(0, 0.75**0.5)\r\n ax.set_title(str(label))\r\n ax.axis('off') \r\n return ax", "def get_pdf(self, points=None):\n if points is not None:\n return self.parent.pdf(points)\n else:\n raise ValueError( 'Please digit an input for getPDF method')", "def isDiscrete(self):\n return self._discrete", "def is_pdf(prediction):\n probs = [y for x, y in prediction.items()]\n\n distance = 1 - sum(probs) \n assert distance >= -0.001 \n if distance >= -0.001 and distance < 1:\n return True", "def plot_to_pdf(pdf_fname, cmts_directory, misfit_windows_collection, iterations_list, snr_threshold, event_depth):\n rep_key = sorted(misfit_windows_collection.keys())[0]\n all_events = sorted(misfit_windows_collection[rep_key].keys())\n with PdfPages(pdf_fname) as pdf:\n for each_event in tqdm.tqdm(all_events):\n # we should plot the beachball and plot the source parameter table here\n plot_source_parameters(\n each_event, pdf, cmts_directory, iterations_list)\n # prepare information to plot\n each_misfit_windows_collection = {}\n for each_iteration in iterations_list:\n each_misfit_windows_collection[each_iteration] = (\n misfit_windows_collection[each_iteration][each_event])\n event_depth_dict = event_depth[each_event]\n data_collection, category_phases, category_list = get_plotting_data(\n each_misfit_windows_collection, iterations_list, snr_threshold, event_depth_dict)\n for each_category, phase_list_for_each_category in zip(category_list, category_phases):\n # one page for each category\n figs = plt.figure(figsize=(50, 50))\n collecction_all = {}\n if (each_category != \"surface\"):\n collecction_all[\"deltat\"] = [np.array([], dtype=np.float)\n for i in range(len(iterations_list))]\n collecction_all[\"similarity\"] = [np.array([], 
dtype=np.float)\n for i in range(len(iterations_list))]\n collecction_all[\"cc\"] = [np.array([], dtype=np.float)\n for i in range(len(iterations_list))]\n # we plot for each phases\n for row_index, each_phase in enumerate(phase_list_for_each_category):\n # we plot for deltat,similarity,cc\n for column_index, plot_type in enumerate([\"deltat\", \"similarity\", \"cc\"]):\n # num must be 1 <= num <= num_max, not 0\n # keep different category's figsize the same\n ax = figs.add_subplot(\n 8, 3, row_index * 3 + column_index+1)\n\n for interation_index, each_iteration in enumerate(iterations_list):\n sns.distplot(data_collection[each_iteration][each_category][row_index]\n [plot_type], ax=ax, hist=False, label=f\"before iteration {each_iteration}\",\n kde_kws={\"linewidth\": 6})\n # collect to the category summary\n if(each_category != \"surface\"):\n if (column_index == 0):\n collecction_all[\"deltat\"][interation_index] = np.concatenate(\n (collecction_all[\"deltat\"][interation_index], data_collection[each_iteration][each_category][row_index]\n [plot_type]))\n elif (column_index == 1):\n collecction_all[\"similarity\"][interation_index] = np.concatenate(\n (collecction_all[\"similarity\"][interation_index], data_collection[each_iteration][each_category][row_index]\n [plot_type]))\n elif (column_index == 2):\n collecction_all[\"cc\"][interation_index] = np.concatenate(\n (collecction_all[\"cc\"][interation_index], data_collection[each_iteration][each_category][row_index]\n [plot_type]))\n if (plot_type == \"deltat\"):\n ax.set_xlim((-10, 10))\n elif(plot_type == \"similarity\"):\n ax.set_xlim((0, 1))\n elif(plot_type == \"cc\"):\n ax.set_xlim((0, 1))\n # ax.legend()\n if (column_index == 0):\n ax.get_yaxis().set_ticklabels([])\n ax.set_ylabel(each_phase, fontsize=50, rotation=90)\n else:\n ax.get_yaxis().set_ticklabels([])\n ax.tick_params(axis=\"x\", labelsize=30)\n if(plot_type != \"similarity\"):\n ax.set_xlabel(plot_type, fontsize=30)\n else:\n ax.set_xlabel(\"zero-lag cc\", fontsize=30)\n if (row_index == 0 and column_index == 1):\n ax.set_title(\n f\"gcmtid: {each_event}\\ncategory: {each_category}\", fontsize=50)\n if (each_category != \"surface\"):\n for column_index, plot_type in enumerate([\"deltat\", \"similarity\", \"cc\"]):\n ax = figs.add_subplot(\n 8, 3, (row_index+1) * 3 + column_index+1) # pylint: disable=undefined-loop-variable\n for interation_index, each_iteration in enumerate(iterations_list):\n sns.distplot(collecction_all[plot_type][interation_index], ax=ax, hist=False, label=f\"before iteration {each_iteration}\",\n kde_kws={\"linewidth\": 6})\n if (plot_type == \"deltat\"):\n ax.set_xlim((-10, 10))\n elif(plot_type == \"similarity\"):\n ax.set_xlim((0, 1))\n elif(plot_type == \"cc\"):\n ax.set_xlim((0, 1))\n if (column_index == 0):\n ax.get_yaxis().set_ticklabels([])\n ax.set_ylabel(\n \"all phases\", fontsize=50, rotation=90)\n else:\n ax.get_yaxis().set_ticklabels([])\n ax.tick_params(axis=\"x\", labelsize=30)\n if(plot_type != \"similarity\"):\n ax.set_xlabel(plot_type, fontsize=30)\n else:\n ax.set_xlabel(\"zero-lag cc\", fontsize=30)\n\n figs.tight_layout()\n pdf.savefig(figs)\n plt.close(fig=figs)", "def cdf(self, X, Y):\n assert self.fitted, \"model must be fitted to compute likelihood score\"\n X, Y = self._handle_input_dimensionality(X, Y, fitting=False)\n p = self.sess.run(self.cdf_, feed_dict={self.X_ph: X, self.Y_ph: Y})\n assert p.ndim == 1 and p.shape[0] == X.shape[0]\n return p", "def complexity_pdf(spiketrains, bin_size):\n 
warnings.warn(\"'complexity_pdf' is deprecated in favor of the Complexity \"\n \"class which has a 'pdf' method\", DeprecationWarning)\n\n complexity = Complexity(spiketrains, bin_size=bin_size)\n\n return complexity.pdf()" ]
[ "0.69857043", "0.61699057", "0.59049094", "0.5864199", "0.58318394", "0.57629466", "0.573494", "0.5734838", "0.56632376", "0.5655967", "0.56498164", "0.56412953", "0.558819", "0.55852294", "0.5578906", "0.5552537", "0.5533782", "0.5533782", "0.551519", "0.5459205", "0.5448148", "0.5439202", "0.54346496", "0.54346496", "0.5407957", "0.5399415", "0.5383903", "0.5379233", "0.5377733", "0.53326327", "0.53093433", "0.5289671", "0.5282161", "0.5255352", "0.5246224", "0.52345765", "0.52304536", "0.5220099", "0.51856256", "0.5172215", "0.5154624", "0.51518583", "0.5135387", "0.51341534", "0.5117266", "0.5114184", "0.5113833", "0.5107374", "0.5097309", "0.50874037", "0.5069802", "0.5066806", "0.5055306", "0.5054924", "0.504528", "0.50370765", "0.5011828", "0.5007423", "0.5001306", "0.49989602", "0.49968305", "0.49880978", "0.4987999", "0.49845248", "0.49843746", "0.49649927", "0.4961123", "0.49581125", "0.4948705", "0.49410772", "0.4940465", "0.4936745", "0.49326405", "0.49284065", "0.4925423", "0.49159005", "0.49153972", "0.49123156", "0.49063674", "0.49058974", "0.49058172", "0.48931798", "0.48915714", "0.48839584", "0.48838767", "0.4878853", "0.4874939", "0.4864713", "0.48548177", "0.48537922", "0.48476714", "0.4837485", "0.48343998", "0.48280606", "0.48241147", "0.48152342", "0.48109028", "0.48064855", "0.47908098", "0.478813" ]
0.6718552
1
Tests the null hypothesis that both samples belong to the same distribution.
def _two_sample_kolmogorov_smirnov_pmf( pmf1: np.ndarray, pmf2: np.ndarray, alpha: float = 0.05 ) -> Tuple[float, float, bool]: # note: yields different results as `scipy.stats.ks_2samp` cdf1 = np.cumsum(pmf1) cdf2 = np.cumsum(pmf2) n1 = cdf1[-1] n2 = cdf2[-1] # cannot be inplace because of type conversion cdf1 = cdf1 / n1 cdf2 = cdf2 / n2 statistic, pvalue = _two_sample_kolmogorov_smirnov_same_length(cdf1, cdf2, n1, n2) reject = pvalue < alpha return statistic, pvalue, reject
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_mc_t_two_sample_no_mc(self):\r\n x = array([1, 1, 1])\r\n y = array([0, 0, 0])\r\n self.assertEqual(mc_t_two_sample(x, x), (nan, nan, [], nan))", "def test_t_two_sample_no_variance(self):\r\n # By default should return (None, None) to mimic R's t.test.\r\n x = array([1, 1., 1])\r\n y = array([0, 0, 0.0])\r\n self.assertEqual(t_two_sample(x, x), (nan, nan))\r\n self.assertEqual(t_two_sample(x, y), (nan, nan))\r\n\r\n # Test none_on_zero_variance=False on various tail types. We use\r\n # self.assertEqual instead of self.assertFloatEqual because the latter\r\n # sees inf and -inf as being equal.\r\n\r\n # Two tailed: a < b\r\n self.assertEqual(t_two_sample(y, x, none_on_zero_variance=False),\r\n (float('-inf'), 0.0))\r\n\r\n # Two tailed: a > b\r\n self.assertEqual(t_two_sample(x, y, none_on_zero_variance=False),\r\n (float('inf'), 0.0))\r\n\r\n # One-tailed 'high': a < b\r\n self.assertEqual(t_two_sample(y, x, tails='high',\r\n none_on_zero_variance=False),\r\n (float('-inf'), 1.0))\r\n\r\n # One-tailed 'high': a > b\r\n self.assertEqual(t_two_sample(x, y, tails='high',\r\n none_on_zero_variance=False),\r\n (float('inf'), 0.0))\r\n\r\n # One-tailed 'low': a < b\r\n self.assertEqual(t_two_sample(y, x, tails='low',\r\n none_on_zero_variance=False),\r\n (float('-inf'), 0.0))\r\n\r\n # One-tailed 'low': a > b\r\n self.assertEqual(t_two_sample(x, y, tails='low',\r\n none_on_zero_variance=False),\r\n (float('inf'), 1.0))\r\n\r\n # Should still receive (nan, nan) if the lists have no variance and\r\n # have the same single value.\r\n self.assertEqual(t_two_sample(x, x, none_on_zero_variance=False),\r\n (nan, nan))\r\n self.assertEqual(t_two_sample(x, [1, 1], none_on_zero_variance=False),\r\n (nan, nan))", "def test_none(self):\n esnA = ESN(N_in,N_out,random_state=None)\n esnB = ESN(N_in,N_out,random_state=None)\n self._compare(esnA,esnB,should_be=\"different\")", "def test_mixed():\n # assert the distribution of the samples is close to the distribution of the data\n # using a kstest for continuous + a cstest for categorical.", "def test_check_null_weight_with_nonzeros() -> None:\n sample_weight = np.ones_like(y_toy)\n sw_out, X_out, y_out = check_null_weight(sample_weight, X_toy, y_toy)\n np.testing.assert_almost_equal(sw_out, sample_weight)\n np.testing.assert_almost_equal(X_out, X_toy)\n np.testing.assert_almost_equal(y_out, y_toy)", "def test_sample(self):\n dist = self.many_samples([0, 0, 0, 1])\n self.assertEquals(3, dist.argMax())\n\n dist = self.many_samples([1, 0, 0, 0, 0])\n self.assertEquals(0, dist.argMax())\n\n dist = self.many_samples([0.5, 0, 0, 0.25, 0.25])\n self.assertAlmostEquals(dist[0], 0.5, delta=0.01)\n self.assertAlmostEquals(dist[3], 0.25, delta=0.01)\n self.assertAlmostEquals(dist[4], 0.25, delta=0.01)\n self.assertEquals(dist[1], 0)\n self.assertEquals(dist[2], 0)\n\n with self.assertRaises(AssertionError):\n diffp.sample([0.5, 0.5, 0.01])", "def testAlphaTwoSamplesMatchANormalDistribution(self):\n num_samples = 16384\n scale = 1.7\n rng = random.PRNGKey(0)\n samples = self._distribution.draw_samples(rng, 2 * jnp.ones(num_samples),\n scale * jnp.ones(num_samples))\n # Perform the Kolmogorov-Smirnov test against a normal distribution.\n ks_statistic = scipy.stats.kstest(samples, 'norm', (0., scale)).statistic\n self.assertLess(ks_statistic, 0.01)", "def report_ttest_2sample(null_hypothesis, sample1, sample2, paired, alpha=0.05):\n\n if paired:\n t_value, p_value = stats.ttest_rel(sample1, sample2)\n else:\n t_value, p_value = stats.ttest_ind(sample1, sample2)\n 
print('Test for null hypothesis \"{}\".'.format(null_hypothesis))\n print('Sample 1 mean: {}, Sample 1 SD: {}'.format(np.mean(sample1), np.std(sample1)))\n print('Sample 2 mean: {}, Sample 2 SD: {}'.format(np.mean(sample2), np.std(sample2)))\n print('t({})={}, p={}.'.format(len(sample1)-1, t_value, p_value))\n if p_value < alpha:\n print('Reject null hypothesis.\\n')\n else:\n print('Fail to reject null hypothesis.\\n')", "def test_check_null_weight_with_none() -> None:\n sw_out, X_out, y_out = check_null_weight(None, X_toy, y_toy)\n assert sw_out is None\n np.testing.assert_almost_equal(X_out, X_toy)\n np.testing.assert_almost_equal(y_out, y_toy)", "def test_t_paired_no_variance(self):\r\n x = [1, 1, 1]\r\n y = [0, 0, 0]\r\n self.assertEqual(t_paired(x, x), (nan, nan))\r\n self.assertEqual(t_paired(x, y), (nan, nan))", "def discrete_one_samp_ks(distribution1: np.array, distribution2: np.array, num_samples: int) -> Tuple[float, bool]:\n cutoff = 1.36 / math.sqrt(num_samples)\n ecdf1 = np.array([sum(distribution1[:i + 1]) for i in range(len(distribution1))])\n ecdf2 = np.array([sum(distribution2[:i + 1]) for i in range(len(distribution2))])\n max_diff = np.absolute(ecdf1 - ecdf2).max()\n return max_diff, max_diff < cutoff", "def test_multiple_rng(self):\r\n rng1 = RandomStreams(1234)\r\n rng2 = RandomStreams(2392)\r\n assert rng1.random_state_variables is not rng2.random_state_variables", "def assertGaussianOversampledPsfEqual(self, lhs, rhs):\n self.assertEqual(lhs.getSigma(), rhs.getSigma())\n self.assertEqual(lhs.getOversampleFactor(), rhs.getOversampleFactor())\n self.assertEqual(lhs.getTargetSize(), rhs.getTargetSize())", "def test_with_predefined_dist(self, seed):\n dim = Dimension(\"yolo\", dists.norm, 0.9)\n samples = dim.sample(seed=seed)\n assert len(samples) == 1\n assert dists.norm.rvs(0.9) == samples[0]", "def test_compare_different_expectations(self):\n\n pd_single = norm(0, 1)\n pd = []\n for i in range(0, 3):\n pd.append(pd_single)\n meas = [-1, 0, 1]\n meanCRIGN1, singleCRIGN1 = crign.crign(pd, meas)\n\n pd2 = []\n for i in range(0, 3):\n pd2.append(norm(i, 1))\n meas2 = [-1, 1, 3]\n\n meanCRIGN2, singleCRIGN2 = crign.crign(pd2, meas2)\n\n is_good = np.isclose(singleCRIGN1, singleCRIGN2).all()\n assert_true(is_good, msg=\"Relation of individual CRIGN values should return roughly the same value.\")", "def test_combine_nsamples_different_shapes():\n test_sample_1 = np.ones((2, 13, 21))\n test_sample_2 = np.ones((3, 13, 21))\n pytest.raises(ValueError, utils.combine_nsamples, test_sample_1, test_sample_2)", "def test_check_null_weight_with_zeros() -> None:\n sample_weight = np.ones_like(y_toy)\n sample_weight[:1] = 0.0\n sw_out, X_out, y_out = check_null_weight(sample_weight, X_toy, y_toy)\n np.testing.assert_almost_equal(sw_out, np.array([1, 1, 1, 1, 1]))\n np.testing.assert_almost_equal(X_out, np.array([[1], [2], [3], [4], [5]]))\n np.testing.assert_almost_equal(y_out, np.array([7, 9, 11, 13, 15]))", "def test_t_one_sample(self):\r\n x = array(range(-5, 5))\r\n y = array(range(-1, 10))\r\n self.assertFloatEqualAbs(t_one_sample(x), (-0.5222, 0.6141), 1e-4)\r\n self.assertFloatEqualAbs(t_one_sample(y), (4, 0.002518), 1e-4)\r\n # do some one-tailed tests as well\r\n self.assertFloatEqualAbs(\r\n t_one_sample(y, tails='low'), (4, 0.9987), 1e-4)\r\n self.assertFloatEqualAbs(\r\n t_one_sample(y, tails='high'), (4, 0.001259), 1e-4)", "def test_onesample_two_tailed(self):\n rng = np.random.default_rng(13489132474)\n\n passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = 
rng.normal(-5, 2, 100)\n\n ttest = one_sample_ttest(data1, -5)\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01)", "def test_equal7():\n x = randtool(\"float\", -10, 10, [3, 3, 1])\n y = randtool(\"float\", -10, 10, [3, 3, 3, 1])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def test_combine_nsamples_with_pols():\n test_samples_1 = np.ones((3, 2, 13, 21)) * 3\n test_samples_2 = np.ones((3, 2, 13, 21)) * 2\n samples_out = utils.combine_nsamples(test_samples_1, test_samples_2, axis=1)\n test_full_samples = np.ones((3, 2, 2, 13, 21)) * np.sqrt(6)\n assert np.all(test_full_samples == samples_out)", "def test_eq_false_id(self):\n other = PrepSample('1.SKD8.640184', self.prep_template)\n self.assertFalse(self.tester == other)", "def test_continuous():\n # assert the distribution of the samples is close to the distribution of the data\n # using kstest:\n # - uniform (assert p-value > 0.05)\n # - gaussian (assert p-value > 0.05)\n # - inversely correlated (assert correlation < 0)", "def test_equal6():\n x = randtool(\"float\", -10, 10, [3, 3, 3, 1])\n y = randtool(\"float\", -10, 10, [3, 3, 1])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def test_multiple_rng_aliasing(self):\r\n rng1 = RandomStreams(1234)\r\n rng2 = RandomStreams(2392)\r\n assert rng1.state_updates is not rng2.state_updates\r\n assert rng1.gen_seedgen is not rng2.gen_seedgen", "def same_water_present(x, y):\n if (x == None) or (y == None): return 0.0\n if len(x.intersection(y)) > 0: return 1.0\n return 0.0", "def test_equals_with_different_sources(self):\n measurement_1 = Measurement(self.metric(), sources=[{\"source_uuid\": SOURCE_ID}])\n measurement_2 = Measurement(self.metric())\n self.assertFalse(measurement_1.equals(measurement_2))", "def test_t_two_sample_switch(self):\r\n sample = array([4.02, 3.88, 3.34, 3.87, 3.18])\r\n x = array([3.02])\r\n self.assertFloatEqual(t_two_sample(x, sample), (-1.5637254, 0.1929248))\r\n self.assertFloatEqual(t_two_sample(sample, x), (1.5637254, 0.1929248))\r\n\r\n # can't do the test if both samples have single item\r\n self.assertEqual(t_two_sample(x, x), (None, None))\r\n\r\n # Test special case if t=0.\r\n self.assertFloatEqual(t_two_sample([2], [1, 2, 3]), (0.0, 1.0))\r\n self.assertFloatEqual(t_two_sample([1, 2, 3], [2]), (0.0, 1.0))", "def test_rng_null(self):\n assert check_random_state(None) is np.random.mtrand._rand", "def test_eq_false_type(self):\n other = PrepSample(self.sample_id, PrepTemplate(1))\n self.assertFalse(self.tester == other)", "def test_equals_with_different_scales(self):\n measurement_1 = Measurement(self.metric(), {\"count\": {\"status\": \"target_met\"}})\n measurement_2 = Measurement(self.metric(), {\"count\": {\"status\": \"target_not_met\"}})\n self.assertFalse(measurement_1.equals(measurement_2))", "def test_equal_probability():\n from numpy import array, sqrt, count_nonzero\n\n energy = MagicMock()\n\n density = array([1, 0, 99])\n mc = MonteCarlo(energy, density)\n changes_at_zero = [\n (density - mc.change_density(density))[0] != 0 for i in range(10000)]\n assert count_nonzero(changes_at_zero) \\\n == approx(0.01 * len(changes_at_zero), 0.5 * sqrt(len(changes_at_zero)))", "def test_eq_false_id(self):\n other = Sample('1.SKD8.640184', self.sample_template)\n self.assertFalse(self.tester == other)", "def test_mc_t_two_sample_single_obs_sample(self):\r\n sample = array([4.02, 3.88, 3.34, 3.87, 3.18])\r\n x = array([3.02])\r\n exp = (-1.5637254, 0.1929248)\r\n obs = mc_t_two_sample(x, 
sample)\r\n self.assertFloatEqual(obs[:2], exp)\r\n self.assertFloatEqual(len(obs[2]), 999)\r\n self.assertIsProb(obs[3])\r\n\r\n exp = (1.5637254, 0.1929248)\r\n obs = mc_t_two_sample(sample, x)\r\n self.assertFloatEqual(obs[:2], exp)\r\n self.assertFloatEqual(len(obs[2]), 999)\r\n self.assertIsProb(obs[3])\r\n\r\n # Test the case where we can have no variance in the permuted lists.\r\n x = array([1, 1, 2])\r\n y = array([1])\r\n exp = (0.5, 0.666666666667)\r\n obs = mc_t_two_sample(x, y)\r\n self.assertFloatEqual(obs[:2], exp)\r\n self.assertFloatEqual(len(obs[2]), 999)\r\n self.assertIsProb(obs[3])", "def test_correct_build_distribution_values(self):\n timeseries_with_distribution_values = self.build_distribution_value()\n\n distribution_value = main.build_distribution_value(timeseries_with_distribution_values[\"points\"][0][\"value\"][\"distributionValue\"])\n expected_distribution_value = self.build_expected_distribution_value()\n self.assertEqual(distribution_value,expected_distribution_value)", "def test_null_from_normal(self):\n test_mean = 10\n test_std = 5\n clip_on = False\n floats = False\n sparsity = None\n actual_out = null_from_normal(self.samples, self.otus, test_mean,\n test_std, sparsity, floats, clip_on)\n expected_out = \\\n array([[ 19., 12., 15., 21., 19.],\n [ 5., 15., 9., 9., 12.],\n [ 11., 17., 14., 11., 12.],\n [ 12., 17., 9., 12., 6.],\n [ 0., 13., 14., 6., 21.],\n [ 3., 10., 9., 18., 17.],\n [ 11., 12., 6., 0., 8.],\n [ 11., 16., 16., 8., 8.],\n [ 5., 3., 1., 20., 7.],\n [ 8., 4., 14., 2., 9.]])\n self.assertEqual(expected_out, actual_out)\n # test where values are going to get clipped\n test_mean = 5\n test_std = 5\n actual_out = null_from_normal(self.samples, self.otus, test_mean,\n test_std, sparsity, floats, clip_on)\n expected_out = \\\n array([[ 1., 7., 2., 0., 5.],\n [ 7., 5., 7., 2., 3.],\n [ 2., 3., 1., 0., 6.],\n [ 3., 0., 7., 0., 5.],\n [ 9., 6., 11., 0., 7.],\n [ 2., 1., 2., 3., 5.],\n [ 0., 10., 7., 0., 12.],\n [ 14., 11., 4., 0., 10.],\n [ 3., 11., 6., 10., 7.],\n [ 9., 5., 14., 6., 7.]])\n self.assertEqual(expected_out, actual_out)\n # test without clipping\n clip_on = True\n actual_out = null_from_normal(self.samples, self.otus, test_mean,\n test_std, sparsity, floats, clip_on)\n expected_out = \\\n array([[ 14., -2., -1., 10., -1.],\n [ 15., 3., 1., 15., 12.],\n [ 14., 10., 1., 15., 4.],\n [ 9., 10., 4., 8., 10.],\n [ 7., 0., 6., 12., 2.],\n [ 4., 3., 14., 8., 7.],\n [ 1., 8., 2., 5., 2.],\n [ 8., 8., 4., 7., 0.],\n [ -2., 7., 6., 8., 17.],\n [ 10., 0., 11., -2., 3.]])\n self.assertEqual(expected_out, actual_out)\n # test with sparsity and ints off\n seed(0)\n clip_on = True\n floats = True\n sparsity = .8\n test_mean = 10.4\n test_std = 1.4\n actual_out = null_from_normal(self.samples, self.otus, test_mean,\n test_std, sparsity, floats, clip_on)\n expected_out = \\\n array([[ 12.86967328, 0. , 0. , 0. , 0. ],\n [ 9.03181097, 0. , 0. , 0. , 0. ],\n [ 0. , 0. , 0. , 10.57034502, 0. ],\n [ 10.86714406, 0. , 0. , 0. , 0. ],\n [ 0. , 11.31506603, 0. , 9.36096897, 0. ],\n [ 0. , 0. , 0. , 0. , 0. ],\n [ 0. , 10.92942753, 0. , 0. , 0. ],\n [ 10.61888856, 0. , 0. , 0. , 0. ],\n [ 0. , 8.41197489, 0. , 0. , 0. ],\n [ 9.78669598, 0. , 0. , 0. , 0. 
]])\n assert_allclose(expected_out, actual_out)\n # using assert_allclose because floats are truncated", "def test_null_from_data(self):\n # define prior data that R will use\n data = array([[ 4., 6., 5., 4., 3.],\n [ 5., 3., 11., 17., 2.],\n [ 8., 4., 4., 13., 0.],\n [ 0., 0., 9., 8., 10.],\n [ 19., 8., 3., 8., 1.],\n [ 5., 1., 14., 4., 3.],\n [ 2., 7., 3., 4., 0.],\n [ 5., 5., 5., 14., 6.],\n [ 2., 3., 6., 0., 5.],\n [ 6., 1., 1., 2., 2.]])\n Rseed = 0\n tpk = 10\n actual_out = null_from_data(data, tpk, Rseed=Rseed)\n expected_out = array([[ 18., 1., 0., 3., 0.],\n [ 2., 10., 10., 3., 22.],\n [ 1., 8., 4., 1., 12.],\n [ 10., 1., 5., 1., 2.],\n [ 8., 0., 14., 28., 5.],\n [ 5., 16., 3., 11., 0.],\n [ 0., 2., 8., 2., 0.],\n [ 0., 12., 2., 1., 3.],\n [ 0., 0., 6., 2., 5.],\n [ 8., 2., 0., 0., 3.]])\n self.assertEqual(expected_out, actual_out)", "def test_null_distribution_wald(self, n_cells: int = 2000, n_genes: int = 100):\n logging.getLogger(\"tensorflow\").setLevel(logging.ERROR)\n logging.getLogger(\"batchglm\").setLevel(logging.WARNING)\n logging.getLogger(\"diffxpy\").setLevel(logging.WARNING)\n\n sim = Simulator(num_observations=n_cells, num_features=n_genes)\n sim.generate_sample_description(num_batches=0, num_conditions=0)\n sim.generate()\n\n random_sample_description = pd.DataFrame({\n \"pseudotime\": np.random.random(size=sim.nobs)\n })\n\n test = de.test.continuous_1d(\n data=sim.X,\n continuous=\"pseudotime\",\n df=3,\n formula_loc=\"~ 1 + pseudotime\",\n formula_scale=\"~ 1\",\n factor_loc_totest=\"pseudotime\",\n test=\"wald\",\n sample_description=random_sample_description,\n quick_scale=True,\n batch_size=None,\n training_strategy=\"DEFAULT\",\n dtype=\"float64\"\n )\n summary = test.summary()\n\n # Compare p-value distribution under null model against uniform distribution.\n pval_h0 = stats.kstest(test.pval, 'uniform').pvalue\n\n logging.getLogger(\"diffxpy\").info('KS-test pvalue for null model match of wald(): %f' % pval_h0)\n assert pval_h0 > 0.05, \"KS-Test failed: pval_h0 is <= 0.05!\"\n\n return True", "def test_multiple_rng_aliasing():\r\n rng1 = MRG_RandomStreams(1234)\r\n rng2 = MRG_RandomStreams(2392)\r\n assert rng1.state_updates is not rng2.state_updates", "def test_ppt_distinguishability_werner_hiding_pairs():\n dim = 2\n sigma_0 = (np.kron(np.identity(dim), np.identity(dim)) + swap_operator(dim)) / (dim * (dim + 1))\n sigma_1 = (np.kron(np.identity(dim), np.identity(dim)) - swap_operator(dim)) / (dim * (dim - 1))\n\n states = [sigma_0, sigma_1]\n\n expected_val = 1 / 2 + 1 / (dim + 1)\n\n primal_res = ppt_distinguishability(states, probs=None, dist_method=\"min-error\", strategy=True)\n dual_res = ppt_distinguishability(states, probs=None, dist_method=\"min-error\", strategy=False)\n\n np.testing.assert_equal(np.isclose(primal_res, expected_val, atol=0.001), True)\n np.testing.assert_equal(np.isclose(dual_res, expected_val, atol=0.001), True)\n\n primal_res = ppt_distinguishability(\n states, probs=None, dist_method=\"unambiguous\", strategy=True\n )\n dual_res = ppt_distinguishability(states, probs=None, dist_method=\"unambiguous\", strategy=False)\n\n np.testing.assert_equal(np.isclose(primal_res, 1 / 3, atol=0.001), True)\n np.testing.assert_equal(np.isclose(dual_res, 1 / 3, atol=0.001), True)", "def test_with_shots_none(self):\n sampler = Sampler()\n result = sampler.run(\n circuits=[self._pqc], parameter_values=[self._pqc_params[1]], shots=None\n ).result()\n self.assertDictAlmostEqual(\n result.quasi_dists[0],\n {\n 0: 0.01669499556655749,\n 1: 
0.3363966103502914,\n 2: 0.04992359174946462,\n 3: 0.596984802333687,\n },\n )", "def test_compare(self): \n d1 = heat(\n np.array([[0.5, 1]]),\n np.array([[0.5, 1.1]])\n )\n d2 = heat(\n np.array([[0.5, 1]]),\n np.array([[0.5, 1.5]])\n )\n\n # These are very loose bounds\n assert d1 < d2", "def test_dim_None(a, b, metrics):\n metric, _metric = metrics\n if metric in [effective_sample_size, spearman_r_eff_p_value, pearson_r_eff_p_value]:\n with pytest.raises(ValueError) as excinfo:\n metric(a, b, dim=None)\n assert (\n \"Effective sample size should only be applied to a singular time dimension.\"\n in str(excinfo.value)\n )\n else:\n metric, _metric = metrics\n res = metric(a, b, dim=None)\n assert len(res.dims) == 0, print(res.dims)", "def test_decaydata___ne__(self):\n\n data1 = decaydata.DecayData(\"icrp107\")\n data2 = decaydata.DecayData(\"icrp107\")\n data2.dataset = \"icrp07\"\n self.assertNotEqual(data1, data2)", "def isStochasticallyDominated(wvalues1, wvalues2, probabilitiesForObjectives):\n not_equal = False\n for self_wvalue, other_wvalue, p in zip(wvalues1, wvalues2, probabilitiesForObjectives):\n r = random.random()\n if (r<=p):\n if self_wvalue > other_wvalue:\n return False\n elif self_wvalue < other_wvalue:\n not_equal = True\n return not_equal", "def report_ttest_1sample(\n null_hypothesis: str, sample: List[float], popmean: float,\n one_sided: bool = False, alpha: float = 0.05):\n\n t_value, p_value = stats.ttest_1samp(sample, popmean)\n if one_sided and t_value > 0:\n p_value /= 2\n print('Test for null hypothesis \"{}\".'.format(null_hypothesis))\n print('Sample mean: {}, Sample SD: {}'.format(np.mean(sample), np.std(sample)))\n print('t({})={}, p={}.'.format(len(sample)-1, t_value, p_value))\n if p_value < alpha:\n print('Reject null hypothesis.\\n')\n else:\n print('Fail to reject null hypothesis.\\n')", "def test_self_consistency_no_noise(self):\n popt, pcov = sine_fit(self.data, self.periods)\n print(popt)\n assert_allclose(*fixed_signs(self.p_gt, popt), 1e-4)", "def _assert_not_series_equal_both(a, b, **kwargs):\n _assert_not_series_equal(a, b, **kwargs)\n _assert_not_series_equal(b, a, **kwargs)", "def test_identical(self):\n write this test!", "def test_mc_t_two_sample_no_perms(self):\r\n exp = (-0.11858541225631833, 0.90756579317867436, [], nan)\r\n I = array([7.2, 7.1, 9.1, 7.2, 7.3, 7.2, 7.5])\r\n II = array([8.8, 7.5, 7.7, 7.6, 7.4, 6.7, 7.2])\r\n obs = mc_t_two_sample(I, II, permutations=0)\r\n self.assertFloatEqual(obs, exp)", "def test_equal_on_equal_and_empty(self):\n a = Digest()\n b = Digest()\n\n self.assertTrue(a == b)\n self.assertTrue(b == a)", "def test_oss_sample_wt_fit():\n\n # Create the object\n oss = OneSidedSelection(random_state=RND_SEED)\n assert_raises(RuntimeError, oss.sample, X, Y)", "def test_posteriors_good_data(self):\r\n first = [0, 0.25, 0.5, 1, 0.25]\r\n second = [0.25, 0.5, 0, 0.1, 1]\r\n product = [0, 0.125, 0, 0.1, 0.25]\r\n for obs, exp in zip(posteriors(first, second), product):\r\n self.assertFloatEqual(obs, exp)", "def test_empty_parameter(self):\n n = 5\n qc = QuantumCircuit(n, n - 1)\n qc.measure(range(n - 1), range(n - 1))\n sampler = Sampler()\n with self.subTest(\"one circuit\"):\n result = sampler.run([qc], shots=1000).result()\n self.assertEqual(len(result.quasi_dists), 1)\n for q_d in result.quasi_dists:\n quasi_dist = {k: v for k, v in q_d.items() if v != 0.0}\n self.assertDictEqual(quasi_dist, {0: 1.0})\n self.assertEqual(len(result.metadata), 1)\n\n with self.subTest(\"two circuits\"):\n result = sampler.run([qc] 
* 2, shots=1000).result()\n self.assertEqual(len(result.quasi_dists), 2)\n for q_d in result.quasi_dists:\n quasi_dist = {k: v for k, v in q_d.items() if v != 0.0}\n self.assertDictEqual(quasi_dist, {0: 1.0})\n self.assertEqual(len(result.metadata), 2)", "def test_t_one_observation_no_variance(self):\r\n sample = array([1.0, 1.0, 1.0])\r\n\r\n # Can't perform test if invariant list's single value matches x,\r\n # regardless of none_on_zero_variance.\r\n self.assertEqual(t_one_observation(1, sample), (None, None))\r\n self.assertEqual(t_one_observation(1, sample,\r\n none_on_zero_variance=False), (None, None))\r\n\r\n # Test correct handling of none_on_zero_variance.\r\n self.assertEqual(t_one_observation(2, sample), (None, None))\r\n self.assertEqual(t_one_observation(2, sample,\r\n none_on_zero_variance=False), (float('inf'), 0.0))\r\n self.assertEqual(t_one_observation(2, sample,\r\n none_on_zero_variance=False, tails='low'), (float('inf'), 1.0))", "def test_dice_similarity_all_zeros():\n vector1 = np.array([0, 0, 0, 0])\n vector2 = np.array([1, 1, 1, 1])\n score11 = dice_similarity.py_func(vector1, vector1)\n score12 = dice_similarity.py_func(vector1, vector2)\n score22 = dice_similarity.py_func(vector2, vector2)\n\n assert score11 == score12 == 0.0, \"Expected different score.\"\n assert score22 == 1.0, \"Expected different score.\"", "def test_equal12():\n x = np.array([[True, False, True], [True, False, True], [True, False, True]])\n y = np.array([[True, False, True], [False, False, False], [True, True, False]])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def testEquality(self):\n pass", "def test_two_player_zero_sum_mixture_wellfare(strategies):\n game = gamegen.two_player_zero_sum_game(strategies)\n for prof in game.random_mixtures(20):\n assert np.isclose(\n regret.mixed_social_welfare(game, prof), 0\n ), \"zero sum profile wasn't zero sum\"", "def test_rejection_sampling():\n # Check that it works with a numpy array\n original_samples = np.random.uniform(0, 10, (n_samples, n_params))\n weights = np.random.uniform(0, 5, n_samples)\n new_samples = rejection_sampling(original_samples, weights)\n # new_samples should have less samples than what we started with originally\n assert len(new_samples) <= n_samples\n # Each sample should be in the original posterior table\n assert all(new_sample in original_samples for new_sample in new_samples)\n # Each sample should be unique\n unique = np.unique(new_samples, axis=0)\n assert len(unique) == len(new_samples)\n\n # Now check that it works as expected for the\n # pesummary.utils.samples_dict.SamplesDict object\n original_samples = SamplesDict(\n {param: np.random.uniform(0, 10, n_samples) for param in gw_parameters()}\n )\n weights = np.random.uniform(0, 5, n_samples)\n new_samples = rejection_sampling(original_samples, weights)\n assert new_samples.number_of_samples <= original_samples.number_of_samples\n assert new_samples.parameters == original_samples.parameters\n assert all(\n new_sample in original_samples.samples.T for new_sample in\n new_samples.samples.T\n )", "def test_no_values(self):\r\n values = []\r\n result = multipoint_mean_sd(values)\r\n\r\n self.assertEqual('', result['mean_result'])\r\n self.assertEqual('', result['sd_result'])", "def test0(self):\r\n a = T.constant(2.5)\r\n b = T.constant(numpy.asarray([[[0.5]]]))\r\n b2 = b.dimshuffle()\r\n assert b2.ndim == 0\r\n d_a = T.DimShuffle([], [])(a)\r\n d_b = T.DimShuffle([True, True, True], [0, 2, 1])(b)\r\n d_a2 = T.DimShuffle([], ['x', 'x', 'x'])(a)\r\n\r\n 
self.assertTrue(_as_scalar(a) == a)\r\n self.assertTrue(_as_scalar(b) != b)\r\n self.assertTrue(_as_scalar(d_a) != d_a)\r\n self.assertTrue(_as_scalar(d_b) != d_b)\r\n self.assertTrue(_as_scalar(d_a2) != d_a2)", "def test_eq_false_type(self):\n other = Sample(self.sample_id, SampleTemplate(1))\n self.assertFalse(self.tester == other)", "def test_dice_similarity_all_zeros_compiled():\n vector1 = np.array([0, 0, 0, 0])\n vector2 = np.array([1, 1, 1, 1])\n score11 = dice_similarity(vector1, vector1)\n score12 = dice_similarity(vector1, vector2)\n score22 = dice_similarity(vector2, vector2)\n\n assert score11 == score12 == 0.0, \"Expected different score.\"\n assert score22 == 1.0, \"Expected different score.\"", "def test_asymmetric_noise_signal(self):\n np.random.seed(0)\n test_ts = self.create_ts(length=100 * 24, freq=\"1h\", signal_to_noise_ratio=0)\n ts1 = self.create_ts(length=100 * 24, freq=\"1h\", signal_to_noise_ratio=0)\n ts2 = self.create_ts(length=100 * 24, freq=\"1h\", signal_to_noise_ratio=0)\n\n noise = (np.random.rand(100 * 24) - 0.5) * (np.random.rand(100 * 24) > 2 / 3)\n noise *= noise > 0\n\n # add strictly positive noise to ts1 and strictly negative noise to ts2\n ts1.value += abs(ts1.value * noise)\n ts2.value -= abs(ts2.value * noise)\n\n ts1.value[93 * 24] += 20\n ts1.value[96 * 24] -= 20\n ts2.value[93 * 24] += 20\n ts2.value[96 * 24] -= 20\n\n model = ProphetDetectorModel(score_func=\"z_score\")\n response1 = model.fit_predict(test_ts[90 * 24 :], ts1[: 90 * 24])\n response2 = model.fit_predict(test_ts[90 * 24 :], ts2[: 90 * 24])\n\n self.assertGreater(\n response2.scores.value[3 * 24], response1.scores.value[3 * 24]\n )\n self.assertGreater(\n response2.scores.value[6 * 24], response1.scores.value[6 * 24]\n )", "def test_correct_p_values_no_change(self):\r\n exp = [None, 0.008]\r\n obs = self.mc._correct_p_values([None, 0.008])\r\n self.assertEqual(obs, exp)\r\n exp = [0.007]\r\n obs = self.mc._correct_p_values([0.007])\r\n assert_almost_equal(obs, exp)", "def test_1sample(self):\r\n c = AlphaDiversityCalc(observed_otus)\r\n self.assertEqual(c(data_path=self.single_sample_otu_table_fp), [2])", "def test_SeedCoherenceAnalyzer_same_Fs():\r\n\r\n Fs1 = np.pi\r\n Fs2 = 2 * np.pi\r\n t = np.arange(256)\r\n\r\n T1 = ts.TimeSeries(np.random.rand(t.shape[-1]),\r\n sampling_rate=Fs1)\r\n\r\n T2 = ts.TimeSeries(np.random.rand(t.shape[-1]),\r\n sampling_rate=Fs2)\r\n\r\n npt.assert_raises(ValueError, nta.SeedCoherenceAnalyzer, T1, T2)", "def test_two_unsampled_arms(self):\n self._test_two_unsampled_arms()", "def assert_predictions_equal(first, second, x):\n preds1 = first.predict(x, batch_size=batch_size)\n preds2 = second.predict(x, batch_size=batch_size)\n np.testing.assert_array_equal(preds1, preds2)", "def test_not_equal_on_equal_and_empty(self):\n a = Digest()\n b = Digest()\n\n self.assertFalse(a != b)\n self.assertFalse(b != a)", "def test_never_same():\n g = RG.larger_random()\n hundred_calls = set([next(g) for _ in range(20)])\n assert len(hundred_calls) == 20", "def test_no_duplicates_and_positives_in_negative_sample(self):\n model = PoincareModel(self.data_large, negative=3)\n positive_nodes = model.node_relations[0] # Positive nodes for node 0\n num_samples = 100 # Repeat experiment multiple times\n for i in range(num_samples):\n negatives = model._sample_negatives(0)\n self.assertFalse(positive_nodes & set(negatives))\n self.assertEqual(len(negatives), len(set(negatives)))", "def no_match():\n S1=Spectrum.Spectrum()\n S1.add_peak(50.7,234)\n S1.add_peak(54.6,585)\n 
S1.add_peak(60.7,773)\n S1.add_peak(65.6,387)\n S1.add_peak(87.7,546)\n S1.add_peak(104.6,598)\n S1.pep_mass=100\n S1.euclidean_scale()\n\n S2=Spectrum.Spectrum()\n S2.add_peak(50.2,234)\n S2.add_peak(53.8,585)\n S2.add_peak(61.3,773)\n S2.add_peak(66.2,387)\n S2.add_peak(88.1,546)\n S2.add_peak(103.9,598)\n S2.pep_mass=100\n S2.euclidean_scale()\n\n score,peaks=similarity.cosine_score_max(S1,S2)\n assert peaks==0, \"Incorrect number of peaks matched with greedy method\"\n assert score==0, \"Incorrect score with greedy method\"\n \n\n score,peaks=similarity.cosine_score_greedy(S1,S2)\n assert peaks==0, \"Incorrect number of peaks matched with maximum weighted method\"\n assert score==0, \"Incorrect score with maximum weighted method\"", "def test_equal_basic(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"equal\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::eq\"},\n )", "def test_self_consistency_noise(self):\n # test with SNR = 100\n SNR = self.p_gt[0] / 9\n noisy_data = self.data + SNR * RNG.normal(size=self.data.shape)\n popt, pcov = sine_fit(noisy_data, self.periods)\n assert_allclose(*fixed_signs(self.p_gt, popt), 5e-1)", "def test_differencer_produces_expected_results(na_handling):\n transformer = Differencer(na_handling=na_handling)\n y_transformed = transformer.fit_transform(y_simple)\n y_expected = y_simple_expected_diff[na_handling]\n\n _assert_array_almost_equal(y_transformed, y_expected)", "def test_t_paired_specific_difference(self):\r\n x, y = self.x, self.y\r\n # difference is 0.2, so test should be non-significant if 0.2 passed\r\n self.failIf(t_paired(y, x, exp_diff=0.2)[0] > 1e-10)\r\n # same, except that reversing list order reverses sign of difference\r\n self.failIf(t_paired(x, y, exp_diff=-0.2)[0] > 1e-10)\r\n # check that there's no significant difference from the true mean\r\n self.assertFloatEqual(\r\n t_paired(y, x, exp_diff=0.2)[1], 1, 1e-4)", "def test_eq_true(self):\n other = PrepSample(self.sample_id, self.prep_template)\n self.assertTrue(self.tester == other)", "def test_correct_p_values_all_None(self):\r\n exp = [None, None]\r\n obs = self.mc._correct_p_values([None, None])\r\n self.assertEqual(obs, exp)", "def test_sample(self):\n seed = 5\n space = Space()\n probs = (0.1, 0.2, 0.3, 0.4)\n categories = (\"asdfa\", 2, 3, 4)\n dim1 = Categorical(\"yolo\", OrderedDict(zip(categories, probs)), shape=(2, 2))\n space.register(dim1)\n dim2 = Integer(\"yolo2\", \"uniform\", -3, 6)\n space.register(dim2)\n dim3 = Real(\"yolo3\", \"norm\", 0.9)\n space.register(dim3)\n\n point = space.sample(seed=seed)\n rng = check_random_state(seed)\n test_point = [\n dict(\n yolo=dim1.sample(seed=rng)[0],\n yolo2=dim2.sample(seed=rng)[0],\n yolo3=dim3.sample(seed=rng)[0],\n )\n ]\n assert len(point) == len(test_point) == 1\n assert len(point[0].params) == len(test_point[0]) == 3\n assert np.all(point[0].params[\"yolo\"] == test_point[0][\"yolo\"])\n assert point[0].params[\"yolo2\"] == test_point[0][\"yolo2\"]\n assert point[0].params[\"yolo3\"] == test_point[0][\"yolo3\"]\n\n points = space.sample(2, seed=seed)\n rng = check_random_state(seed)\n points1 = dim1.sample(2, seed=rng)\n points2 = dim2.sample(2, seed=rng)\n points3 = dim3.sample(2, seed=rng)\n test_points = [\n dict(yolo=points1[0], yolo2=points2[0], yolo3=points3[0]),\n dict(yolo=points1[1], yolo2=points2[1], yolo3=points3[1]),\n ]\n assert len(points) == len(test_points) == 2\n for i in range(2):\n assert len(points[i].params) == len(test_points[i]) == 3\n assert 
np.all(points[i].params[\"yolo\"] == test_points[i][\"yolo\"])\n assert points[i].params[\"yolo2\"] == test_points[i][\"yolo2\"]\n assert points[i].params[\"yolo3\"] == test_points[i][\"yolo3\"]", "def test_unequal_variance_two_tailed(self):\n rng = np.random.default_rng(135481321)\n\n passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = rng.normal(10, 2, 200)\n data2 = rng.normal(10, 2, 200)\n\n ttest = unequal_variance_ttest(data1, data2)\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01)", "def assert_wrappers_equal(first, second):\n assert first.sk_params == second.sk_params\n assert first.history_ == second.history_\n if not first.model_ or not second.model_:\n assert first.model_ == second.model_\n else:\n assert_models_equal(first.model, second.model)", "def test_noise_equiv_bandwidth():\n win = windows.blackmanharris(2000)\n assert np.isclose(2, 1.0 / utils.noise_equivalent_bandwidth(win), rtol=1e-2)", "def test_not_equal(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"notEqual\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::ne\"},\n )", "def test_eq_true(self):\n other = Sample(self.sample_id, self.sample_template)\n self.assertTrue(self.tester == other)", "def test_w_and_without():\n A = Node(\"A\", [\"B\"], {\"B\": np.array([[1,0],[1,.1]])})\n B = Node(\"B\", [], {})\n net = CyberNet([A,B])\n T=10\n data = gen_data(T, net, {\"A\": \"normal\", \"B\":\"normal\"})\n logn_fact = gen_logn_fact(data)\n pdata_no_a = prob_model_no_attacker(net, data, T, logn_fact)\n pdata_a = prob_model_given_data_times(net, data, {}, T, logn_fact,\n {\"A\": \"normal\",\n \"B\":\"normal\"})\n np.testing.assert_almost_equal(pdata_no_a, pdata_a)\n\n np.testing.assert_almost_equal(np.log(poisson.pmf(len(data[0]), 10)), pdata_a)", "def _test_sampdup(t):\n return t.shape[1] != len(set(t.ids(axis='sample')))", "def test_call_na_samples(self):\r\n est = Chao1MultinomialPointEstimator(asarray([4, 3, 4, 5]))\r\n obs = est(42)\r\n self.assertEqual(obs, (None, None, None, None))", "def test_all_pairs_t_test_no_perms(self):\r\n exp = \"\"\"# The tests of significance were performed using a two-sided Student's two-sample t-test.\r\n# Alternative hypothesis: Group 1 mean != Group 2 mean\r\n# Entries marked with \"N/A\" could not be calculated because at least one of the groups\r\n# of distances was empty, both groups each contained only a single distance, or\r\n# the test could not be performed (e.g. 
no variance in groups with the same mean).\r\nGroup 1\tGroup 2\tt statistic\tParametric p-value\tParametric p-value (Bonferroni-corrected)\tNonparametric p-value\tNonparametric p-value (Bonferroni-corrected)\r\nfoo\tbar\t-6.6\t0.00708047956412\t0.0212414386924\tN/A\tN/A\r\nfoo\tbaz\t-9.79795897113\t0.000608184944463\t0.00182455483339\tN/A\tN/A\r\nbar\tbaz\t-3.0\t0.0576688856224\t0.173006656867\tN/A\tN/A\r\n\"\"\"\r\n obs = all_pairs_t_test(self.labels2, self.dists2,\r\n num_permutations=0)\r\n self.assertEqual(self.remove_nums(obs), self.remove_nums(exp))", "def test_1qubit(self):\n qc = QuantumCircuit(1)\n qc.measure_all()\n qc2 = QuantumCircuit(1)\n qc2.x(0)\n qc2.measure_all()\n\n sampler = Sampler()\n result = sampler.run([qc, qc2]).result()\n self.assertIsInstance(result, SamplerResult)\n self.assertEqual(len(result.quasi_dists), 2)\n self.assertDictAlmostEqual(result.quasi_dists[0], {0: 1})\n self.assertDictAlmostEqual(result.quasi_dists[1], {1: 1})", "def test_equal10():\n x = np.array([[True, False, True], [True, False, True], [True, False, True]])\n y = np.array([[True, False, True]])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def test_mc_t_two_sample_no_permuted_variance(self):\r\n # Verified against R's t.test() and Deducer::perm.t.test().\r\n x = array([1, 1, 2])\r\n y = array([2, 2, 1])\r\n\r\n exp = (-0.70710678118654791, 0.51851851851851838)\r\n obs = mc_t_two_sample(x, y, permutations=10000)\r\n\r\n self.assertFloatEqual(obs[:2], exp)\r\n self.assertEqual(len(obs[2]), 10000)\r\n self.assertCorrectPValue(0.97, 1.0, mc_t_two_sample, [x, y],\r\n {'permutations': 10000}, p_val_idx=3)", "def test_oss_sample_wrong_X():\n\n # Create the object\n oss = OneSidedSelection(random_state=RND_SEED)\n oss.fit(X, Y)\n assert_raises(RuntimeError, oss.sample, np.random.random((100, 40)),\n np.array([0] * 50 + [1] * 50))", "def test_equals(self):\n measurement_1 = Measurement(self.metric())\n measurement_2 = Measurement(self.metric())\n self.assertTrue(measurement_1.equals(measurement_2))", "def test_sufficient_statistics(self):\n assert (\n len(self.data),\n self.data.var(),\n self.data.mean(),\n ) == sufficient_statistics(self.data)", "def testReproductionMapZeroDensity(self):\n log_stream1 = StringIO()\n sim1 = Simulation(logging_level=50, stream=log_stream1)\n sim1.set_simulation_parameters(\n seed=5, task=47, output_directory=\"output\", min_speciation_rate=0.01, sigma=2, deme=1, sample_size=0.01\n )\n sim1.set_map_files(\n \"null\", fine_file=\"sample/SA_sample_fine2.tif\", reproduction_map=\"sample/SA_sample_reproduction.tif\"\n )\n sim1.run()\n sim2 = Simulation(logging_level=60)\n sim2.set_simulation_parameters(\n seed=6, task=47, output_directory=\"output\", min_speciation_rate=0.01, sigma=2, deme=1, sample_size=0.01\n )\n sim2.set_map_files(\n \"null\", fine_file=\"sample/SA_sample_fine.tif\", reproduction_map=\"sample/SA_sample_reproduction.tif\"\n )\n sim2.add_historical_map(\"sample/SA_sample_fine2.tif\", \"none\", 10.0, 0.0)\n sim2.run()\n log1 = log_stream1.getvalue().replace(\"\\r\", \"\").replace(\"\\n\", \"\")\n self.assertEqual(186, sim1.get_species_richness())\n self.assertEqual(195, sim2.get_species_richness())\n self.assertEqual(\"Density is zero where reproduction map is non-zero. 
This is likely incorrect.\", log1)", "def dist(self, one, two):\n return sum((one[0] != two[0], one[1] != two[1]))", "def test_tanimoto_distance(get_distributions):\n for i, dist_a in enumerate(get_distributions):\n for j, dist_b in enumerate(get_distributions):\n tanimototo = tanimoto_distance(dist_a, dist_b)\n if i == j:\n assert pytest.approx(tanimototo, 0.0001) == 1\n else:\n assert tanimototo < 1", "def assert_result_equal(self, x, y):\n np.testing.assert_array_equal(x, y)", "def test_does_not_sample_twice_ppswor(self):\n with self.assertRaises(ValueError):\n s = private_sampling.ThresholdSample(\n 1.0, private_sampling.PpsworSamplingMethod)\n s.process(\"a\", math.log(FAILURE_PROBABILITY_INVERSE, math.e))\n s.process(\"a\", 1)" ]
[ "0.6507358", "0.6455645", "0.6431785", "0.6414398", "0.6348298", "0.6341265", "0.633865", "0.6277656", "0.6269617", "0.6268249", "0.6254944", "0.6232321", "0.6192658", "0.6153703", "0.6132823", "0.6105993", "0.6078335", "0.6060871", "0.6044297", "0.60279286", "0.60275316", "0.60257775", "0.6012076", "0.5995758", "0.5993561", "0.599297", "0.5988163", "0.5987001", "0.597105", "0.5942966", "0.5931817", "0.59299654", "0.59298563", "0.5929634", "0.59233695", "0.59086764", "0.59083825", "0.5907589", "0.5894569", "0.58854574", "0.5884519", "0.58804387", "0.58800834", "0.587476", "0.5869039", "0.58477974", "0.58364767", "0.5829008", "0.5827346", "0.5820381", "0.58156127", "0.5813967", "0.5807133", "0.57989675", "0.5797456", "0.57937217", "0.57819325", "0.57717973", "0.5768223", "0.57533664", "0.57509655", "0.5748202", "0.5747308", "0.57367104", "0.57247686", "0.57195425", "0.5712072", "0.57075274", "0.57062066", "0.5702098", "0.5691527", "0.5690207", "0.5690121", "0.56892", "0.56871", "0.568373", "0.56812274", "0.56775516", "0.56690985", "0.565881", "0.56571877", "0.56489724", "0.56451976", "0.56426406", "0.56416947", "0.5636528", "0.56331617", "0.5623486", "0.5623431", "0.56164765", "0.56008595", "0.55999315", "0.55921304", "0.5592033", "0.5590868", "0.55899644", "0.55827063", "0.55826443", "0.5581413", "0.5578551", "0.5575916" ]
0.0
-1
Calculate the stochastic matrix `pm` to the power of infinity by finding the eigenvector that corresponds to the eigenvalue 1.
def inf_matrix_power(pm: np.ndarray, dtype=np.float64) -> np.ndarray:
    # scipy.linalg.eig would probably be faster, as it can return the left and right eigenvectors
    w, v = np.linalg.eig(pm)
    if not np.isclose(w[0], 1.0):
        raise ValueError("The first eigenvalue is not one. Is this a right stochastic matrix?")
    vi = np.linalg.inv(v)
    d = np.zeros(pm.shape[0], dtype=dtype)
    d[0] = 1.0
    return np.matmul(v, np.matmul(np.diag(d), vi))
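A minimal usage sketch, assuming `numpy` is available and `inf_matrix_power` is defined as above; the 2x2 right-stochastic matrix here is a made-up example. The eigendecomposition limit should agree with a large finite power of `pm`, provided `np.linalg.eig` happens to list the eigenvalue 1 first (its ordering is not guaranteed, which is what the ValueError guards against).

import numpy as np

# Hypothetical right-stochastic matrix: each row sums to 1.
pm = np.array([[0.9, 0.1],
               [0.5, 0.5]])

try:
    limit = inf_matrix_power(pm)
    # Compare against a high finite power; expected output: True.
    print(np.allclose(limit, np.linalg.matrix_power(pm, 200)))
except ValueError as err:
    # Raised when np.linalg.eig does not return the eigenvalue 1 in position 0.
    print(err)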
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_E0(self) -> float:\n noisy = self.kernel_eigenvectors_[-1].copy()\n np.random.shuffle(noisy)\n\n kernel_eigenvectors = self.kernel_eigenvectors_[:-1]\n kernel_eigenvectors.append(noisy)\n\n eigenvectors_matrix = scipy.sparse.csr_matrix(\n np.column_stack([eigenvector for eigenvector in kernel_eigenvectors])\n )\n\n if len(kernel_eigenvectors) == 2:\n ev0 = kernel_eigenvectors[0]\n ev1 = kernel_eigenvectors[1]\n _, Gamma, _ = scipy.sparse.linalg.svds(\n ev0.T @ ev1, k=self.n_jointly_smooth_functions, which=\"LM\"\n )\n else:\n _, Gamma, _ = scipy.sparse.linalg.svds(\n eigenvectors_matrix, k=self.n_jointly_smooth_functions, which=\"LM\"\n )\n\n Gamma.sort()\n gamma2 = Gamma[-2]\n E0 = (1 + gamma2) / 2\n return E0", "def test_eigenvalues_of_too_few_points_results_in_0():\n a = np.array([5])\n pc = create_point_cloud(a, a, a)\n\n compute_features(pc, [[0]], pc, [\"eigenv_1\", \"eigenv_2\", \"eigenv_3\"], InfiniteCylinder(5))\n\n eigen_val_123 = np.array([pc[keys.point]['eigenv_{}'.format(i)]['data'] for i in [1, 2, 3]])\n assert not np.any(np.isnan(eigen_val_123))\n assert not np.any(np.isinf(eigen_val_123))", "def eigensystem(mat):\n e, v = numpy.linalg.eig(mat)\n\n # `eig` returns complex results but we know all of the\n # eigenstates have real energy.\n e = numpy.real(e)\n\n items = zip(e, v.T)\n items = sorted(items, key = operator.itemgetter(0))\n e, v = zip(*items)\n\n return (e, v)", "def eigensolve(self, epsilon=0.85):\n raise NotImplementedError(\"eigensolve Incomplete\")", "def P(self):\n self.eigenmatrix()", "def posdef_eig_self_adjoint(mat):\n evals, evecs = tf.self_adjoint_eig(mat)\n evals = tf.abs(evals) # Should be equivalent to svd approach.\n\n return evals, evecs", "def check(mat, otp):\n prd = mat*otp\n eigval = prd[0]/otp[0]\n print 'computed eigenvalue :' , eigval\n [eigs, vecs] = np.linalg.eig(mat)\n abseigs = list(abs(eigs))\n ind = abseigs.index(max(abseigs))\n print ' largest eigenvalue :', eigs[ind]", "def posdef_eig(mat):\n return posdef_eig_functions[POSDEF_EIG_METHOD](mat)", "def initial_energy(spin_matrix, n_spins):\n\n E = 0\n M = 0\n\n for i in range(n_spins):\n for j in range(n_spins):\n\n left = spin_matrix[i-1, j] if i>0 else spin_matrix[n_spins - 1, j]\n above = spin_matrix[i, j-1] if j>0 else spin_matrix[i, n_spins - 1]\n\n E -= spin_matrix[i,j]*(left+above)\n M += spin_matrix[i,j]\n\n return E, M", "def eigen(M):\n values, vectors = np.linalg.eig(M)\n return values, vectors", "def get_E(J,k):\n E = -2 * J * np.cos(k) # energyeigenvalue \n return E", "def eigenvects(mat):\n # Check if symbols are present\n if hasSymbols(mat):\n return mat.eigenvects()\n # Purely numeric matrix\n newMat = recursiveEvaluate(mat.as_mutable())\n return newMat.eigenvects()", "def regular(P):\n try:\n cols = P.shape[0]\n ans = np.ones((1, cols))\n # eq = np.matmul(ans, P)\n # s = np.array(np.arange(1, cols + 1))\n eq = np.vstack([P.T - np.identity(cols), ans])\n # va, vec = np.linalg .eig(P)\n results = np.zeros((cols, 1))\n results = np.vstack([results, np.array([1])])\n statetionary = np.linalg.solve(eq.T.dot(eq), eq.T.dot(results)).T\n # print(statetionary)\n # print(np.argwhere(statetionary < 0))\n if len(np.argwhere(statetionary < 0)) > 0:\n return None\n return statetionary\n except Exception as e:\n return None", "def Problem4(n):\n A = Problem2(n)\n eig = min(sl.eigs(A.asfptype(), which='SM')[0])\n \n print \"lamba*n^2 approaches pi^2 as n goes to infinity\"\n return eig*n**2", "def eigCent(A):\n lam,V = np.linalg.eig(A)\n v = V[:,np.argmax(lam)]\n v = 
v*(1./v[0])\n return v", "def solve(mat, y):\n reduced = gaussian_elim(mat)\n sol = np.zeros(shape=(mat.shape[0]))\n S = 0\n for i in reversed(range(len(sol))):\n sol[i] = (y[i]-S) / reduced[i][i]\n S += y[i] - S\n return sol", "def solve_for_eigenvectors(matrix, num, mode=\"general\"):\n\n # Construct a sparse matrix\n if mode == \"general\":\n return linalg.eigs(matrix, num)\n\n if mode == \"symmetric\":\n return linalg.eigsh(matrix, num)", "def calculate_biggest_eigenvalue(cls, covariance_matrix):\n timer = TimerHandler()\n timer.start(\"eigen2\")\n eigvals = scipy.linalg.eigh(covariance_matrix, \n eigvals_only = True, \n eigvals = (covariance_matrix.shape[0] -1,covariance_matrix.shape[0]-1), \n overwrite_a = True)\n return eigvals[0]", "def heavy_fixCM_eigvals(NP, b, c, params):\n l = params['l']\n k = params['k']\n I3 = params['I3']\n # Here, omega_3 is just the MAGNITUDE, not signed\n w3 = np.abs(params['w3'][0])\n gn = params['Mm'] * params['g']\n\n # Check output if small system\n print 'gn = ', gn\n print 'b = ', b\n print 'c = ', c\n\n if NP == 1:\n pass\n elif NP == 2:\n matrix = -np.array([[0., (-1) ** (1 + c) * l * gn / (I3 * w3), 0., 0.],\n [(-1) ** (1 + c) * (-l * gn + (-1) ** (1 + b) * l ** 2 * k) / (I3 * w3), 0.,\n (-1) ** (1 + b + c) * l ** 2 * k / (I3 * w3), 0.],\n [0., 0., 0., (-1) ** (1 + c) * l * gn / (I3 * w3)],\n [(-1) ** (1 + b + c) * l ** 2 * k / (I3 * w3), 0.,\n (-1) ** (1 + c) * (-l * gn + (-1) ** (1 + b) * l ** 2 * k) / (I3 * w3), 0.]\n ])\n print 'exact matrix = ', matrix\n eigvals = np.array([\n 1j * l * gn / (I3 * w3),\n -1j * l * gn / (I3 * w3),\n l * np.sqrt(gn) * np.sqrt(0j - 2. * l * k * (-1) ** (b) - gn) / (I3 * w3),\n -l * np.sqrt(gn) * np.sqrt(0j - 2. * l * k * (-1) ** (b) - gn) / (I3 * w3)\n ])\n print 'exact_eigvals are =', eigvals\n return eigvals\n elif NP == 3:\n matrix = -np.array([[0., (-1) ** (1 + c) * l * gn / (I3 * w3), 0., 0., 0., 0.],\n [(-1) ** (1 + c) * (-l * gn + (-1) ** (1 + b) * l ** 2 * k) / (I3 * w3), 0.,\n (-1) ** (1 + b + c) * l ** 2 * k / (I3 * w3), 0., 0., 0.],\n [0., 0., 0., (-1) ** (1 + c) * l * gn / (I3 * w3), 0., 0.],\n [(-1) ** (1 + b + c) * l ** 2 * k / (I3 * w3), 0.,\n (-1) ** (1 + c) * (-l * gn - 2. * (-1) ** (b) * l ** 2 * k) / (I3 * w3), 0., \\\n (-1) ** (1 + b + c) * l ** 2 * k / (I3 * w3), 0.],\n [0., 0., 0., 0., 0., (-1) ** (1 + c) * l * gn / (I3 * w3)],\n [0., 0., (-1) ** (1 + b + c) * l ** 2 * k / (I3 * w3), 0.,\n (-1) ** (1 + c) * (-l * gn + (-1) ** (1 + b) * l ** 2 * k) / (I3 * w3), 0.]\n ])\n print 'exact matrix = ', matrix\n\n eigvals = np.array([\n 1j * l * gn / (I3 * w3),\n # -1j*l*gn/(I3*w3),\n l * np.sqrt(gn) * np.sqrt(0j - 3. 
* l * k * (-1) ** (b) - gn) / (I3 * w3),\n # -l*np.sqrt(gn)*np.sqrt(0j-3.*l*k*(-1)**(b) - gn)/(I3*w3),\n l * np.sqrt(gn) * np.sqrt(0j - l * k * (-1) ** (b) - gn) / (I3 * w3),\n # -l*np.sqrt(gn)*np.sqrt(0j - l*k*(-1)**(b) - gn)/(I3*w3)\n ])\n return eigvals\n else:\n return np.array([])", "def power_iteration(X):\n #X, languages=prepare_data_matrix()\n M=X\n M=M-np.mean(M, axis=0)\n M=np.cov(M, rowvar=False) #the covariance matrix, size 100x100\n x=np.ones(len(M)) #a random starting vector composed of 100 ones, it only cant be of all zeros\n difference=np.ones(len(x))\n\n #print(np.linalg.norm(difference))\n while np.linalg.norm(difference) >= 10**-5: #we iterate until the difference between the previous and the new x is really small, lets say 10^-5\n #print(x.T.shape)\n oldx=x\n z=M.dot((x.T))\n x=z.T\n x=x/np.linalg.norm(x)\n difference=np.linalg.norm(oldx-x)\n #the x that we get at the end of this loop is our eigenvector\n\n #print(x.dot(M).shape)\n #print(x.shape)\n y=(x.dot(M)).dot(x.T) #y is the corresponding eigenvalue to the eigenvector x\n \n return x, y", "def current(edges, transition_matrix):\n ### Calculate the state frequecies ###\n # Eigenvalues and Eigenvectors of transition matrix\n vals, vl, vr = sp.linalg.eig(transition_matrix, left=True)\n # Find the eigenvalue that == 1\n index = list(vals).index(1)\n state_freq = vl[:,index]\n\n committor_plus = np.linalg.eig\n\n\n ### Calculate the flux matrix ###\n flux_matrix = np.multiply(transition_matrix, state_freq)\n return flux_matrix / flux_matrix.sum(axis=1)", "def get_eigen_value(A, v):\n Av = np.dot(A, v)\n print(\"Mag v, should be 1:\", mag(v))\n lmb = mag(Av) / mag(v)\n return lmb", "def el_ph(om,eig,q,zs,mass,eps,rG,nmodes,nqpt,nat):\n\n # Initiate\n g = np.zeros((nqpt,nmodes),dtype=complex)\n\n # Initiate q+G\n qpG = np.zeros((nqpt,3))\n\n q_c = q[:,0:3] \n q2 = np.zeros(nqpt)\n N = 5 # Initial size of G-point grid used for sum\n\n alpha = 5.0 # Convergence parameter\n\n for nn in range(-N,N+1):\n for mm in range(-N,N+1):\n for ll in range(-N,N+1):\n #\n for ic in range(3):\n qpG[:,ic] = q_c[:,ic] + nn*rG[0,ic] + mm*rG[1,ic] + ll*rG[2,ic]\n # IMPORTANT : Put a check here that qpG is nonzero! (break the loop if so)\n # Denominator\n q2[:] = 0.0\n for ia in range(3): \n for ib in range(3):\n q2[:] += qpG[:,ia]*eps[ia,ib]*qpG[:,ib]\n # \n inv_q2 = 1.0 / (q2 + 1e-10)\n arg = np.exp(-0.25 * np.sum(qpG**2, axis=1) / alpha) * inv_q2 # exp((q+G)^2/4a)\n \n for imod in range(nmodes):\n for ia in range(3):\n for ib in range(3):\n for iat in range(nat):\n g[:,imod] += arg[:]*qpG[:,ia]*zs[iat,ia,ib]*eig[imod,:,iat,ib] \\\n / np.sqrt(2.0*mass[iat]*np.abs(om[imod,:])+1e-10)\n\n return g", "def compute_steady_state_pi(adj_mat):\n\n return 1. 
* np.sum(adj_mat, axis=0) / np.sum(adj_mat) # d_j / 2|E|", "def test_em_nonlinear(self):\n z_matrix = np.array(\n [[0.00000000, 0.00000000, 0.00000000],\n [0.00000000, 0.00000000, 0.16666667],\n [0.03333333, 0.08333333, 0.00000000],\n [0.03333333, 0.08333333, 0.16666667],\n [0.06666667, 0.16666667, 0.00000000],\n [0.06666667, 0.16666667, 0.16666667],\n [0.10000000, 0.16666667, 0.00000000],\n [0.10000000, 0.16666667, 0.16666667],\n [0.13333333, 0.08333333, 0.00000000],\n [0.13333333, 0.08333333, 0.16666667],\n [0.16666667, 0.00000000, 0.00000000],\n [0.16666667, 0.00000000, 0.16666667]],\n dtype=np.float64)\n obtained_w_vector = mcdm.weigh(z_matrix, \"EM\")\n expected_w_vector = np.array(\n [0.20724531, 0.31710188, 0.47565280],\n dtype=np.float64)\n np.testing.assert_allclose(obtained_w_vector, expected_w_vector)\n self.assertEqual(obtained_w_vector.dtype, expected_w_vector.dtype)", "def gaussianElimKer(M, zero, one):\n # V satisfies the invariant\n # M = V M_0\n V = [Polynomial([zero] * i + [one]) for i in range(len(M))]\n pivots = [None] * (len(M) + 1)\n for l in range(len(M)):\n while M[l].deg >= 0:\n idp = M[l].deg\n if pivots[idp] is None:\n pivots[idp] = l\n break\n else:\n c = M[l][idp] / M[pivots[idp]][idp]\n M[l] -= c * M[pivots[idp]]\n V[l] -= c * V[pivots[idp]]\n else:\n # If a line is null, we found an element of the kernel\n return V[l]\n return None", "def eigen_decomp(matrix):\n w = None\n v = None\n ### YOUR CODE HERE\n w,v=np.linalg.eig(matrix)\n ### END YOUR CODE\n return w, v", "def evd_spd_decomposition(P):\n\t\n\t# Assert Matrix P is symetric\n\tassert check_symmetric(P)\t\n\n\t# singular value decomposition\n\tL, Q = np.linalg.eig(P)\n\n\t#if L and Q returned in incorrect order\n\t#L = np.sort(L)\n\t#Q = Q[:, L.argsort()]\n\n\t# Create matrix W = Vtsqrt(diagnol(D))\n\tM = np.dot(Q, np.sqrt(np.diag(L)))\n\n\treturn M", "def posdef_eig_svd(mat):\n evals, evecs, _ = tf.svd(mat)\n\n return evals, evecs", "def eig(self,manifold_num):\n num_sites = len(self.energies[manifold_num])\n ham = self.manifold_hamiltonian(manifold_num).toarray()\n eigvals, eigvecs = eigh(ham)\n # Force degenerate eigenvectors to be orthogonal\n if self.qr_flag:\n eigvecs, r = np.linalg.qr(eigvecs,mode='reduced')\n if self.check_eigenvectors:\n HV = ham.dot(eigvecs)\n D = eigvecs.T.dot(HV)\n if np.allclose(D,np.diag(eigvals),rtol=1E-11,atol=1E-11):\n pass\n else:\n # warnings.warn('Eigenvalues altered by QR factorization, max absolute change in diagonal matrix of {}'.format(np.max(D-np.diag(eigvals))))\n warnings.warn('Using eigenvectors to diagonalize hamiltonian does not result in the expected diagonal matrix to tolerance, largest deviation is {}'.format(np.max(np.abs(D - np.diag(eigvals)))))\n \n sort_indices = eigvals.argsort()\n eigvals.sort()\n eigvecs = eigvecs[:,sort_indices]\n if self.qr_flag:\n r = r[:,sort_indices]\n self.r_mats.append(r)\n # I choose to pick the phase of my eigenvectors such that the state which has the\n # largest overlap has a positive overlap. 
For sufficiently small d, and alpha close\n # to 1, this will be the overlap between the same excited and ground states.\n for i in range(eigvals.size):\n max_index = np.argmax(np.abs(eigvecs[:,i]))\n if eigvecs[max_index,i] < 0:\n eigvecs[:,i] *= -1\n\n return eigvals, eigvecs", "def get_init_point(M, d):\n return normalize(np.linalg.solve(M, d))", "def analytical_eig(A):\n n = len(A)\n h = 1/float(n)\n d = 2/float(h)**2\n a = -1/float(h)**2\n eigenval = np.empty(n)\n for j in range(1,n+1):\n eigenval[j-1] = d + 2*a*np.cos((j*np.pi)/(float(n)+1)) # Analytic solution\n \n return eigenval", "def _measmod_ekf0(ivp, prior, evlvar):\n spatialdim = prior.spatialdim\n h0 = prior.proj2coord(coord=0)\n h1 = prior.proj2coord(coord=1)\n\n def dyna(t, x, **kwargs):\n return h1 @ x - ivp.rhs(t, h0 @ x)\n\n def diff(t, **kwargs):\n return evlvar * np.eye(spatialdim)\n\n def jaco(t, x, **kwargs):\n return h1\n\n return DiscreteGaussianModel(dyna, diff, jaco)", "def posdef_inv_eig(tensor, identity, damping):\n eigenvalues, eigenvectors = tf.self_adjoint_eig(tensor + damping * identity)\n return tf.matmul(eigenvectors / eigenvalues, eigenvectors, transpose_b=True)", "def eigen_decomposition(self):\n w, V = linalg.eigh(self.K)\n c = w[::-1]\n if isinstance(self.num_xi, float):\n percent_energy = np.cumsum(c) / np.sum(c)\n self.num_xi = np.arange(c.shape[0])[percent_energy < self.num_xi][-1] # num_xi changes\n self.Lambda = w[::-1][:self.num_xi]\n self.V = V[:, ::-1][:, :self.num_xi]", "def _gpinv(p, k, sigma):\n x = np.full_like(p, np.nan)\n if sigma <= 0:\n return x\n ok = (p > 0) & (p < 1)\n if np.all(ok):\n if np.abs(k) < np.finfo(float).eps:\n x = - np.log1p(-p)\n else:\n x = np.expm1(-k * np.log1p(-p)) / k\n x *= sigma\n else:\n if np.abs(k) < np.finfo(float).eps:\n x[ok] = - np.log1p(-p[ok])\n else:\n x[ok] = np.expm1(-k * np.log1p(-p[ok])) / k\n x *= sigma\n x[p == 0] = 0\n if k >= 0:\n x[p == 1] = np.inf\n else:\n x[p == 1] = - sigma / k\n\n return x", "def get_PSSM_from_weight_matrix(emat,factor):\n \n # need to reverse sign for PSSM\n emat = -emat\n # set lowest element to zero\n emat = emat - emat.min(axis=0)\n # exponentiate\n p = sp.exp(factor*emat)\n p = p/p.sum(axis=0)\n return p", "def get_sigmazinv(self):\n\n try:\n out = np.diag(1 / self.eigen_x)\n except AttributeError:\n self.get_eigen(predictor=True)\n out = np.diag(1 / self.eigen_x)\n return out", "def E0_prime(self):\n delta_electrons = self._GetElectronDiff()\n assert delta_electrons != 0\n return - self.DeltaG0Prime() / (constants.F*delta_electrons)", "def bastos_ohagen(mat, eps=1e-16):\n mat_ref = numpy.asfarray(mat)\n mat = mat_ref.copy()\n diag_max = numpy.diag(mat).max()\n assert len(mat.shape) == 2\n size = len(mat)\n\n hitri = numpy.zeros((size, size))\n piv = numpy.arange(size)\n\n for idx in range(size):\n\n idx_max = numpy.argmax(numpy.diag(mat[idx:, idx:])) + idx\n\n if mat[idx_max, idx_max] <= numpy.abs(diag_max*eps):\n\n if not idx:\n raise ValueError(\"Purly negative definite\")\n\n for j in range(idx, size):\n hitri[j, j] = hitri[j-1, j-1]/float(j)\n\n break\n\n tmp = mat[:, idx].copy()\n mat[:, idx] = mat[:, idx_max]\n mat[:, idx_max] = tmp\n tmp = hitri[:, idx].copy()\n hitri[:, idx] = hitri[:, idx_max]\n hitri[:, idx_max] = tmp\n tmp = mat[idx, :].copy()\n mat[idx, :] = mat[idx_max, :]\n mat[idx_max, :] = tmp\n piv[idx], piv[idx_max] = piv[idx_max], piv[idx]\n\n hitri[idx, idx] = numpy.sqrt(mat[idx, idx])\n rval = mat[idx, idx+1:]/hitri[idx, idx]\n hitri[idx, idx+1:] = rval\n mat[idx+1:, idx+1:] -= 
numpy.outer(rval, rval)\n\n perm = numpy.zeros((size, size), dtype=int)\n for idx in range(size):\n perm[idx, piv[idx]] = 1\n\n return perm, hitri.T", "def getMomentumMap(self):\n return self.p_array * -1e-6 * epsilon_e", "def test_gaussian_em():\n fname = \"gmm-3-10-0.7.npz\"\n gmm = GaussianMixtureModel.generate( fname, 3, 3 )\n k, d, M, S, w = gmm.k, gmm.d, gmm.means, gmm.sigmas, gmm.weights\n N, n = 1e6, 1e5\n\n\n X = gmm.sample( N, n )\n\n algo = GaussianMixtureEM(k, d)\n\n def report( i, O_, lhood ):\n M_, _, _ = O_\n lhood, Z, O_ = algo.run( X, None, report )\n\n M_, S_, w_ = O_\n\n M_ = closest_permuted_matrix( M, M_ )\n w_ = closest_permuted_vector( w, w_ )\n\n print w, w_\n\n print norm( M - M_ )/norm(M)\n print abs(S - S_).max()\n print norm( w - w_ ) \n\n assert( norm( M - M_ )/norm(M) < 1e-1 )\n assert (abs(S - S_) < 1 ).all()\n assert( norm( w - w_ ) < 1e-2 )", "def EntropyKS(graph): \n if len(graph.nodes)>1:\n M = Adjacency(graph)\n try:\n eig = np.real(np.linalg.eig(M)[0])\n except:\n eig = np.ones((1,))\n lambd = np.max(eig)\n else:\n lambd = 1\n return np.log2(np.round(lambd,8))", "def M_to_E(M, ecc):\n with u.set_enabled_equivalencies(u.dimensionless_angles()):\n E = optimize.newton(_kepler_equation, M, _kepler_equation_prime,\n args=(M, ecc))\n return E", "def eigsolve(self,**kwargs):\n return eigsolve(self,**kwargs)", "def _sigma_ep(self,gam,eps):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return self._sigma_1(gam,eps)", "def mumps_eigsh(matrix, k, sigma, **kwargs):\n class LuInv(sla.LinearOperator):\n\n def __init__(self, matrix):\n instance = kwant.linalg.mumps.MUMPSContext()\n instance.analyze(matrix, ordering='pord')\n instance.factor(matrix)\n self.solve = instance.solve\n sla.LinearOperator.__init__(self, matrix.dtype, matrix.shape)\n\n def _matvec(self, x):\n return self.solve(x.astype(self.dtype))\n\n opinv = LuInv(matrix - sigma * sp.identity(matrix.shape[0]))\n return sla.eigsh(matrix, k, sigma=sigma, OPinv=opinv, **kwargs)", "def find_eigen_kpar(phcs, k0a, qa, nmode, mode=\"E\"):\n\n fr = phcs.fr\n \n if mode == \"E\":\n ep = phcs.ep\n mu = phcs.mu\n else:\n ep = -np.array(phcs.mu)\n mu = -np.array(phcs.ep)\n \n n1 = np.sqrt(max(phcs.ep))\n \n def f(k_parallel):\n kya = [np.sqrt(mu[i] * ep[i] * (k0a) ** 2 - k_parallel ** 2 + 0j) for i in range(2)]\n eta = (kya[1] * mu[0]) / (kya[0] * mu[1])\n output = np.cos(qa) - np.cos(kya[0] * (1 - fr)) * np.cos(kya[1] * fr) + 0.5 * (eta + 1 / eta) * np.sin(\n kya[0] * (1 - fr)) * np.sin(kya[1] * fr)\n return output.real\n \n def fi(k_parallel):\n return f(1j * k_parallel)\n \n \n if abs(phcs.ep[1] - phcs.ep[0]) * min([1 - fr, fr]) < 0.2:\n real_k_parallel = find_real_roots_for_small_and_big_q(f, qa)\n else:\n real_k_parallel = find_real_roots(f, n1 * k0a + 0.12)\n \n nreal = len(real_k_parallel)\n if nreal < nmode:\n nimag = nmode - nreal\n imag_k_parallel = 1j * np.array(find_n_roots_for_small_and_big_q(fi, qa, nimag))\n return real_k_parallel, imag_k_parallel\n else:\n return real_k_parallel[0:nmode], []", "def get_second_eigenvector(graph):\n ###TODO\n pass", "def vsq_from_E(E):\n Ej=E*1.6021*10**-22\n m=1.674929*10**-27\n return (2.*Ej)/m", "def reflect_eigenvectors(x):\n # random reference vector\n xnew = x.copy()\n for v in range(x.shape[-1]):\n cum_sum = x[0, :, v]\n cum_sum /= np.linalg.norm(cum_sum)\n for i in np.arange(1, x.shape[0]): \n if np.any(np.isnan(x[i, :, v])):\n xnew[i, :, v] = x[i, :, v]\n else:\n cos = cum_sum.dot(x[i, :, v])\n if cos > 0:\n cum_sum += x[i, :, v]\n 
cum_sum /= np.linalg.norm(cum_sum)\n\n else:\n cum_sum += np.negative(x[i, :, v])\n cum_sum /= np.linalg.norm(cum_sum)\n xnew[i, :, v] = np.negative(x[i, :, v])\n \n return xnew", "def compute_eigvals(*params, **hyperparams):\n phi = params[0]\n d, t = hyperparams[\"dimension\"]\n\n if qml.math.get_interface(phi) == \"tensorflow\":\n phase = qml.math.exp(1j * qml.math.cast_like(phi, 1j))\n minus_phase = qml.math.exp(-1j * qml.math.cast_like(phi, 1j))\n return stack_last([phase if index < d else minus_phase for index in range(t)])\n\n arg = 1j * phi\n prefactors = qml.math.array([1 if index < d else -1 for index in range(t)], like=phi)\n\n if qml.math.ndim(phi) == 0:\n product = arg * prefactors\n else:\n product = qml.math.outer(arg, prefactors)\n return qml.math.exp(product)", "def find_local_energy(self):\n state = self.current_state\n (mat_elements, spin_flip_sites) = self.hamiltonian.find_nonzero_elements(state)\n\n flipped_states = [np.copy(state) for _ in spin_flip_sites]\n for i, site in enumerate(spin_flip_sites):\n flipped_states[i][0][site] *= -1\n\n energies = [self.amplitude_ratio(state, flipped_states[i])* element for (i, element) in enumerate(mat_elements)]\n return sum(energies)", "def expms(A, eig=np.linalg.eigh):\r\n # TODO: check that this works reliably for low rank matrices\r\n # first: symmetrize A\r\n D, B = eig(A)\r\n return np.dot(B, (np.exp(D) * B).T)", "def fun(self, X):\n w = np.linalg.eigvalsh(X)\n # check for negative eigenvalues, but be forgiving for very small\n # negative values relative to the maximum eignvalue\n if np.any(np.min(w, axis=-1) < -np.spacing(np.max(w, axis=-1))):\n return np.inf\n else:\n return 0", "def get_whitener( A, k ):\n\n assert( mrank( A ) == k )\n # Verify PSD\n e = eigvals( A )[:k].real\n if not (e >= 0).all():\n print \"Warning: Not PSD\"\n print e\n\n # If A is PSD\n U, _, _ = svdk( A, k )\n A2 = cholesky( U.T.dot( A ).dot( U ) )\n W, Wt = U.dot( pinv( A2 ) ), U.dot( A2 )\n \n return W, Wt", "def smallest_inf_norm_mpmath(V):\n minc = mpmath.mpf(100)\n mi = 0\n for j in range(V.cols):\n maxr = mpmath.mpf(0)\n for k in range(V.rows):\n t = abs(V[k, j])\n if(t > maxr):\n maxr = t\n if(maxr < minc):\n minc = maxr\n mi = j\n return minc", "def normal_modes(self, finite_step):\n\n # Get the mass weighted hessian matrix in amu\n hessian = self.calculate_hessian(finite_step)\n\n # Now get the eigenvalues and vectors\n e_vals, e_vectors = np.linalg.eig(hessian)\n print(e_vals)\n print(e_vectors)", "def e_step(self):\n # update VMF probabilities (Equation (3))\n logP = np.dot(self.features, self.mu.T)*self.kappa + np.log(self.pi).reshape(1,-1) # n by k\n logP_norm = logP - logsumexp(logP, axis=1).reshape(-1,1)\n self.p = np.exp(logP_norm)\n self.mllk = np.mean(logsumexp(logP, axis=1))", "def find_single_eigen(Potential,\n energy_guess,\n rmin, rmax, npoints,\n tolerance,\n max_iterations=100,\n diagnostics=False):\n\n count = 0\n energies = [energy_guess, ]\n h = (rmax - rmin) / (npoints - 1)\n while count < max_iterations:\n count += 1\n if diagnostics:\n print(\"Trying energy: \", energies[-1])\n new_energy, psi = update_energy(Potential, energies[-1],\n rmin, rmax, npoints)\n if diagnostics:\n f = open(\"diagnostic_wavefunction_generated.dat\", \"w\")\n for i, apsi in enumerate(psi):\n f.write(\"%f %f\\n\" % (rmin + i * h, apsi))\n f.close()\n\n energies.append(new_energy)\n if ((count >= 3) and\n (abs(energies[-1] - energies[-2]) <= tolerance) and\n (abs(energies[-2] - energies[-3]) <= tolerance)):\n break # energy converged\n else: # 
\"max_iterations\" reached without success:\n message = (\"Maximum iterations exceeded: %d\" % max_iterations)\n raise MaxIterationsError(message)\n # normalize wavefunction:\n factor = 1.0 / math.sqrt(h * sum([apsi * apsi for apsi in psi]))\n normalized_psi = [apsi * factor for apsi in psi]\n return {\"energy\": energies[-2], # this is energy corresponding to psi\n # not the most recent\n \"psi\": normalized_psi,\n \"rmin\": rmin,\n \"h\": h}", "def fermi(eps, mu, T):\n\n # T=0 case handled separately since (eps-mu)/T is NaN, not +inf\n # or -inf as required to generate step function\n if T == 0:\n return np.piecewise(eps, [eps < mu, eps == mu, eps > mu], [1, 0.5, 0])\n\n # suppress overflow warnings for this calculation. Numpy handles +inf\n # and -inf gracefully in this calculation.\n old_settings = np.seterr()\n np.seterr(over='ignore')\n result = 1/(1 + np.exp((eps - mu)/T))\n np.seterr(**old_settings)\n\n return result", "def eigenCheat( Ja, Jf, truncNum = scipy.inf ):\n H = glueEmH( Ja, Jf, truncNum )\n \n return scipy.linalg.eigh( H )", "def mXZpm_inf(mu,P,mXZj):\n mXZi = np.divide(np.multiply(np.exp(mu), mXZj), P)\n return mXZi, mXZi", "def get_singular_values(matrix, n):\n singular_values = None\n u, s, v = svd(matrix)\n ### YOUR CODE HERE\n pass\n ### END YOUR CODE\n return singular_values", "def potential_energy_per_mode(proj_displ,eigvals): #,hess=None,check=False): \n return 0.5 * ( np.square(proj_displ).T * eigvals ).T #, 0.5 * proj_displ * omega_sqr @ proj_displ", "def FreeFall(inv_mass_matrix, g=1.0):\n\n def potential_energy(q):\n return jnp.sum(g * q[\"x\"])\n\n def kinetic_energy(p):\n v = jnp.multiply(inv_mass_matrix, p[\"x\"])\n return jnp.sum(0.5 * jnp.dot(v, p[\"x\"]))\n\n return potential_energy, kinetic_energy", "def vFrmE(E):\n Ej=E*1.6021*10**-22\n m=1.674929*10**-27\n v=np.sqrt((2.*Ej)/m)\n return(v)", "def test_em_linear(self):\n z_matrix = np.array(\n [[0.000, 0.000, 0.333],\n [0.033, 0.050, 0.267],\n [0.067, 0.100, 0.200],\n [0.100, 0.175, 0.100],\n [0.200, 0.200, 0.067],\n [0.267, 0.225, 0.033],\n [0.333, 0.250, 0.000]],\n dtype=np.float64)\n obtained_w_vector = mcdm.weigh(z_matrix, \"EM\")\n expected_w_vector = np.array(\n [0.37406776, 0.25186448, 0.37406776],\n dtype=np.float64)\n np.testing.assert_allclose(obtained_w_vector, expected_w_vector)\n self.assertEqual(obtained_w_vector.dtype, expected_w_vector.dtype)", "def gaussian_elim(mat):\n up_mat = np.array(mat, dtype=float)\n n = up_mat.shape[0]\n for r in range(0,n-1):\n for rr in range(r+1, n):\n try:\n ratio = up_mat[rr][r] / up_mat[r][r]\n except ZeroDivisionError:\n print(\"zero\")\n continue\n for i in range(r,n):\n up_mat[rr][i] -= up_mat[r][i] * ratio\n return up_mat", "def Ham_eigvals(self,kx,ky):\n tHam=self.Ham_gen(kx,ky)\n eigval=np.linalg.eigvals(tHam)\n sidc=eigval.argsort()\n eigval=eigval[sidc]\n return eigval.real", "def eigen_vector_i_all(self):\n return self._eig_vec", "def gaussian_dense(matrix, two_sigma_square):\n\n return np.exp(- matrix / two_sigma_square)", "def matrix_eig(\n self,\n chis=None,\n eps=0,\n print_errors=\"deprecated\",\n hermitian=False,\n break_degenerate=False,\n degeneracy_eps=1e-6,\n sparse=False,\n trunc_err_func=None,\n evenTrunc = False,\n ):\n if print_errors != \"deprecated\":\n msg = (\n \"The `print_errors` keyword argument has been deprecated, \"\n \"and has no effect. 
Rely instead on getting the error as a \"\n \"return value, and print it yourself.\"\n )\n warnings.warn(msg)\n # If chis is not specfied, there is no even truncation scheme; else, we\n # keep track of the chi we specfied\n if chis is None:\n evenTrunc = False\n else:\n try:\n chis = list(chis)\n except TypeError:\n chis = [chis]\n chiSpec = max(chis)\n chis = self._matrix_decomp_format_chis(chis, eps)\n maxchi = max(chis)\n assert self.defval == 0\n assert self.invar\n assert self.charge == 0\n assert self.dirs[0] + self.dirs[1] == 0\n assert set(zip(self.qhape[0], self.shape[0])) == set(\n zip(self.qhape[1], self.shape[1])\n )\n\n S_dtype = np.float_ if hermitian else np.complex_\n U_dtype = self.dtype if hermitian else np.complex_\n\n # Eigenvalue decompose each sector at a time.\n # While doing so, also keep track of a list of all eigenvalues, as well\n # as a heap that gives the negative of the absolute value of the\n # largest eigenvalue in each sector. These will be needed later when\n # deciding how to truncate the eigenvalues.\n eigdecomps = {}\n dims = {}\n minusabs_next_eigs = []\n all_eigs = []\n for k, v in self.sects.items():\n if 0 in v.shape:\n # This matrix is empty and trivial.\n shp = v.shape\n m = min(shp)\n u = np.empty((shp[0], m), dtype=U_dtype)\n s = np.empty((m,), dtype=S_dtype)\n eigdecomp = (s, u)\n else:\n if sparse and maxchi < min(v.shape) - 1:\n if hermitian:\n s, u = spsla.eighs(\n v, k=maxchi, return_eigenvectors=True\n )\n else:\n s, u = spsla.eigs(\n v, k=maxchi, return_eigenvectors=True\n )\n else:\n if hermitian:\n s, u = np.linalg.eigh(v)\n else:\n s, u = np.linalg.eig(v)\n order = np.argsort(-np.abs(s))\n s = s[order]\n u = u[:, order]\n s = s.astype(S_dtype)\n u = u.astype(U_dtype)\n eigdecomp = (s, u)\n eigdecomps[k] = eigdecomp\n dims[k] = 0\n all_eigs.append(s)\n if 0 not in s.shape:\n heapq.heappush(minusabs_next_eigs, (-np.abs(s[0]), k))\n try:\n all_eigs = np.concatenate(all_eigs)\n except ValueError:\n all_eigs = np.array((0,))\n\n if sparse:\n norm_sq = self.norm_sq()\n else:\n norm_sq = None\n\n # Figure out what bond dimension to truncate to, how this bond\n # dimension is distributed over the different sectors, and what the\n # truncation error is.\n chi, dims, rel_err = type(self)._find_trunc_dim(\n all_eigs,\n eigdecomps,\n minusabs_next_eigs,\n dims,\n chis=chis,\n eps=eps,\n break_degenerate=break_degenerate,\n degeneracy_eps=degeneracy_eps,\n trunc_err_func=trunc_err_func,\n norm_sq=norm_sq,\n )\n\n # truncate in both sectors evenly\n if evenTrunc and chiSpec == chi:\n # This piece of codes is only designed\n # with Z2 symmetry tensor in mind\n errmeg = \"The matrix should have two sectors (0,0) and (1,1).\"\n assert len(dims) == 2, errmeg\n if chiSpec % 2 == 0:\n dims[(0, 0)] = int(chiSpec / 2)\n dims[(1, 1)] = int(chiSpec / 2)\n else:\n dims[(0, 0)] = int((chiSpec + 1) / 2)\n dims[(1, 1)] = int((chiSpec - 1) / 2)\n\n # Truncate each block and create the dim for the new index.\n new_dim = []\n new_qim = []\n eigdecomps = {k: v for k, v in eigdecomps.items() if dims[k] > 0}\n for k, v in eigdecomps.items():\n d = dims[k]\n if d > 0:\n new_dim.append(d)\n new_qim.append(k[0])\n eigdecomps[k] = (v[0][:d], v[1][:, :d])\n else:\n del eigdecomps[k]\n\n # Initialize S and U.\n d = self.dirs[0]\n S = type(self)(\n [new_dim],\n qhape=[new_qim],\n dirs=[d],\n qodulus=self.qodulus,\n dtype=S_dtype,\n invar=False,\n charge=0,\n )\n U = type(self)(\n [self.shape[0], new_dim],\n qhape=[self.qhape[0], new_qim],\n dirs=[d, -d],\n 
qodulus=self.qodulus,\n dtype=U_dtype,\n charge=0,\n )\n\n # Set the blocks of U, S and V.\n for k, v in eigdecomps.items():\n S[(k[0],)] = v[0]\n k_U = (k[0], k[0])\n U[k_U] = v[1]\n\n return S, U, rel_err", "def compute_e(E0, M, e):\r\n E1 = E0 - (E0 - e * sin(E0) - M) / (1 - e * cos(E0))\r\n if abs(abs(degrees(E1)) - abs(degrees(E0))) > 0.001:\r\n E1 = compute_e(E1, M, e)\r\n return E1", "def set_normal_free_energy(self):\n\t\t\n\t\tself.eps_base = self.mu_eps + self.normal_eps_tuning_prefactor* \\\n\t\t\t\t\t\tsp.exp(-(1.*sp.arange(self.Mm))**2.0/(2.0* \\\n\t\t\t\t\t\tself.normal_eps_tuning_width)**2.0)\n\t\t\t\t\t\t\n\t\tself.eps_base += random_matrix(self.Mm, params=[0, self.sigma_eps], \n\t\t\t\t\t\t\t\t\t\tseed=self.seed_eps)\n\t\t\n\t\t# If dual signal, use the average of the FULL signal nonzero components\n\t\tif self.Kk_split == 0:\n\t\t\tself.eps = self.WL_scaling*sp.log(self.mu_Ss0) + self.eps_base \n\t\telse:\n\t\t\tself.eps = self.WL_scaling*sp.log(sp.average(self.Ss\\\n\t\t\t\t\t\t\t[self.Ss != 0])) + self.eps_base\n\t\t\n\t\t# Apply max epsilon value to each component\n\t\tfor iM in range(self.Mm):\n\t\t\tif self.eps[iM] > self.max_eps:\n\t\t\t\tself.eps[iM] = self.max_eps\n\t\t\tif self.eps[iM] < self.min_eps:\n\t\t\t\tself.eps[iM] = self.min_eps", "def eig(self, q):\n q1, q2 = q.T\n c = np.sqrt(9.81*q1)\n lambda1 = q2/q1 - c\n lambda2 = q2/q1 + c\n return np.array([lambda1, lambda2])", "def find_all_params_from_kx(params):\r\n new_params = find_kzs(deepcopy(params))\r\n w = new_params['w']\r\n d_list = new_params['d_list']\r\n kx = new_params['kx']\r\n kz_list = new_params['kz_list']\r\n ex_list = new_params['ex_list']\r\n ez_list = new_params['ez_list']\r\n mu_list = new_params['mu_list']\r\n N = len(mu_list)\r\n \r\n mat = bc_matrix(new_params)\r\n eigenvals, eigenvecs = np.linalg.eig(mat)\r\n which_eigenval_is_zero = np.argmin(np.abs(eigenvals))\r\n null_vector = eigenvecs[:,which_eigenval_is_zero]\r\n if False:\r\n print('null vector:')\r\n print(null_vector)\r\n print('matrix entry absolute values:')\r\n print(np.abs(mat))\r\n print('abs(mat . 
null_vector) should be 0:')\r\n print(np.abs(np.dot(mat, null_vector)))\r\n print('calculated eigenvalue:')\r\n print(eigenvals[which_eigenval_is_zero])\r\n H_up_list = [0]\r\n H_up_list.extend(null_vector[i] for i in range(1, 2*N-2, 2))\r\n H_down_list = [null_vector[i] for i in range(0, 2*N-2, 2)]\r\n H_down_list.append(0)\r\n assert N == len(H_up_list) == len(H_down_list)\r\n \r\n Ex_up_list = [H_up_list[i] * kz_list[i] / (w * ex_list[i] * nu.eps0)\r\n for i in range(N)]\r\n Ex_down_list = [-H_down_list[i] * kz_list[i] / (w * ex_list[i] * nu.eps0)\r\n for i in range(N)]\r\n Ez_up_list = [-H_up_list[i] * kx / (w * ez_list[i] * nu.eps0)\r\n for i in range(N)]\r\n Ez_down_list = [-H_down_list[i] * kx / (w * ez_list[i] * nu.eps0)\r\n for i in range(N)]\r\n \r\n # normalize E and H.\r\n largest_Ez_up_index = np.argmax(np.abs(np.array(Ez_up_list)))\r\n scale_factor = (1 * nu.V/nu.nm) / Ez_up_list[largest_Ez_up_index]\r\n for X_list in [H_up_list, H_down_list, Ex_up_list, Ex_down_list,\r\n Ez_up_list, Ez_down_list]:\r\n for i in range(N):\r\n X_list[i] *= scale_factor\r\n new_params['H_up_list'] = H_up_list\r\n new_params['H_down_list'] = H_down_list\r\n new_params['Ex_up_list'] = Ex_up_list\r\n new_params['Ex_down_list'] = Ex_down_list\r\n new_params['Ez_up_list'] = Ez_up_list\r\n new_params['Ez_down_list'] = Ez_down_list\r\n \r\n # x-component of complex Poynting vector, integrated over a layer\r\n Sx_list = []\r\n for i in range(N):\r\n Ez_up = Ez_up_list[i]\r\n Ez_down = Ez_down_list[i]\r\n H_up_star = H_up_list[i].conjugate()\r\n H_down_star = H_down_list[i].conjugate()\r\n kz = kz_list[i]\r\n d = d_list[i]\r\n Sx = 0\r\n # add each term only if it's nonzero, to avoid 0 * nan in top and\r\n # bottom layers\r\n if Ez_up * H_up_star != 0:\r\n Sx += ((-Ez_up * H_up_star) / (4 * kz.imag)\r\n * (1 - cmath.exp(-2 * kz.imag * d)))\r\n if Ez_down * H_down_star != 0:\r\n Sx += ((-Ez_down * H_down_star) / (4 * kz.imag)\r\n * (1 - cmath.exp(-2 * kz.imag * d)))\r\n if Ez_down * H_up_star != 0:\r\n Sx += ((-Ez_down * H_up_star) / (4j * kz.real)\r\n * (1 - cmath.exp(-2j * kz.real * d))\r\n * cmath.exp(1j * kz * d))\r\n if Ez_up * H_down_star != 0:\r\n Sx += ((-Ez_up * H_down_star) / (4j * kz.real)\r\n * (1 - cmath.exp(-2j * kz.real * d))\r\n * cmath.exp(1j * kz * d))\r\n Sx_list.append(Sx)\r\n new_params['Sx_list'] = Sx_list\r\n # x-component of complex Poynting vector, integrated over all layers\r\n Sx_total = sum(Sx_list)\r\n new_params['Sx_total'] = Sx_total\r\n \r\n layer_bottom_list = [-inf, 0]\r\n for i in range(1,N-1):\r\n layer_bottom_list.append(layer_bottom_list[-1] + d_list[i])\r\n \r\n new_params['layer_bottom_list'] = layer_bottom_list\r\n return new_params", "def helmholtzenergy(temp,pres):\n g = liq_g(0,0,temp,pres)\n g_p = liq_g(0,1,temp,pres)\n f = g - pres*g_p\n return f", "def x_for_initial_time_grid(self):\n tmp = self._Y * self._sqrt_eig_val.reshape(self._num_ev,1) \n if self.verbose > 1:\n print(\"calc process via matrix prod ...\")\n res = np.tensordot(tmp, self._eig_vec, axes=([0],[1])).flatten()\n if self.verbose > 1:\n print(\"done!\")\n \n return res", "def get_initial_condition_euler(self, tol=1e-10):\n Z = zeros((len(self._meshes), len(self._meshes[0].elements)+1))\n for mi, m in enumerate(self._meshes):\n if not m._left_lift:\n raise Exception(\"get_initial_condition_euler() only works if all boundary conditions are given on the left.\")\n\n Z[mi, 0] = m._left_value\n def get_F(Z, t):\n \"\"\"\n Evaluates the RHS for the vector Z and time tau.\n \"\"\"\n Z0 = 
zeros((len(self._meshes),))\n for mi, m in enumerate(self._meshes):\n Z0[mi] = self._F(mi, Z, t)\n return Z0\n def get_phi(Z, Zprev, tau, t):\n return Z - tau*get_F(Z, t) - Zprev\n def get_J(Z, tau, t):\n mat = eye(len(self._meshes))\n for i in range(len(self._meshes)):\n for j in range(len(self._meshes)):\n mat[i, j] += - tau*self._DFDY(i, j, Z, t)\n return mat\n\n # initial time and initial condition vector:\n tprev = self._meshes[0].elements[0].nodes[0].x\n Zprev = Z[:, 0].copy()\n Znext = Zprev[:].copy()\n for el_i in range(len(self._meshes[0].elements)):\n #print \"doing element:\", el_i\n tau = self._meshes[0].elements[el_i].length\n tnext = tprev + tau\n error = 1e10\n i = 0\n while error > tol:\n J = get_J(Zprev, tau, tprev)\n phi = get_phi(Znext, Zprev, tau, tprev)\n dZ = solve(J, -phi)\n Znext += dZ\n error_dZ = l2_norm(dZ)\n error_phi = l2_norm(get_phi(Znext, Zprev, tau, tnext))\n #print \"it=%d, l2_norm_dZ=%e, l2_norm_phi=%e\" % \\\n # (i, error_dZ, error_phi)\n error = max(error_dZ, error_phi)\n i += 1\n Z[:, el_i+1] = Znext[:].copy()\n Zprev = Znext[:].copy()\n tprev = tnext\n\n\n # now assign the Z to the vertex dofs and leave zeros in the bubbles\n Y = zeros((self.ndofs,))\n for mi, m in enumerate(self._meshes):\n coeffs_one_mesh = Z[mi, 1:]\n Y[m.dof_start:m.dof_start+len(coeffs_one_mesh)] = coeffs_one_mesh\n return Y", "def energy(p,m):\n return math.sqrt(p*p + m*m)", "def _emiss_ep(self,Eph):\n if self.weight_ep == 0.0:\n return np.zeros_like(Eph)\n\n gam = np.vstack(self._gam)\n eps = (Eph / mec2).decompose().value\n # compute integral with electron distribution\n emiss = c.cgs * trapz_loglog(np.vstack(self._nelec) * self._sigma_1(gam,eps),\n self._gam, axis=0).to(u.cm**2 / Eph.unit)\n return emiss", "def _get_jpcs(self, M_skew):\n evals, evecs = np.linalg.eig(M_skew)\n evecs = evecs.T\n # get rid of small real number\n evals_j = np.imag(evals)\n\n # sort in descending order\n sort_indices = np.argsort(-np.absolute(evals_j))\n return evals_j[sort_indices], evecs[sort_indices]", "def eigsh(A, M = None, k = 6, sigma = None, which = 'LM', v0=None,\n ncv = None, maxiter = None, tol = 0., return_eigenvectors = True,\n Minv = None, OPinv = None, mode = 'normal'):\n if M is not None:\n raise NotImplementedError(\"M is not currently supported!\")\n if v0 is not None:\n raise NotImplementedError(\"v0 is not currently supported!\")\n if ncv is not None:\n raise NotImplementedError(\"ncv is not currently supported!\")\n if Minv is not None:\n raise NotImplementedError(\"Minv is not currently supported!\")\n if OPinv is not None:\n raise NotImplementedError(\"OPinv is not currently supported!\")\n inp_data = FrovedisFeatureData(A, dense_kind='rowmajor')\n X = inp_data.get()\n x_dtype = inp_data.get_dtype()\n x_itype = inp_data.get_itype()\n dense = inp_data.is_dense()\n nrows = inp_data.numRows()\n ncols = inp_data.numCols()\n\n if nrows != ncols:\n raise ValueError('expected squared symmetric matrix (shape=%s)' % (inp_data.shape,))\n if k <= 0:\n raise ValueError('k must be greater than 0.')\n if k >= nrows:\n raise ValueError('k must be less than or equal to N for N * N square matrix.')\n if sigma is not None and not dense:\n raise ValueError('currently sigma is only supported for dense matrices.')\n if sigma is None:\n sigma = np.finfo(np.float32).max\n\n if which not in ['LM', 'SM', 'LA', 'SA', 'BE']:\n raise ValueError('which must be one of LM, SM, LA, SA, or BE')\n if mode in ['buckling', 'cayley']:\n raise ValueError('currenly normal mode is only supported!')\n if maxiter is 
None:\n maxiter = 10 * nrows\n wantEv = return_eigenvectors\n (host, port) = FrovedisServer.getServerInstance()\n res = rpclib.compute_eigsh(host, port, X.get(),\n k, which.encode('ascii'),\n sigma, maxiter, wantEv,\n tol, x_dtype,\n x_itype, dense)\n excpt = rpclib.check_server_exception()\n if excpt[\"status\"]:\n raise RuntimeError(excpt[\"info\"])\n sptr = res[\"eigenval\"]\n uptr = res[\"eigenvec\"]\n m_m = res['m']\n k_k = res['k']\n eigval = FrovedisVector({'dptr' : sptr, 'size' : k_k},\n dtype = TypeUtil.to_numpy_dtype(x_dtype)).to_numpy_array()\n if wantEv:\n eigvec = FrovedisDenseMatrix('C', {'dptr' : uptr, 'nrow' : m_m, 'ncol' : k_k},\n dtype = TypeUtil.to_numpy_dtype(x_dtype)).to_numpy_array()\n return eigval, eigvec\n else:\n return eigval", "def Mtof(e,M):\n #first calculate eccentric anomaly (bigE)\n f = np.zeros(len(e))\n for i in np.arange(0,len(e)):\n n=0.\n delta=1000.\n bigE = M[i] - e[i]*np.sin(M[i]) \n while (n<1.e4 and delta>1.e-6):\n f1 = bigE - e[i]*np.sin(bigE) - M[i]\n fp = 1.0 - e[i]*np.cos(bigE)\n delta = -f1/fp\n bigE = bigE + delta\n n = n + 1\n f[i] = 2.*np.arctan( ((1. + e[i])/(1. - e[i]))**0.5 * np.tan(bigE/2.) )\n return f", "def calc_J():\n return np.random.normal(loc=0, scale=1) #loc means - mean, scale -std", "def local_energy(self):\n state = self.current_state\n (matrix_elements, transitions) = \\\n self.hamiltonian.find_matrix_elements(state)\n energy_list = [self.nqs.amplitude_ratio(state, transitions[i]) * mel\n for (i, mel) in enumerate(matrix_elements)]\n return sum(energy_list)", "def poweig(A, x0, maxiter = 100, ztol= 1.0e-5, mode= 0, teststeps=1):\n m = len(A)\n xi = x0[:] \n \n for n in range(maxiter):\n # matrix vector multiplication.\n xim1 = xi[:]\n for i in range(m):\n xi[i] = 0.0\n for j in range(m):\n xi[i] += A[i][j] * xim1[j]\n print n, xi\n if mode == 0:\n vlen = sqrt(sum([xi[k]**2 for k in range(m)]))\n xi = [xi[k] /vlen for k in range(m)]\n elif mode == 1:\n for k in range(m-1, -1, -1):\n c = abs(xi[k])\n if c > 1.0e-5:\n xi = [xi[k] /c for k in range(m)]\n break\n # early termination test.\n if n % teststeps == 0:\n S = sum([xi[k]-xim1[k] for k in range(m)])\n if abs(S) < ztol:\n break\n #print n, xi\n # Compute Rayleigh quotient.\n numer = sum([xi[k] * xim1[k] for k in range(m)])\n denom = sum([xim1[k]**2 for k in range(m)])\n xlambda = numer/denom\n return xlambda, xi", "def epipoles_location(f_mat):\r\n u, s, vh = np.linalg.svd(f_mat)\r\n e_l = vh[-1, :]\r\n e_r = u[:, -1]\r\n # get x, y by dividing by w\r\n e_l = (e_l[0] / e_l[2], e_l[1] / e_l[2])\r\n e_r = (e_r[0] / e_r[2], e_r[1] / e_r[2])\r\n return e_l, e_r", "def PPMI(M):\n \n M=normalize(M)\n cols = np.sum(M, axis=0)\n rows = np.sum(M, axis=1).reshape((-1,1))\n s = np.sum(rows)\n \n P = s*M\n P /= cols\n P /= rows\n \n #P[np.where(P<0)] = 1.0\n P = np.log(P)\n\n #To avoid NaN when applying log\n P[np.isnan(P)] = 0.0\n P[np.isinf(P)] = 0.0\n P[np.isneginf(P)] = 0.0\n P[np.where(P<0)] = 0.0\n \n return(P)", "def get_ground_state(sparse_operator, initial_guess=None):\n values, vectors = scipy.sparse.linalg.eigsh(sparse_operator,\n k=1,\n v0=initial_guess,\n which='SA',\n maxiter=1e7)\n\n order = numpy.argsort(values)\n values = values[order]\n vectors = vectors[:, order]\n eigenvalue = values[0]\n eigenstate = vectors[:, 0]\n return eigenvalue, eigenstate.T", "def get_f(M, e):\n\n # Get eccentric anomaly\n def func_E(x,M,e):\n return M - x + e*np.sin(x)\n\n E = newton(func_E, 0.5, args=(M,e))\n\n # Get true anomaly from eccentric anomaly\n f = 
np.arccos((np.cos(E)-e)/(1.0-e*np.cos(E)))\n if np.sin(E) < 0:\n f = 2.0*np.pi - f\n\n return f", "def body(state):\n v = state.v\n # normalize\n v_norm = arr_l2norm(v)\n v = v / v_norm\n # compute the next vector\n v_new = operator.times(v)\n # estimate the eigen value\n new_estimate = jnp.vdot(v, v_new)\n return PowerIterState(v=v_new, old_estimate=state.new_estimate, \n new_estimate=new_estimate, iterations=state.iterations+1)", "def compute_spectrum(P):\r\n evals, lvecs= linalg.eig(P,right=False, left=True)\r\n\r\n lvecs = lvecs/lvecs.sum(axis=0, keepdims=True)\r\n \r\n return evals, lvecs", "def _sigma_ee_nonrel(self,gam,eps):\n s0 = 4 * r0**2 * alpha / (15 * eps)\n x = 4 * eps / (gam**2 - 1)\n sigma_nonrel = s0 * self._F(x,gam)\n sigma_nonrel[np.where(eps >= 0.25*(gam**2 - 1.))] = 0.0\n sigma_nonrel[np.where(gam*np.ones_like(eps) < 1.0)] = 0.0\n return sigma_nonrel / mec2_unit", "def e_step(X, mu, sigma, phi):\n w = None\n\n #######################################################################\n # TODO: #\n # Perform the E-step of the EM algorithm. #\n # Use scipy.stats.multivariate_normal.pdf(...) to compute the pdf of #\n # of a gaussian with the current parameters. #\n #######################################################################\n\n w = np.zeros((X.shape[0], mu.shape[0]))\n for i in range(mu.shape[0]):\n w[:, i] = multivariate_normal(mu[i, :], sigma[i]).pdf(X)*phi[i]\n \n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n\n return w", "def runpower_one(matrix, n):\n\t#get initial vector\n\tv = np.zeros(n)\n\tw = np.zeros(n)\n\tfor j in range(n):\n\t\tv[j] = np.random.uniform(0,1)\n\t#print 'matrix', matrix\n\t#print 'v', v\n\tT = 10000 #number of iterations\n\ttol = 1e-06\n\toldnormw = 0\n\tfor t in range(T):\n\t\tw = matrix.dot(v)\n\t\t#print 't', t, 'w',w\n\t\tnormw = (np.inner(w,w))**.5\n\t\tv = w/normw\n\t\t#print 't',t,'v',v\n\t\t#print 't',t,'normw',normw, 'old', oldnormw\n\t\tif np.abs(normw - oldnormw)/normw < tol:\n\t\t\t#print ' breaking'\n\t\t\tbreak\n\t\toldnormw = normw\n\treturn normw, v", "def _compute_eigenmatrix(self, p, expand=False, factor=False,\n simplify=False):\n B = [Matrix(SR, [M[i] for M in p]) for i in range(self._.d + 1)]\n V = SR**(self._.d + 1)\n R = [[self._.d + 1, V, [Integer(1)]]]\n for i in range(1, self._.d + 1):\n S = sorted(([k, m, V.subspace_with_basis(b)]\n for k, b, m in B[i].eigenvectors_right()),\n key=lambda kvb: CoefficientList(kvb[0], self._.vars),\n reverse=True)\n j = 0\n while j < len(R):\n m, s, r = R[j]\n h = 0\n while h < len(S):\n k, v, b = S[h]\n sb = s.intersection(b)\n d = sb.dimension()\n if d == v:\n del S[h]\n else:\n S[h][1] -= d\n h += 1\n if d == m:\n R[j][1] = sb\n r.append(k)\n break\n elif d > 0:\n R.insert(j, [d, sb, r + [k]])\n j += 1\n m -= d\n R[j][0] = m\n j += 1\n assert len(R) == self._.d + 1 and all(len(r) == self._.d + 1\n for _, _, r in R), \\\n \"failed to compute the eigenmatrix\"\n return Matrix(SR, [r for _, _, r in R])", "def MDL_KLT(data):\n\n eigs = []\n p = 64\n N = len(data[0])//p\n for sig in data:\n splits = np.split(sig, N)\n cov_matrix = np.zeros((p, p), dtype=np.complex128)\n for split in splits:\n split /= np.mean(split)\n cov_matrix += np.outer(split, np.conj(split))\n\n eigv = np.real(scipy.linalg.eigvalsh(cov_matrix)[::-1])\n eigv = eigv/np.mean(eigv)\n \n best_k = 0\n best_MDL = float(\"inf\")\n for k in range(0,p):\n noise_eigs = eigv[k:]\n noise_dim = 
len(noise_eigs)\n ratio = gmean(noise_eigs)/np.mean(noise_eigs)\n cur_MDL = -np.log(ratio**(noise_dim*N)) + .5*k*(2*p-k)*np.log(N)\n if cur_MDL < best_MDL:\n best_k = k\n best_MDL = cur_MDL\n \n if best_k == 0:\n eigs.append(0)\n else:\n eigs.append(sum(eigv[:best_k]))\n \n return np.real(np.array(eigs))", "def regular(P):\n try:\n dim = P.shape[0]\n q = (P - np.eye(dim))\n ones = np.ones(dim)\n q = np.c_[q, ones]\n QTQ = np.dot(q, q.T)\n bQT = np.ones(dim)\n answer = np.linalg.solve(QTQ, bQT)\n if np.all(answer > 0):\n return answer\n else:\n return None\n except Exception as e:\n return None" ]
[ "0.62551945", "0.60196066", "0.59203595", "0.5908042", "0.589194", "0.5879424", "0.5878952", "0.5874368", "0.5866997", "0.5835588", "0.58008105", "0.5785766", "0.5769242", "0.576087", "0.5740334", "0.5668045", "0.55842084", "0.5565213", "0.5563019", "0.55508184", "0.55072254", "0.5490288", "0.5480005", "0.5477882", "0.5471057", "0.5466267", "0.5446539", "0.5446466", "0.5414399", "0.54142153", "0.5401303", "0.53931516", "0.53910476", "0.5386933", "0.5383734", "0.5369902", "0.5362573", "0.53560174", "0.5354308", "0.5353061", "0.53369045", "0.53315467", "0.53247464", "0.53023726", "0.5291905", "0.52729505", "0.52656287", "0.52635944", "0.5261137", "0.5259032", "0.5245464", "0.52424103", "0.5235796", "0.52252394", "0.5218647", "0.52121097", "0.5211751", "0.52077013", "0.5198228", "0.5197519", "0.51931804", "0.51903236", "0.51887083", "0.5187353", "0.518735", "0.5186982", "0.5181537", "0.5174754", "0.51740825", "0.51708364", "0.5170024", "0.51687413", "0.5158459", "0.51550305", "0.5147479", "0.5145693", "0.51444834", "0.5133926", "0.51317763", "0.5131371", "0.5123269", "0.51228917", "0.5120484", "0.51165783", "0.5116178", "0.5114152", "0.51122683", "0.51097", "0.51090795", "0.5106913", "0.5105548", "0.51045567", "0.51014304", "0.5098677", "0.50985235", "0.5096214", "0.5091191", "0.50907505", "0.50829756", "0.50824726" ]
0.763889
0
Inverse of np.block. Set axis to (2, 1) to modify the order of the result.
def unblock(arr: np.ndarray, n1: int, n2: int, axis1: int = -1, axis2: int = -2, blocksize: bool = False) -> np.ndarray:
    """
    test (stackoverflow):
        Ok, so considering I have N block matrices with bm x bn dimension and want to stack them
        in a m x n matrix, provided N = m x n, I would then have
        x.reshape(m,n,bm,bn).swapaxes(1,2).reshape(bm*m,-1)
    """
    s = np.array(arr.shape)
    if s[axis1] % n1 != 0 or s[axis2] % n2 != 0:
        raise ValueError(f"{s[axis1]}x{s[axis2]} does not divide by {n1}x{n2}")
    if blocksize:
        n1 = s[axis1] // n1
        n2 = s[axis2] // n2

    # this first .split adds a new dimension on the outside, so if an absolute index
    # is given for the second axis it must be moved one to the right
    if axis2 >= 0:
        _axis2 = axis2 + 1
    else:
        _axis2 = axis2

    arr = np.array(np.split(arr, n1, axis1))
    arr = np.array(np.split(arr, n2, _axis2))

    inv_blocksize = n1 * n2
    total = s[axis1] * s[axis2]
    s[axis2] = inv_blocksize
    s[axis1] = total // inv_blocksize

    return np.reshape(arr, s)
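A minimal round-trip sketch (not part of the dataset row; the small matrix `A` is a made-up example and the resulting shapes and block ordering are inferred from reading the code above, so treat them as assumptions to verify):

import numpy as np

A = np.arange(6).reshape(2, 3)
big = np.block([[A, A + 10], [A + 20, A + 30]])   # shape (4, 6)

out = unblock(big, 2, 2)   # split back into a 2 x 2 grid of blocks
# out should have shape (4, 6): axis -2 indexes the four blocks in row-major order,
# axis -1 holds each 2x3 block flattened to length 6.
assert np.array_equal(out[0].reshape(2, 3), A)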
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inverse(self):\n data = np.linalg.inv(self._data)\n return self.create(self.rows, self.cols, data)", "def inverse(self, x, y):", "def inverse(self: Float[LinearOperator, \"*batch N N\"]) -> Float[LinearOperator, \"*batch N N\"]:\n return self.__class__(self._diag.reciprocal())", "def invert(self):\n if self.m != self.n:\n raise exc.LinearAlgebraError(\"cannot invert a non-square matrix\")\n if self.determinant == 0:\n raise exc.LinearAlgebraError(\"cannot invert a singular matrix\")\n # TODO: implement block matrices in their own method\n block_rows = [r1 + r2 for r1, r2 in\n zip(self.data, self.makeIdentity(self.m).data)]\n inverse_block = Matrix.fromRows(block_rows).row_reduce()\n return inverse_block.subset([i for i in range(self.m)],\n [j + self.n for j in range(self.n)])", "def _inv(self) -> None:\n\n self.inv(inplace=True)", "def inverse(self,mat):\n result = np.linalg.inv(mat)\n self.out = result\n return self.out", "def inverse_transform(self, y: Array2D) -> Array2D:", "def inv_inplace(a):", "def inverse(self):\n self.check_square()\n\n\n N = self.rows\n\n inverse = make_matrix(N, N)\n\n # Solve on a per-column basis using Ax = b formalism\n for j in range(N):\n b = make_matrix(N, 1)\n b[j, 0] = 1\n\n x = self.solve_linear_system(b)\n\n for i in range(N):\n inverse[i, j] = x[i, 0]\n\n return inverse", "def getInverseMatrix(self) -> CMatrix4:\n ...", "def inverse(self):\n group = self.group\n r = tuple([(i, -j) for i, j in self.array_form[::-1]])\n return group.dtype(r)", "def inverse(self):\n return self.solve(Matrix.I(self.nrows))", "def inverse(self):\n if not self.is_square():\n raise(ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n\n # TODO - your code here\n if self.h == 1:\n inverse = [[1/self.g[0][0]]];\n else:\n a = self.g[0][0];\n b = self.g[0][1];\n c = self.g[1][0];\n d = self.g[1][1];\n if(a*d==b*c):\n raise ValueError('matrix does not have a inverse!');\n else:\n weigh = 1/(a*d-b*c);\n inverse = [[weigh*d,weigh*-1*b],[weigh*-1*c,weigh*a]];\n return Matrix(inverse);", "def inverse(self):\n if not self.is_square():\n raise(ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n # TODO - your code here\n inverse = []\n if self.h == 1:\n temp = []\n temp.append(1/self.g[0][0])\n inverse.append(temp)\n else:\n identity_matrix = identity(self.h)\n det_term = 1/self.determinant()\n trace_term = self.trace()\n # implement intermediate scaling step locally\n # trace_x_I = trace_term * identity_matrix\n trace_x_I = []\n for i in range(len(self.g)):\n temp_row = []\n for j in range(len(self.g[i])):\n temp_row.append(trace_term * identity_matrix[i][j])\n trace_x_I.append(temp_row)\n # implement sub-traction locally\n # sub_term = trace_x_I - self.g\n sub_term = []\n for i in range(len(trace_x_I)):\n temp_row = []\n for j in range(len(trace_x_I[i])):\n temp_row.append(trace_x_I[i][j] - self.g[i][j])\n sub_term.append(temp_row)\n # implement final scaling step locally\n # inverse = det_term * sub_term\n inverse = []\n for i in range(len(sub_term)):\n temp_row = []\n for j in range(len(sub_term[i])):\n temp_row.append(det_term * sub_term[i][j])\n inverse.append(temp_row)\n return Matrix(inverse)\n # TODO - your code here", "def inverse(im): \t \n x,y = np.shape(im)\n img = np.zeros([x,y])\n\t\n for i in range(x):\n for j in range(y):\n img[i,j] = 
255 - im[i,j]\n return img", "def inverse(self):\n def inv(v):\n v[0], v[1] = v[1] , v[0]\n for v in [self.point1 , self.pointN , self.unitv, self.normalv]:\n inv(v)\n\n self.points = numpy.roll(self.points,1,axis=1)\n self.a, self.b = self.b, self.a\n self.angle = numpy.arccos( self.unitv[0] )*numpy.sign(self.unitv[1] )\n return", "def inverse(self):\r\n \r\n Mi=mat4()\r\n d=self.determinant()\r\n for i in range(4):\r\n for j in range(4):\r\n sign=1-((i+j)%2)*2\r\n m3=self._submat(i,j)\r\n Mi[j,i]=sign*m3.determinant()/d\r\n return Mi", "def extract_inverse_covariance(self, block):\n return np.linalg.inv(self.cov)", "def inv(self, y):\n pass", "def inverse(self):\n cdef StdVectorFst result = self.copy()\n result.invert()\n return result", "def inverse(self, y):\n device = y.device\n return t.einsum('ij,k,kj->ik', y, 1. / t.sqrt(self.eig).to(device), self.rot.to(device))", "def inverse(self):\n invr = np.linalg.inv(self.affine_matrix)\n return SymmOp(invr)", "def invert(self) -> Frame:\n return Inverse(self)", "def _inverse_edges(edges: np.array) -> np.array:\n inversed_edges = edges.copy()\n inversed_edges[:, [0, 1]] = inversed_edges[:, [1, 0]]\n return inversed_edges", "def inv(self):\n self.inverse = not self._inverse\n return self", "def inverse(self):\n return self._inverse", "def inverse(self):\n if self.determinant() != 0:\n ops = reduce_to_red_echelon(self.data.copy(), True)[1]\n matrix = identity_matrix(self.n_rows).data\n \n if ops:\n if isinstance(ops[0], str):\n ops = [ops]\n \n for op in ops:\n if op[0] == 'swap':\n matrix = row_swap(matrix, op[1], op[2])\n elif op[0] == 'multiplication':\n matrix = row_multiply(matrix, op[1], op[2])\n elif op[0] == 'subtract':\n matrix = row_subtract(matrix, op[1], op[2], op[3])\n else:\n raise ValueError('Row operation not recognized')\n else:\n raise ValueError('Matrix has a determinant of 0 and is not invertible')\n return Matrix(matrix)", "def convert_flip(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n axis = op.attr(\"axis\")\n\n for i, ax in enumerate(axis):\n if i == 0:\n out = _op.reverse(x, ax)\n else:\n out = _op.reverse(out, ax)\n\n g.add_node(op.output(\"Out\")[0], out)", "def inverse_basis(T, dimensions, t):\n B = basis(T, dimensions, t)\n return inv(B.T.dot(B)).dot(B.T)", "def __invert__(self):\n return self.inverse()", "def inv(self):\n\n self.x, self.y = self.y, self.x\n self._x_, self._y_ = self._y_, self._x_\n self.xfac, self.yfac = 1 / self.yfac, 1 / self.xfac\n self._xfac_, self._yfac_ = 1 / self._yfac_, 1 / self._xfac_\n self._u = 1 / self._u.conj()", "def _block_to_full(\n block_mat: np.ndarray, inverse: np.ndarray, shape: Tuple[int, ...]\n) -> np.ndarray:\n block_map = cartesian_product(inverse, inverse).T\n mat_by_edge = block_mat[block_map[0], block_map[1]]\n full_mat = mat_by_edge.reshape(shape)\n return full_mat", "def inverse(self):\n return self.invert()", "def inverse(self):\n if self.inv is None:\n if self.size > 0:\n self.inv = inverseIndex(self)\n else:\n self.inv = Connectivity()\n return self.inv", "def ComputeInverseInnerOrientation(self):\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n mat = np.array([[a1[0], a2[0]], [b1[0], b2[0]]])\n mat = la.inv(mat)\n\n return np.array([a0[0], b0[0], mat[0, 0], mat[0, 1], mat[1, 0], mat[1, 1]]).T", "def invert(self, img):\n return self.inverse()(img)", 
"def _get_inv(self):\n m,d = self.B.shape\n Im = np.eye(m)\n Id = np.eye(d)\n BBt = [email protected]\n I_BBt_inv = np.linalg.pinv(Im + BBt)\n \n return (1/self.alpha)*(Id - self.B.T@( [email protected]/self.alpha))", "def GetInverse(self, *args):\n return _itkTranslationTransformPython.itkTranslationTransformD2_GetInverse(self, *args)", "def inverse(self, point):\n raise NotImplementedError('The Lie group inverse is not implemented.')", "def __invert__(self):\n from sage.matrix.constructor import matrix\n from .comp import Components\n if self._is_identity:\n return self\n if self._inverse is None:\n if self._name is None:\n inv_name = None\n else:\n inv_name = self._name + '^(-1)'\n if self._latex_name is None:\n inv_latex_name = None\n else:\n inv_latex_name = self._latex_name + r'^{-1}'\n fmodule = self._fmodule\n si = fmodule._sindex\n nsi = fmodule._rank + si\n self._inverse = self.__class__(fmodule, inv_name, inv_latex_name)\n for basis in self._components:\n try:\n mat = self.matrix(basis)\n except (KeyError, ValueError):\n continue\n mat_inv = mat.inverse()\n cinv = Components(fmodule._ring, basis, 2, start_index=si,\n output_formatter=fmodule._output_formatter)\n for i in range(si, nsi):\n for j in range(si, nsi):\n cinv[i, j] = mat_inv[i-si,j-si]\n self._inverse._components[basis] = cinv\n self._inverse._inverse = self\n return self._inverse", "def modular_inverse(self):\n i = gmpy2.invert(self.c2, self.n)\n mx = pow(self.c1, self.a, self.n)\n my = pow(i, int(-self.b), self.n)\n self.m= mx * my % self.n", "def inverse(self: Float[LinearOperator, \"*batch N N\"]) -> Float[LinearOperator, \"*batch N N\"]:\n return ConstantDiagLinearOperator(self.diag_values.reciprocal(), diag_shape=self.diag_shape)", "def inverse(self):\n return ~self", "def inverse(self):\n # TODO\n # detA\n if not self.is_square():\n raise(\n ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(\n NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n\n mD = self.determinant()\n if self.h == 1:\n if self.g[0][0] = 0:\n raise(NotImplementedError,\n \"The 1x1 Matrix contains 0 can't inverse\")\n else:\n return [[1 / self.g[0][0]]] \n for i in range(self.h): # Calculates the inverse of a 2x2 Matrix.\n my_Matrix = zeroes(2, 2)\n my_Matrix.g[1][1] = self.g[0][0] / mD\n my_Matrix.g[0][0] = self.g[1][1] / mD\n my_Matrix.g[0][1] = - self.g[0][1] / mD\n my_Matrix.g[1][0] = - self.g[1][0] / mD\n return my_Matrix\n\n # trace A\n # 与矩阵TraceA * I identity 单位矩阵", "def invert(self):\n\n if self.rows != self.columns:\n raise ValueError(\"Matrix must be square to invert\")\n\n A, operations = self.to_reduced_row_echelon()\n if not A.is_identity():\n return 0\n\n # If A was reduced to the identity matrix, then the same set of operations will take I to the inverse of A.\n # [A I] -> [I A^(-1)]\n\n I = IdentityMatrix(size = self.rows)\n for operation in operations:\n func = I.__getattribute__(operation[0])\n args = operation[1:]\n func(*args)\n\n return I", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def inverse(self, x):\n x = np.asarray(x)\n def r(vec):\n return utils.recycled(vec, as_=x)\n if self.zero is not None and self.multiplier is not None:\n x = x / r(self.multiplier) + r(self.zero)\n elif self.zero is not None:\n x = x + r(self.zero)\n elif self.multiplier is not None:\n x = x / r(self.multiplier)\n 
return x", "def _block_to_full(block_mat, inverse, shape):\n # block_map = cartprod(inverse[0], inverse[1]).T\n block_map = cartprod(inverse, inverse).T\n mat_by_edge = block_mat[block_map[0], block_map[1]]\n full_mat = mat_by_edge.reshape(shape)\n return full_mat", "def inv(self):\n return self.conjugate()", "def inverse(self) -> 'Invertible':\n raise NotImplementedError", "def __invert__(self):\n try:\n B = ~(self.matrix())\n except ZeroDivisionError:\n raise ZeroDivisionError(\"matrix morphism not invertible\")\n try:\n return self.parent().reversed()(B)\n except TypeError:\n raise ZeroDivisionError(\"matrix morphism not invertible\")", "def Inverse(self, freedofs: pyngcore.BitArray = None, inverse: str = '') -> BaseMatrix:", "def Inverse(self, freedofs: pyngcore.BitArray = None, inverse: str = '') -> BaseMatrix:", "def inverse(self, z, y):\n y_summary = self.summary_net(y)\n return self.invertible_net(z, y_summary, inverse=True)", "def _inverse_transform(self, x):\n if x.atleast_2d().shape[1] != self.w.size:\n raise ValueError(\"array to revert must have {:} \"\n \"features (columns).\".format(self.w.size))\n\n v = (x - self.b).atleast_2d()\n\n v[:, self.w != 0] /= self.w[self.w != 0] # avoids division by zero\n\n return v.ravel() if x.ndim <= 1 else v", "def inv_heaviside(n, axis=0, normalized=True):\n w = jnp.sqrt(jnp.arange(n, 0, -1))\n\n times_u = lambda x: jnp.diff(x, prepend=0)\n trans_u = lambda x: -jnp.diff(x, append=0)\n\n times_n = lambda x: jnp.diff(x, prepend=0) * w\n trans_n = lambda x: -jnp.diff(x * w, append=0)\n\n times, trans = (times_n, trans_n) if normalized else (times_u, trans_u) \n times, trans = apply_along_axis(times, trans, axis)\n return Operator(times=times, trans=trans, shape=(n, n))", "def block_unbinding2(x,y):\n res = inv_binding_circular(x,y)\n return res", "def __invert__(self):\n a = self.array_form\n n = len(a)\n inv_form = [0] * n\n for i in xrange(n):\n inv_form[a[i]] = i\n return _new_from_array_form(inv_form)", "def inverse(self, x, y):\n if hasattr(x, '__iter__'):\n if len(x) != len(y):\n raise ValueError('x and y should be the same length!')\n\n return np.array([\n self.mechanism.inverse(xx, yy)\n for xx, yy in zip(x, y)\n ]).T\n\n return self.mechanism.inverse(x, y)", "def inverse(self: T) -> T:", "def inv(self):\n inv = np.linalg.inv(self._mat)\n return MoebTr(inv[0][0], inv[0][1], inv[1][0], inv[1][1])", "def inverseN(self):\r\n result = Matrix(self.rows, self.columns)\r\n for r in range(self.rows):\r\n for c in range(self.columns):\r\n result.mat[r][c] = self.cofactor(r, c)\r\n result.out()\r\n result = result.transpose()\r\n det = self.determinant()\r\n print(\"1/(\" + str(det) + \")\")\r\n result.out()\r\n return result", "def inverse_cayley_transform(z: torch.Tensor) -> torch.Tensor:\n identity = identity_like(z)\n i_identity = multiply_by_i(identity)\n\n z_minus_id = z - i_identity\n inv_z_plus_id = inverse(z + i_identity)\n return z_minus_id @ inv_z_plus_id", "def inverse_transform(self, X):\n\n pass # pragma: no cover", "def __invert__(self):\n return self.fam.c_unop('invert', self)", "def inverse(self):\n n = self.norm()\n c = self.conj()\n d = 1.0 / (n * n)\n c.scale(d)\n return c", "def inverted(self, dim = 0):\n return self.foreach(\n lambda k,v: (k[:dim] + (-k[dim],) + k[dim+1:],v),\n dimensions = self.dims,\n shape = self.shape,\n )", "def inverse(self, ys):\n with torch.no_grad():\n xs = torch.matmul(ys, torch.diag(torch.reciprocal(torch.exp(self.scaling_diag))))\n xs = self.layer4.inverse(xs)\n xs = self.layer3.inverse(xs)\n xs 
= self.layer2.inverse(xs)\n xs = self.layer1.inverse(xs)\n return xs", "def inverse(self):\n # find the determinant of the matrix\n determinant = self.determinant()\n # find the matrix of minors of the matrix\n matrix_of_minors = self.matrix_of_minors()\n # find the cofactor of the matrix of minors\n cofactor_matrix = self.cofactor_matrix(matrix_of_minors)\n # find the transpose of the cofactor matrix\n transpose_cofactor_matrix = self.transpose(cofactor_matrix)\n # find the adjugate (inverse) matrix\n inverse_matrix = self.adjugate_matrix(determinant, transpose_cofactor_matrix)\n\n return inverse_matrix", "def invert_inplace(a):", "def _inverse(self, x):\n alpha, beta = self._get_alpha_beta()\n diff = x - self.x0\n r = tf.linalg.norm(diff, axis=-1, keepdims=True)\n h = 1. / (alpha + r)\n beta_h = beta * h\n return x + beta_h * diff", "def inverse_transform(self, X, copy=...):\n ...", "def _inverse(self, y):\n d = self._compute_shared(y=y)\n rely = y - d.y_k # tf.where(d.out_of_bounds, tf.zeros_like(y), y - d.y_k)\n term2 = rely * (d.d_kp1 + d.d_k - 2 * d.s_k)\n # These terms are the a, b, c terms of the quadratic formula.\n a = d.h_k * (d.s_k - d.d_k) + term2\n b = d.h_k * d.d_k - term2\n c = -d.s_k * rely\n # The expression used here has better numerical behavior for small 4*a*c.\n relx = tf.where(\n tf.equal(rely, 0), tf.zeros_like(a),\n (2 * c) / (-b - tf.sqrt(b**2 - 4 * a * c)))\n return relx * d.w_k + d.x_k #tf.where(d.out_of_bounds, y, relx * d.w_k + d.x_k)", "def inverse_matrice(T):\n a,b,c,d = T[0][0],T[0][1],T[1][0],T[1][1]\n det = a*d-b*c\n aa,bb,cc,dd = d/det,-b/det,-c/det,a/det\n Tinv = [[aa,bb],[cc,dd]]\n return Tinv", "def invert(array):\n\n f = [1, 1, 1]\n\n result = np.array(array)\n\n for row in range(result.shape[0]):\n for pixel in range(result.shape[1]):\n result[row][pixel] = f - result[row][pixel]\n\n return result", "def complex_inverse(c1,cr):", "def inverse( m, context = FloatContext, copy_m=True ):\n n,n_ = shape_mat(m)\n assert (n==n_) #matris should be square\n\n return solve( m, eye(n), context=context, copy_b=False, copy_a=copy_m )", "def inverse(self):\n return Rotation(self.matrix.transposed())", "def inverse_rigid_trans(Tr): \n inv_Tr = np.zeros_like(Tr) # 3x4\n inv_Tr[0:3,0:3] = np.transpose(Tr[0:3,0:3])\n inv_Tr[0:3,3] = np.dot(-np.transpose(Tr[0:3,0:3]), Tr[0:3,3])\n return inv_Tr", "def __invert__(self):\n \n return Vector(-self.y, self.x)", "def inverted_residual_block(inputs, filters, kernel, t, strides, n, alpha, block_id, train_bn=False):\n\n x = bottleneck(inputs, filters, kernel, t, strides, False, alpha, block_id, train_bn)\n\n for i in range(1, n):\n block_id += 1\n x = bottleneck(x, filters, kernel, t, 1, True, alpha, block_id, train_bn)\n\n return x", "def invert(x):\n return linalg.inv(x)", "def _inverse_lines(self):\n pass", "def inverse_transform(self, matrix):\n\n x = matrix.shape[0]\n y = matrix.shape[1]\n N = x\n\n # Inverse Fourier Transform matrix:\n ift = np.zeros([x, y], complex)\n\n for i in range(0, x):\n for j in range(0, y):\n sum_ift = 0\n for u in range(0, x):\n for v in range(0, y):\n sum_ift = sum_ift + matrix[u, v] * (np.cos(((2 * np.pi) / N) * (u * i + v * j)) + 1j * np.sin(((2 * np.pi) / N) * (u * i + v * j)))\n\n ift[i, j] = sum_ift\n\n\n return ift/(x*x)", "def inverse(self, x):\n return self.mul(self.weights, x.unsqueeze(-1)).squeeze(-1) + self.shift\n #return self.mul(torch.inverse(self.weights), (x - self.shift).unsqueeze(-1)).squeeze(-1)", "def invert(self):\n self._c = ~self._c", "def inverse(self):\n myMatrix = 
np.array(self.Matrix)\n if np.linalg.det(myMatrix) == 0:\n print(\"This matrix has a determinant of 0, meaning it has no inverse\")\n else:\n self.Inverse = np.linalg.inv(myMatrix)\n print(\"This is the inverse to your matrix: \", self.Inverse)", "def inverse(self):\n\n return Shape(lambda site: not self.shape(site))", "def inv(self):\n return MoebGen(self._d / self._det, - self._b / self._det, - self._c / self._det, self._a / self._det)", "def _inverse_affine_matrix(self) -> np.ndarray:\n raise NotImplementedError", "def inv(in_A):\n Q,R = qr(in_A)\n QT = Q.T\n N = shape(in_A)[0]\n \n for n in range(N-1,-1,-1):\n Rnn = R[n,n]\n R[n,:] /= Rnn\n QT[n,:] /= Rnn\n for m in range(n+1,N):\n Rnm = R[n,m]\n R[n,m] = 0\n QT[n,:] -= QT[m,:]*Rnm\n\n return QT", "def posdef_inv_matrix_inverse(tensor, identity, damping):\n return tf.matrix_inverse(tensor + damping * identity)", "def _inverse_transform(self, X, y=None):\n return clone(self.transformer).fit(X=X, y=y).inverse_transform(X=X, y=y)", "def inverse(self):\n tfft_inv = np.zeros(\n self.tfft.shape, dtype=np.complex)\n tfft_inv[self.tfft != 0] = 1. / self.tfft[self.tfft != 0]\n efft_inv = np.zeros(\n self.efft.shape, dtype=np.complex)\n efft_inv[self.efft != 0] = 1. / self.efft[self.efft != 0]\n bfft_inv = np.zeros(\n self.bfft.shape, dtype=np.complex)\n bfft_inv[self.bfft != 0] = 1. / self.bfft[self.bfft != 0]\n\n ret = tebfft(\n self.nx,\n self.dx, [tfft_inv, efft_inv, bfft_inv],\n ny=self.ny,\n dy=self.dy)\n\n return ret", "def inv_m(self):\n self.m = -self.m", "def invert(self, *args, **kwargs):\n with_units = kwargs.pop('with_units', False)\n\n if not utils.isnumerical(args[0]):\n args = self.output_frame.coordinate_to_quantity(*args)\n if self.output_frame.naxes == 1:\n args = [args]\n try:\n if not self.backward_transform.uses_quantity:\n args = utils.get_values(self.output_frame.unit, *args)\n except (NotImplementedError, KeyError):\n args = utils.get_values(self.output_frame.unit, *args)\n\n if 'with_bounding_box' not in kwargs:\n kwargs['with_bounding_box'] = True\n\n if 'fill_value' not in kwargs:\n kwargs['fill_value'] = np.nan\n\n try:\n # remove iterative inverse-specific keyword arguments:\n akwargs = {k: v for k, v in kwargs.items() if k not in _ITER_INV_KWARGS}\n result = self.backward_transform(*args, **akwargs)\n except (NotImplementedError, KeyError):\n result = self.numerical_inverse(*args, **kwargs, with_units=with_units)\n\n if with_units and self.input_frame:\n if self.input_frame.naxes == 1:\n return self.input_frame.coordinates(result)\n else:\n return self.input_frame.coordinates(*result)\n else:\n return result" ]
[ "0.67732894", "0.665631", "0.65951234", "0.65613794", "0.6526042", "0.6483693", "0.6445868", "0.6425579", "0.6381038", "0.6379403", "0.6377041", "0.6358489", "0.6346017", "0.6340541", "0.6327942", "0.63209075", "0.62847763", "0.6274256", "0.62589926", "0.6158384", "0.61480635", "0.6145619", "0.6144255", "0.6116194", "0.61112756", "0.6102303", "0.6099434", "0.60957944", "0.60862064", "0.6085494", "0.6078072", "0.6065618", "0.60533834", "0.60343814", "0.6033006", "0.6019098", "0.600921", "0.6004677", "0.60040504", "0.6001581", "0.5997482", "0.5975962", "0.59700876", "0.5969523", "0.5962544", "0.5954064", "0.5954064", "0.5954064", "0.5954064", "0.5954064", "0.59503514", "0.5945828", "0.5945436", "0.59353465", "0.59202576", "0.59076196", "0.59076196", "0.5900619", "0.5874374", "0.58647925", "0.58536875", "0.5853359", "0.5851917", "0.5836698", "0.5830392", "0.5827673", "0.5823221", "0.58221895", "0.5818514", "0.5817079", "0.58040637", "0.57920754", "0.5789731", "0.5784987", "0.577773", "0.576102", "0.57585955", "0.5747421", "0.5745714", "0.5745288", "0.5743484", "0.5738358", "0.5734558", "0.5733933", "0.5710756", "0.5703536", "0.56919444", "0.5689805", "0.5689629", "0.5689344", "0.56785816", "0.56723", "0.5670992", "0.56685245", "0.56608206", "0.56580514", "0.5657403", "0.5656013", "0.56526583", "0.5641265" ]
0.6248697
19
Replace colored pixels with a `neutral_color`. The `ratio` defines the level of 'colorfulness' above which a pixel is replaced, i.e. if the `ratio` is 1 nothing is replaced, and if the `ratio` is 0 only strict greys are kept unmodified.
def remove_color(img: np.ndarray, ratio: float, neutral_color: Tuple[int, int, int] = RGB_WHITE) -> None:
    channels = img.shape[-1]
    assert channels == 3, "Not a 3 channel color image"
    norm = np.std(np.array(RGB_YELLOW))  # this is the same for all pure colors
    sd = np.std(img, axis=-1)
    img[sd > ratio * norm] = neutral_color
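A short in-place usage sketch (not part of the dataset row; `RGB_WHITE` and `RGB_YELLOW` are assumed to be module-level constants such as (255, 255, 255) and (255, 255, 0)):

import numpy as np

img = np.array([[[200, 200, 200],    # near-grey pixel, per-channel std ~0 -> kept
                 [255, 0, 0]]],      # saturated red, high per-channel std -> replaced
               dtype=np.uint8)
remove_color(img, ratio=0.5)
# img[0, 1] is now neutral_color (white by default); img[0, 0] is unchanged,
# and the function returns None because it mutates img in place.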
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ratio_to_rgb(ratio):\n b = 0\n if round(ratio, 1) == 0.5:\n r = 255\n g = 255\n elif ratio < 0.5:\n r = int(ratio * 2 * 255.0)\n g = 255\n else:\n r = 255\n g = int((1.0 - ratio) * 2 * 255.0)\n rgb = (r, g, b)\n\n return rgb", "def set_neutral(self):\n\t\tself._head.set_pan(0.0)", "def to_aspect_ratio_add_and_remove(image, target_ratio):\n height = image.shape[0]\n width = image.shape[1]\n ratio = width / height\n\n remove_top = 0\n remove_right = 0\n remove_bottom = 0\n remove_left = 0\n pad_top = 0\n pad_bottom = 0\n pad_left = 0\n pad_right = 0\n\n # loops here are inefficient, but easy to read\n i = 0\n if ratio < target_ratio:\n # vertical image, height > width\n while ratio < target_ratio:\n if i % 4 == 0:\n remove_top += 1\n height -= 1\n elif i % 4 == 2:\n remove_bottom += 1\n height -= 1\n elif i % 4 == 1:\n pad_right += 1\n width += 1\n else: # i % 4 == 3\n pad_left += 1\n width += 1\n ratio = width / height\n i += 1\n elif ratio > target_ratio:\n # horizontal image, width > height\n while ratio > target_ratio:\n if i % 4 == 0:\n remove_right += 1\n width -= 1\n elif i % 4 == 2:\n remove_left += 1\n width -= 1\n elif i % 4 == 1:\n pad_top += 1\n height += 1\n else: # i % 4 == 3\n pad_bottom += 1\n height += 1\n ratio = width / height\n i += 1\n\n # remove cols/rows\n if any([val > 0 for val in [remove_top, remove_right, remove_bottom, remove_left]]):\n image = image[remove_top:(height - remove_bottom), remove_left:(width - remove_right), ...]\n\n # add cols/rows (black)\n if any([val > 0 for val in [pad_top, pad_bottom, pad_left, pad_right]]):\n image = np.pad(image, ((pad_top, pad_bottom), \\\n (pad_left, pad_right), \\\n (0, 0)), \\\n mode=\"constant\")\n\n return image", "def set_slide_neutral(self):\n print(\"Moving to neutral pose...\")\n joint_positions = deepcopy(self.neutral_joint_positions)\n\n joint_positions['right_j5'] = joint_positions['right_j5'] - np.pi / 2.\n self._right_arm.move_to_joint_positions(joint_positions)", "def set_ratio(self, ratio: tuple) -> None:\r\n self.ratio = ratio", "def src_set_ratio(state, new_ratio):\n return _lib.src_set_ratio(state, new_ratio) if state else None", "def _update_classification_localization_weight_ratio(configs, ratio):\n meta_architecture = configs[\"model\"].WhichOneof(\"model\")\n if meta_architecture == \"faster_rcnn\":\n model = configs[\"model\"].faster_rcnn\n model.first_stage_localization_loss_weight = 1.0\n model.first_stage_objectness_loss_weight = ratio\n model.second_stage_localization_loss_weight = 1.0\n model.second_stage_classification_loss_weight = ratio\n if meta_architecture == \"ssd\":\n model = configs[\"model\"].ssd\n model.loss.localization_weight = 1.0\n model.loss.classification_weight = ratio", "def undersample_majority(df, ratio=1.0, random_state=3):\n count_class_0, count_class_1 = df[\"Status\"].value_counts()\n df_class_0 = df[df[\"Status\"] == \"paid\"]\n df_class_1 = df[df[\"Status\"] == \"defaulted\"]\n # print(count_class_0)\n # print(count_class_1)\n df_class_0_under = df_class_0.sample(\n int(ratio * count_class_1), random_state=random_state\n )\n df_train_under = pd.concat([df_class_0_under, df_class_1], axis=0)\n # print(df_train_under['Status'].value_counts)\n return df_train_under", "def generate_noise_image(self, content_image, noise_ratio=0.6):\n noise_image = np.random.uniform(-20, 20,\n (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype('float32')\n # White noise image from the content representation. 
Take a weighted average\n # of the values\n img = noise_image * noise_ratio + content_image * (1 - noise_ratio)\n return img", "def _neutral(self) -> np.ndarray:\n # Get the neutral configuration of the actual model\n qpos = neutral(self.robot.pinocchio_model)\n\n # Make sure it is not out-of-bounds\n position_limit_lower = self.robot.position_limit_lower\n position_limit_upper = self.robot.position_limit_upper\n for idx, val in enumerate(qpos):\n lo, hi = position_limit_lower[idx], position_limit_upper[idx]\n if hi < val or val < lo:\n qpos[idx] = 0.5 * (lo + hi)\n\n # Return rigid/flexible configuration\n if self.simulator.use_theoretical_model:\n return qpos[self.robot.rigid_joints_position_idx]\n return qpos", "def correct_rhohv(radar, rhohv_name=\"RHOHV\", snr_name=\"SNR\"):\n rhohv = radar.fields[rhohv_name][\"data\"].copy()\n snr = radar.fields[snr_name][\"data\"].copy()\n\n natural_snr = 10 ** (0.1 * snr)\n natural_snr = natural_snr.filled(-9999)\n rho_corr = rhohv * (1 + 1 / natural_snr)\n\n # Not allowing the corrected RHOHV to be lower than the raw rhohv\n rho_corr[np.isnan(rho_corr) | (rho_corr < 0) | (rho_corr > 1)] = 1\n try:\n rho_corr = rho_corr.filled(1)\n except Exception:\n pass\n\n return rho_corr", "def skinPercent(*args, ignoreBelow: Union[float, bool]=0.0, normalize: bool=True, pruneWeights:\n float=0.0, relative: bool=True, resetToDefault: bool=True, transform:\n Union[AnyStr, bool]=\"\", transformMoveWeights: Union[AnyStr, List[AnyStr]]=\"\",\n transformValue: Union[List[AnyStr, float], List[List[AnyStr, float]]]=None,\n value: bool=True, zeroRemainingInfluences: bool=True, q=True, query=True,\n **kwargs)->Union[None, Any]:\n pass", "def clean_ratio(sub, total, default=0.0):\n return sub / total if total else default", "def balance_all(self,\n majority_minority_ratio: int = 1,\n random_state: int or None = RANDOM_STATE\n ):\n\n self.balance_training(majority_minority_ratio, random_state=random_state)\n self.balance_testing(majority_minority_ratio, random_state=random_state)", "def lostMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(lostECs)\n parentGraph.name = 'Lost metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def randomly_negate_level(value: Any) -> Any:\n return -value if ImageTransformationBase._toss_fair_coin() else value", "def test_colormap_as_colors_silhouette(self):\n # Generate a blobs data set\n X, y = make_blobs(\n n_samples=1000, n_features=12, centers=8, shuffle=False, random_state=0\n )\n\n fig = plt.figure()\n ax = fig.add_subplot()\n\n visualizer = SilhouetteVisualizer(\n MiniBatchKMeans(random_state=0), ax=ax, colors=\"cool\"\n )\n visualizer.fit(X)\n visualizer.finalize()\n\n tol = (\n 3.2 if sys.platform == \"win32\" else 0.01\n ) # Fails on AppVeyor with RMS 3.143\n self.assert_images_similar(visualizer, remove_legend=True, tol=tol)", "def remove_missing_values(train_data, test_data, ratio):\n missing_ratios = _get_missing_ratios(train_data)\n removed_attribute_indexes = list(\n 
map(\n lambda item: str(item[0] + 1),\n filter(lambda item: item[1] > ratio, enumerate(missing_ratios))))\n data_filter = Filter(\n classname=\"weka.filters.unsupervised.attribute.Remove\",\n options=[\"-R\", \",\".join(removed_attribute_indexes)])\n data_filter.inputformat(test_data)\n return data_filter.filter(train_data), data_filter.filter(test_data)", "def test_colors_silhouette(self):\n # Generate a blobs data set\n X, y = make_blobs(\n n_samples=1000, n_features=12, centers=8, shuffle=False, random_state=0\n )\n\n fig = plt.figure()\n ax = fig.add_subplot()\n\n visualizer = SilhouetteVisualizer(\n MiniBatchKMeans(random_state=0),\n ax=ax,\n colors=[\"red\", \"green\", \"blue\", \"indigo\", \"cyan\", \"lavender\"],\n )\n visualizer.fit(X)\n visualizer.finalize()\n\n self.assert_images_similar(visualizer, remove_legend=True)", "def set_transparent(img):\n assert img.shape[-1] == 4\n white_pix = np.all(img == [255, 255, 255, 255], axis=-1)\n # print(white_pix)\n img[white_pix, -1] = 0\n # return img", "def setup_ratio(args, ax, ax_ratio):\n main_ticks = ax.yaxis.get_major_ticks()\n main_ticks[0].label1.set_visible(False)\n ax.yaxis.set_label_coords(-0.12,1)\n ax_ratio.yaxis.set_label_coords(-0.12,.5)\n if args.logx:\n ax_ratio.set_xscale('log')\n if args.xlabel:\n ax_ratio.set_xlabel(tex_escape(args.xlabel), x=1, ha='right')\n if args.rlabel:\n ax_ratio.set_ylabel(args.rlabel)\n if args.limits:\n ax_ratio.set_xlim(args.limits[0],args.limits[1])\n if args.rmin is not None:\n ax_ratio.set_ylim(bottom=args.rmin)\n if args.rmax is not None:\n ax_ratio.set_ylim(top=args.rmax)\n ax_ratio.yaxis.grid(True)\n xmin, xmax, ymin, ymax = ax_ratio.axis()\n ax_ratio.yaxis.set_major_locator(ticker.MaxNLocator(3))\n ax_ratio.yaxis.set_minor_locator(ticker.AutoMinorLocator())\n if not args.logx:\n ax_ratio.xaxis.set_minor_locator(ticker.AutoMinorLocator())\n return", "def set_strength_ratios(\n self,\n strength_ratios: Union[float, Tuple[float], np.ndarray],\n ):\n self._strength_ratios = np.clip(\n _convert_to_np_array(strength_ratios, self._num_motors), 0, 1)", "async def player_ratio(self, ctx):\r\n player = ctx.message.content.split(' ')[1]\r\n if os.environ.get(\"WoW_Token\") is None:\r\n return\r\n else:\r\n async with aiohttp.ClientSession().get('https://us.api.battle.net/wow/character/zul\\'jin/' + player + '?fields=pvp&locale=en_US&apikey=' + os.environ.get(\"WoW_Token\")) as res:\r\n if res.status == 200:\r\n data = await res.json()\r\n player_pvp_stats = data['pvp']['brackets']['ARENA_BRACKET_3v3']\r\n await ctx.message.channel.send(u\"Player: {:s}\").format(player)\r\n await ctx.message.channel.send(\"Rating: {:d}\".format(player_pvp_stats['rating']))\r\n await ctx.message.channel.send(\"Season Wins: {:d}\".format(player_pvp_stats['seasonWon']))\r\n await ctx.message.channel.send(\"Season Losses: {:d}\".format(player_pvp_stats['seasonLost']))\r\n\r\n if player_pvp_stats['seasonWon'] == 0 or player_pvp_stats['seasonLost'] == 0:\r\n await ctx.message.channel.send(\"Ratio: 0\")\r\n else:\r\n await ctx.message.channel.send(\"Ratio: {:.4f}\".format(\r\n float(player_pvp_stats['seasonWon'])/\r\n float(player_pvp_stats['seasonLost']))\r\n )", "def unifiedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph: \n parentNeofunctionalised = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = 
self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is False:\n graph = parentNeofunctionalised.union(childNeofunctionalised, addCount = False, updateName = False)\n \n else:\n unifiedMetabolismEnzymes = self.unifiedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = unifiedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph", "def update_classify(self, img, rho=0.01, threshold=2.5):\n self.update(img, rho)\n return self.classify(img, threshold)", "def generate_noise_image(content_image, noise_ratio = CONFIG.NOISE_RATIO):\n\tnoise_img = np.random.uniform(-20,20,(1, CONFIG.IMAGE_HEIGHT, CONFIG.IMAGE_WIDTH, CONFIG.COLOR_CHANNELS)).astype('float32')\n\n\t# Setting the resulting image to be the weighted average of the content image and noise_image\n\tresult_img = noise_img * noise_ratio + content_image * (1 - noise_ratio)\n\n\treturn result_img", "def random_img_to_gray(self, img, p = 0.5):\n if self.decision(p):\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img = np.zeros_like(img)\n img[:, :, 0] = gray\n img[:, :, 1] = gray\n img[:, :, 2] = gray\n return img", "def test_nan_color_copy():\n\n data = np.zeros((16, 16))\n\n f1 = FITSFigure(data)\n f1.show_grayscale()\n f1.set_nan_color('blue')\n\n f2 = FITSFigure(data)\n f2.show_grayscale()\n f2.set_nan_color('red')\n\n assert f1.image.get_cmap()._rgba_bad == (0.0, 0.0, 1.0, 1.0)\n assert f2.image.get_cmap()._rgba_bad == (1.0, 0.0, 0.0, 1.0)", "def line_ratio(ratio_name,**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n GR = glo.global_results()\n\n if ratio_name == 'NII':\n line1,line2 = '[NII]122','[NII]205'\n L_line1 = getattr(GR,'L_'+line1+'_sun')\n L_line2 = getattr(GR,'L_'+line2+'_sun')\n # Get ratio where the two samples overlap:\n ratio = L_line1 / L_line2\n ratio = ratio[ratio != 0]\n label = '%s / %s' % (line1,line2)\n\n if ratio_name == 'OICII':\n line1,line2 = '[OI]63','[CII]'\n L_line1 = getattr(GR,'L_'+line1+'_sun')\n L_line2 = getattr(GR,'L_'+line2+'_sun')\n # Get ratio where the two samples overlap:\n ratio = L_line1 / L_line2\n ratio = ratio[ratio > 1e-2]\n ratio = np.log10(ratio[ratio != 0])\n label = 'log %s / %s' % (line1,line2)\n\n fig,ax = plt.subplots(figsize=(10,8))\n h = ax.hist(ratio,bins=10,color='orange')\n\n ax.set_xlabel(label,fontsize=15)\n if not os.path.isdir(p.d_plot + 'luminosity/'): os.mkdir(p.d_plot + 'luminosity/') \n plt.savefig(p.d_plot + 'luminosity/ratio_%s' % ratio_name,dpi=300)", "def oversample_minority(df, ratio=1.0, random_state=3):\n count_class_0, count_class_1 = df[\"Status\"].value_counts()\n df_class_0 = df[df[\"Status\"] == \"paid\"]\n df_class_1 = df[df[\"Status\"] == \"defaulted\"]\n # print(count_class_0)\n # print(count_class_1)\n df_class_1_over = df_class_1.sample(\n int(ratio * count_class_0), replace=True, random_state=random_state\n )\n df_train_over = pd.concat([df_class_0, df_class_1_over], axis=0)\n # print(df_train_over['Status'].value_counts())\n return df_train_over", "def aspect_ratio(self, aspect_ratio: float):\n assert 
type(aspect_ratio) in (int, float)\n self._aspect_ratio = aspect_ratio\n self._reset_matrix()", "def ratio_cleaner(data, min_ratio_value=1.0):\n ratio_attribs = []\n for attrib in list(data):\n if attrib.endswith('_chance'):\n ratio_attribs.append(attrib)\n\n indices_to_remove = []\n for attrib in ratio_attribs:\n all_non_zero_ratios_for_shift = []\n all_ratios_for_shift = []\n for ratio in data[attrib]:\n all_ratios_for_shift.append(ratio)\n if ratio > 0:\n all_non_zero_ratios_for_shift.append(ratio)\n\n number_of_ratios = len(all_ratios_for_shift)\n number_of_non_zero_ratios = len(all_non_zero_ratios_for_shift)\n\n ratios_per_day_of_week = number_of_ratios / 7\n\n split_time = attrib.replace('_chance', '').split(' - ')\n military_start_time = split_time[0]\n military_end_time = split_time[1]\n\n non_zero_ratios_per_day_of_week = number_of_non_zero_ratios / ratios_per_day_of_week # 0.0 - 7.0 (more clear)\n # scaled_non_zero_ratios_per_day_of_week = number_of_non_zero_ratios / number_of_ratios # 0.0 - 1.0\n\n if non_zero_ratios_per_day_of_week < min_ratio_value:\n attrib_as_shift = attrib.replace('_chance', '')\n index_to_remove = data['shifts_key'].iloc[0].index(attrib_as_shift)\n indices_to_remove.append(index_to_remove)\n\n data.drop(attrib, axis=1, inplace=True)\n\n sorted_indices_to_remove = sorted(indices_to_remove, reverse=True)\n cleaned_shifts_key = data['shifts_key'].iloc[0]\n for index in sorted_indices_to_remove:\n cleaned_shifts_key.pop(index)\n\n # set every shifts_key row to the newly updated one.\n data['shifts_key'] = [cleaned_shifts_key] * len(data['shifts_key'])\n for index, row in data.iterrows():\n for removal_index in sorted_indices_to_remove:\n row['shifts_targets'].pop(removal_index)\n\n return data", "def set_multiplex_ratio(ratio):\n send_command(0xA8)\n send_command(ratio)", "def uniqueish_color():\n return plt.cm.gist_ncar(np.random.random())", "def denoise(img, h=10, hForColor=None, templateWindowSize=7, searchWindowSize=21):\n\tif hForColor is None:\n\t\thForColor=h\n\ttmp = img.copy()\n\tif len(img.shape) != 3:\n\t\tdst = cv2.fastNlMeansDenoising(tmp, None, h, templateWindowSize, searchWindowSize)\n\telse:\n\t\tdst = cv2.fastNlMeansDenoisingColored(img, None, h, hForColor, templateWindowSize, searchWindowSize)\n\treturn dst", "def test_colormap_silhouette(self):\n # Generate a blobs data set\n X, y = make_blobs(\n n_samples=1000, n_features=12, centers=8, shuffle=False, random_state=0\n )\n\n fig = plt.figure()\n ax = fig.add_subplot()\n\n visualizer = SilhouetteVisualizer(\n MiniBatchKMeans(random_state=0), ax=ax, colormap=\"gnuplot\"\n )\n visualizer.fit(X)\n visualizer.finalize()\n\n self.assert_images_similar(visualizer, remove_legend=True)", "def setNoZeroColor():\n dislin.nobgd()", "def fix_nationality_field(self):\n where = self.df[\"Nationality\"] == \"None\"\n where &= self.df[\"Nationality\"] == \"white\"\n self.df[\"Nationality\"].loc[where] = 0\n self.df[\"Nationality\"].loc[np.invert(where)] = 1", "def _grow_solo_secondary(self, amt):\n self.ratio -= amt\n self.ratio = max(self.min_ratio, self.ratio)", "def compression_ratio(self, compression_ratio):\n if self._configuration.client_side_validation and compression_ratio is None:\n raise ValueError(\"Invalid value for `compression_ratio`, must not be `None`\") # noqa: E501\n\n self._compression_ratio = compression_ratio", "def testSetColorCorrectionsNone(self):\n self.node.color_corrections = None\n\n self.assertEqual(\n [],\n self.node.color_corrections\n )", "def set_ratio(self, value):\n scene = 
self.scenes[self.current_scene]\n scene.set_perspective(ratio=value)\n self.redraw()", "def replace_single_color(img, color, new_color):\n backgound_binary = np.where((img == color).all(axis=2))\n img[backgound_binary] = new_color\n return img", "def test_color_balance_random_images(self, img):\n\n # color balance only works if every channel has at least two different\n # values, otherwise everything in that channel would be mapped to 0\n for channel in cv2.split(img):\n assume(len(np.unique(channel)) >= 2)\n\n balanced_img = balance_color(img, percentile=0)\n\n assert balanced_img.max() == 255, \\\n \"Maximum of a balanced image should be 255\"\n assert balanced_img.min() == 0, \\\n \"Minimum of a balanced image should be 0\"\n for channel in cv2.split(balanced_img):\n assert channel.max() == 255, \\\n \"Maximum of each channel should be 255\"\n assert channel.min() == 0, \\\n \"Minimum of each channel should be 0\"", "def remove_dark(self):\r\n self.decimate(numpy.isfinite(self.z))", "def noisy_color(col, noise, amount) :\n if random.random() < noise :\n red = (col[0] + random.randrange(-amount,amount))\n green = (col[1] + random.randrange(-amount,amount))\n blue = (col[2] + random.randrange(-amount,amount))\n red = clamp(red,0,255)\n green = clamp(green,0,255)\n blue = clamp(blue,0,255)\n return (red,green,blue)\n else :\n return col", "def remove_dark_background(self, image_array):\n\n cut_off = self.get_image_balance(image_array, False)\n if cut_off < 200:\n cut_off = 200\n new_array = image_array.copy()\n new_array.setflags(write=1)\n for row_number, each_row in enumerate(new_array):\n for pixel_number, each_pixel in enumerate(each_row):\n if reduce(lambda x, y: int(x) + int(y), each_pixel[:3]) / 3 > cut_off:\n new_array[row_number][pixel_number] = image_array[row_number][pixel_number]\n else:\n new_array[row_number][pixel_number] = [0, 0, 0] # Black\n return new_array", "def auto_clean(image, background_value=25, background_saturation=20,\n colors=8, sample_fraction=5, white_background=False,\n saturate=True, palette=None):\n if background_value < 1:\n background_value = 1\n elif background_value > 100:\n background_value = 100\n if background_saturation < 1:\n background_saturation = 1\n elif background_saturation > 100:\n background_saturation = 100\n if sample_fraction < 1:\n sample_fraction = 1\n elif sample_fraction > 100:\n sample_fraction = 100\n if colors < 2:\n colors = 2\n elif colors > 128:\n colors = 128\n Options = namedtuple(\n 'options',\n ['quiet', 'sample_fraction', 'value_threshold', 'sat_threshold']\n )\n options = Options(\n quiet=True,\n sample_fraction=sample_fraction / 100.0,\n value_threshold=background_value / 100.0,\n sat_threshold=background_saturation / 100.0,\n )\n rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n if palette is None:\n samples = noteshrink.sample_pixels(rgb_image, options)\n palette = get_palette(samples, colors, background_value,\n background_saturation)\n labels = noteshrink.apply_palette(rgb_image, palette, options)\n if saturate:\n palette = palette.astype(np.float32)\n pmin = palette.min()\n pmax = palette.max()\n palette = 255 * (palette - pmin) / ((pmax - pmin) or 1)\n palette = palette.astype(np.uint8)\n if white_background:\n palette = palette.copy()\n palette[0] = (255, 255, 255)\n return palette[labels][:, :, ::-1] # swap R and G channels", "def _limit_by_ratio(self):\n\n if self._ratio_bounds is None:\n return\n\n numerator_col, denominator_col = self._ratio_cols\n min_ratio, max_ratio = sorted(self._ratio_bounds)\n\n 
overlap_idx = self._hybrid_meta[MERGE_COLUMN].isin(\n self.data.merge_col_overlap_values\n )\n\n numerator_vals = self._hybrid_meta[numerator_col].copy()\n denominator_vals = self._hybrid_meta[denominator_col].copy()\n\n ratios = (\n numerator_vals.loc[overlap_idx]\n / denominator_vals.loc[overlap_idx]\n )\n ratio_too_low = (ratios < min_ratio) & overlap_idx\n ratio_too_high = (ratios > max_ratio) & overlap_idx\n\n numerator_vals.loc[ratio_too_high] = (\n denominator_vals.loc[ratio_too_high].values * max_ratio\n )\n denominator_vals.loc[ratio_too_low] = (\n numerator_vals.loc[ratio_too_low].values / min_ratio\n )\n\n h_num_name = \"hybrid_{}\".format(numerator_col)\n h_denom_name = \"hybrid_{}\".format(denominator_col)\n self._hybrid_meta[h_num_name] = numerator_vals.values\n self._hybrid_meta[h_denom_name] = denominator_vals.values", "def to_aspect_ratio_add(image, target_ratio):\n height = image.shape[0]\n width = image.shape[1]\n ratio = width / height\n\n pad_top = 0\n pad_bottom = 0\n pad_left = 0\n pad_right = 0\n\n # loops here are inefficient, but easy to read\n i = 0\n if ratio < target_ratio:\n # vertical image, height > width\n while ratio < target_ratio:\n if i % 2 == 1:\n pad_right += 1\n width += 1\n else: # i % 4 == 3\n pad_left += 1\n width += 1\n ratio = width / height\n i += 1\n elif ratio > target_ratio:\n # horizontal image, width > height\n while ratio > target_ratio:\n if i % 2 == 1:\n pad_top += 1\n height += 1\n else: # i % 4 == 3\n pad_bottom += 1\n height += 1\n ratio = width / height\n i += 1\n\n # add black cols/rows\n if any([val > 0 for val in [pad_top, pad_bottom, pad_left, pad_right]]):\n image = np.pad(image, ((pad_top, pad_bottom), \\\n (pad_left, pad_right), \\\n (0, 0)), \\\n mode=\"constant\")\n\n return image", "def drop_connect_pt(x, drop_ratio):\n keep_ratio = 1.0 - drop_ratio\n mask = torch.empty([x.shape[0], 1, 1, 1], dtype=x.dtype, device=x.device)\n mask.bernoulli_(keep_ratio)\n x.div_(keep_ratio)\n x.mul_(mask)\n return x", "def hsv_normalise(self, frame):\n tmp = cv.CreateImage(cv.GetSize(frame), 8, 3)\n cv.CvtColor(frame, tmp, cv.CV_BGR2HSV)\n\n H,S,V = [ cv.CreateImage(cv.GetSize(frame), 8, 1) for _ in range(3) ]\n cv.Split(tmp, H, S, V, None)\n\n cv.Set(V, 140)\n\n cv.Merge(H,S,V, None, tmp);\n cv.CvtColor(tmp, tmp, cv.CV_HSV2BGR),\n out = tmp\n\n return out", "def drop_connect(x, drop_ratio):\n keep_ratio = 1.0 - drop_ratio\n mask = torch.empty([x.shape[0], 1, 1, 1], dtype=x.dtype, device=x.device)\n mask.bernoulli_(keep_ratio)\n x.div_(keep_ratio)\n x.mul_(mask)\n return x", "def drop_connect(x, drop_ratio):\n keep_ratio = 1.0 - drop_ratio\n mask = torch.empty([x.shape[0], 1, 1, 1], dtype=x.dtype, device=x.device)\n mask.bernoulli_(keep_ratio)\n x.div_(keep_ratio)\n x.mul_(mask)\n return x", "def happy_color(health):\n if health > 0.8:\n return 'g'\n if health > 0.6:\n return 'y'\n return 'r'", "def compute_rating(positive_count, neutral_count, negative_count):\n total = positive_count + neutral_count + negative_count\n if total < 5:\n return 'NEUTRAL'\n\n pos = positive_count/total\n neg = negative_count/total\n\n if pos > 0.3 and neg > 0.3:\n return 'CONTROVERSIAL'\n if pos > 0.7 or (pos > 0.5 and pos >= neg * 2):\n return 'POSITIVE'\n if neg > 0.7 or (neg > 0.5 and neg >= pos * 2):\n return 'NEGATIVE'\n return 'NEUTRAL'", "def _InitialiseNeutralisationReactions():\n patts = (\n # Imidazoles\n (\"[n+;H]\", \"n\"),\n # Amines\n (\"[N+;!H0]\", \"N\"),\n # Carboxylic acids and alcohols\n (\"[$([O-]);!$([O-][#7])]\", \"O\"),\n # Thiols\n 
(\"[S-;X1]\", \"S\"),\n # Sulfonamides\n (\"[$([N-;X2]S(=O)=O)]\", \"N\"),\n # Enamines\n (\"[$([N-;X2][C,N]=C)]\", \"N\"),\n # Tetrazoles\n (\"[n-]\", \"[nH]\"),\n # Sulfoxides\n (\"[$([S-]=O)]\", \"S\"),\n # Amides\n (\"[$([N-]C=O)]\", \"N\"),\n )\n return [(Chem.MolFromSmarts(x), Chem.MolFromSmiles(y, False)) for x, y in patts]", "def _denoise(self, img, weight):\n\n from skimage.filters import denoise_tv_chambolle\n\n img = denoise_tv_chambolle(img, weight=weight) * 255\n\n return img.astype(\"uint8\")", "def channel_blend(pixSrc, pixPng, srcH, srcW, x, y, mode='weight', color_match=False):\n modes = [item for i, item in blend_mode.items()]\n # 1.find all indices satisfying conditions, and replace the value of indices in source image with logo image.\n # note: from pillow to numpy, (w,h) has converted to (h,w).\n index = np.where(pixPng[:, :, 3] > 15)\n y_id = index[0] + y - 1\n x_id = index[1] + x - 1\n\n # ensure the exceeding part remained in boundary.\n y_id = np.where(y_id >= srcH, srcH - 1, y_id)\n x_id = np.where(x_id >= srcW, srcW - 1, x_id)\n id = (y_id, x_id)\n\n # matching logo color with source image.\n if color_match:\n pixSrc_ = pixSrc.copy()[..., :3]\n pixPng_ = pixPng.copy()[..., :3]\n mean_source, stddev_source = cv2.meanStdDev(pixSrc_)\n mean_png, stddev_png = cv2.meanStdDev(pixPng_)\n mdiff = mean_png - mean_source\n mdiff = np.array(mdiff).reshape((1, 1, 3))\n pixPng_ = pixPng_.astype(np.float64)\n pixPng_ -= mdiff\n pixPng_ = np.clip(pixPng_, 0, 255)\n pixPng_ = pixPng_.astype(np.uint8)\n pixPng[..., :3] = pixPng_\n\n if mode not in modes: raise NotImplementedError(\n \"only {0:'naive',1:'weight',2:'poisson',3:'multiply'} are supported.\")\n if mode == 'weight':\n pixSrc = weight_paste(pixSrc, pixPng, id, index)\n elif mode == 'naive':\n pixSrc = naive_paste(pixSrc, pixPng, id, index)\n elif mode == 'poisson':\n pixSrc = poisson_blend(pixSrc, pixPng, id, index, x, y)\n elif mode == 'multiply':\n pixSrc = multiply(pixSrc, pixPng, id, index)\n\n return cv2.cvtColor(pixSrc, cv2.COLOR_RGBA2RGB)", "def contrast_from_bg(cls, col=\"#000000\", dark_default=\"000000\", light_default=\"FFFFFF\", hashed=\"#\"):\n trigger = float(0.45) #Values greater than this result in black text\n if not col:\n return \"#000000\" #Default to black\n if col in (\"Transparent\",\"transparent\"):\n return \"#000000\" #Default to black\n if not hashed:\n hashed = \"\"\n elif hashed is True:\n hashed = \"#\"\n try:\n col_out = cls.colour_to_rgb_tuple(col)\n r,g,b = col_out\n div = 255.0 #Produces a value between 0-1 as a float\n lum = float(0.2126*pow(r/div, 2.2)) + float(0.7152*pow(g/div, 2.2)) + float(0.0722*pow(b/div, 2.2))\n except (TypeError, ValueError):\n return dark_default\n #logging.info (\"Luminosity: %s\" % lum)\n #Decision gate:\n if lum >= trigger: #Light background, need dark text\n return \"%s%s\" % (hashed, dark_default)\n else: #Dark background, need light text\n return \"%s%s\" % (hashed, light_default)", "def test_mixing_ratio_from_relative_humidity():\n p = 1013.25 * units.mbar\n temperature = 20. 
* units.degC\n rh = 81.7219 * units.percent\n w = mixing_ratio_from_relative_humidity(p, temperature, rh)\n assert_almost_equal(w, 0.012 * units.dimensionless, 3)", "def _get_color_white_level(self, color):\n d0, _, d2 = self._get_color_dominance_indices(color)\n if color[d0] == 0:\n return 0\n return color[d2]/color[d0]", "def set_transparent_percent(self, percent):\n self.params['transparent_percent'] = percent", "def IMAGE_PREPROCESSING_DEFAULT(img, grayscale_only=False):\n if grayscale_only:\n return cv2.cvtColor(img.copy(), cv2.COLOR_BGR2GRAY)\n else:\n img = cv2.medianBlur(img, 9)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img = cv2.bilateralFilter(img, 7, 13, 13)\n return cv2.Canny(img,100,200)", "def _shrink_solo_secondary(self, amt):\n self.ratio += amt\n self.ratio = min(self.max_ratio, self.ratio)", "def remove_background1(img):\n #img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img = img.astype(np.uint8)\n # Binarize the image using OTSU's algorithm. This is used to find the center\n # of mass of the image, and find the threshold to remove background noise\n threshold, _ = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n # Remove noise - anything higher than the threshold. Note that the image is still grayscale\n img[img > threshold] = 255\n\n return img", "def trim(self, ratio=10000):\n trimmed, total = 0, 0\n for sources in self.sources():\n for s in (self.tp_by_source_and_text[sources],\n self.fp_by_source_and_text[sources],\n self.fn_by_source_and_text[sources],\n self.overlap_by_source_and_text[sources]):\n try:\n max_count = s.most_common(1)[0][1]\n except IndexError:\n continue\n for k, v in list(s.items()):\n if v * ratio < max_count:\n trimmed += 1\n del s[k]\n total += 1\n print(f'trimmed {trimmed}/{total} ({trimmed/total:.1%})',\n file=sys.stderr, flush=True)", "def lowes_ratio_test(matches, ratio_threshold=0.6):\n filtered_matches = []\n for m, n in matches[:len(matches)-100]:\n if m.distance < ratio_threshold * n.distance:\n filtered_matches.append(m)\n return filtered_matches", "def testSetColorDecisionsNone(self):\n self.node.color_decisions = None\n\n self.assertEqual(\n [],\n self.node.color_decisions\n )", "def heuristic(state, player):\n if is_checkmate(state):\n if state.active_color == cc.WHITE_ACTIVE:\n return -9999\n else:\n return 9999\n else:\n return material_advantage(state, player)", "def _monochrome(self):\r\n\r\n self.img = self.img.convert('L')\r\n self.img = Image.eval(self.img, lambda a: 0 if a <= MONOWEIGHT else 255)", "def test_specific_humidity_from_mixing_ratio_no_units():\n w = 0.01215\n q = specific_humidity_from_mixing_ratio(w)\n assert_almost_equal(q, 0.01200, 5)", "def normalized(self):\n\n # Scale results to max at 255 for image display\n max_distance = np.max(self.depth_prediction)\n pred = 255 * self.depth_prediction // max_distance\n\n # Convert results to uint8\n pred = pred.astype(np.uint8, copy=True)\n\n # Do fancy coloring\n pred = cv2.applyColorMap(pred, cv2.COLORMAP_JET)\n\n return pred", "def remove_noise(self):\n kernel = np.ones((5, 5), np.uint8)\n self.frame = cv.morphologyEx(self.frame, cv.MORPH_CLOSE, kernel)\n self.frame = cv.morphologyEx(self.frame, cv.MORPH_OPEN, kernel)", "def test_mixing_ratio_from_rh_dimensions():\n p = 1000. * units.mbar\n temperature = 0. * units.degC\n rh = 100. * units.percent\n assert (str(mixing_ratio_from_relative_humidity(p, temperature, rh).units)\n == 'dimensionless')", "def test_rh_mixing_ratio():\n p = 1013.25 * units.mbar\n temperature = 20. 
* units.degC\n w = 0.012 * units.dimensionless\n rh = relative_humidity_from_mixing_ratio(p, temperature, w)\n assert_almost_equal(rh, 81.72498 * units.percent, 3)", "def molecular_to_photon_fraction(m, qy_ratio):\n return photon_to_molecular_fraction(m, 1 / qy_ratio)", "def darkText(img):\n kernel = np.ones((30, 30), np.uint8) \n img_orig = cv2.morphologyEx(img, cv2.MORPH_BLACKHAT, kernel)\n \n TH = 150\n img_orig[(img_orig[:,:,0] < TH) | (img_orig[:,:,1] < TH) | (img_orig[:,:,2] < TH)] = (0,0,0)\n \n img_orig = closing(img_orig, size=(1, int(img.shape[1] / 8)))\n \n return (cv2.cvtColor(img_orig, cv2.COLOR_BGR2GRAY) != 0).astype(np.uint8)", "def __init__(self, negative_ratio: float = 1, pool_size: float = 10):\n super().__init__(pool_size=pool_size)\n self.negative_ratio = negative_ratio", "def normalize_image(self, factor, luminosity=None):\n if not luminosity:\n luminosity = self.average_luminosity()\n\n for i in range(len(self.pixels)):\n self.pixels[i] = self.pixels[i] * (factor / luminosity)", "def lightness_correction(self):\n points = self.color_lookup_table_points\n lightness_max_value = math.sqrt(3 * (255**2))\n deadpool = list()\n for index, point in enumerate(points[0]):\n point = self.get_value_tuple(index)\n lightness = int(math.sqrt(point[0]**2 + point[1]**2 + point[2]**2) * 255 / lightness_max_value)\n if not self.to_dark < lightness < self.to_bright:\n deadpool.append(index)\n self.color_lookup_table_points = (np.delete(points[0], deadpool),\n np.delete(points[1], deadpool),\n np.delete(points[2], deadpool))\n self.point_count = len(self.color_lookup_table_points[0])", "def zero_one(im):\n m = im.min()\n im = (im - m) / (im.max() - m)\n return im", "def darken_color(color, amount=0.5):\n import matplotlib.colors as mc\n import colorsys\n try:\n c = mc.cnames[color]\n except:\n c = color\n c = colorsys.rgb_to_hls(*mc.to_rgb(c))\n return colorsys.hls_to_rgb(c[0], amount * c[1], c[2])", "def set_hue_noise(self, offset):\n self.hue_noise = offset", "def set_contrast(level):\n send_command(0x81)\n send_command(level)", "def hmean_weighted(x, y, ratio=1.0):\n lweight, rweight = ratio2weights(ratio)\n if x == y:\n return float(x)\n elif x == 0.0 or y == 0.0:\n if lweight == 0.0:\n return float(x)\n elif rweight == 0.0:\n return float(y)\n else:\n return 0.0\n else:\n return _div(x * y, lweight * x + rweight * y)", "def divergedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n divergedMetabolismEnzymes = self.divergedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = divergedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = divergedMetabolismEnzymes[0].removeAllEnzymesExcept(parentNeofunctionalised.getEnzymes())\n childGraph = 
divergedMetabolismEnzymes[1].removeAllEnzymesExcept(childNeofunctionalised.getEnzymes())\n \n parentGraph.name = 'Diverged metabolism neofunctionalised enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def _get_neutral_ships(game_data):\n # deal with each abandonned ship\n for ship in game_data['ships'][0].copy():\n position = game_data['ships'][0][ship]\n player = None\n\n # treat the cases where the ship can be captured\n if game_data['board'][position][1] and not game_data['board'][position][2]:\n player = 1\n elif not game_data['board'][position][1] and game_data['board'][position][2]:\n player = 2\n\n # treat the case where the ship is captured\n if player != None:\n if ship in game_data['board'][position][player][ship]:\n new_ship = ship + '_2'\n game_data['board'][position][player][ship] = game_data['board'][position][0][new_ship]\n game_data['ships'][player][ship] = game_data['ships'][0][new_ship]\n\n del game_data['board'][position][0][ship]\n del game_data['ships'][0][ship]", "def photon_to_molecular_fraction(p, qy_ratio):\n return p / (p + qy_ratio * (1 - p))", "def _prune_filters_by_ratio(self,\n scope,\n params,\n ratio,\n place,\n lazy=False,\n only_graph=False,\n param_shape_backup=None,\n param_backup=None):\n if params[0].name() in self.pruned_list[0]:\n return\n param_t = scope.find_var(params[0].name()).get_tensor()\n pruned_idx = self.pruner.cal_pruned_idx(\n params[0].name(), np.array(param_t), ratio, axis=0)\n for param in params:\n assert isinstance(param, VarWrapper)\n param_t = scope.find_var(param.name()).get_tensor()\n if param_backup is not None and (param.name() not in param_backup):\n param_backup[param.name()] = copy.deepcopy(np.array(param_t))\n pruned_param = self.pruner.prune_tensor(\n np.array(param_t), pruned_idx, pruned_axis=0, lazy=lazy)\n if not only_graph:\n param_t.set(pruned_param, place)\n ori_shape = param.shape()\n if param_shape_backup is not None and (\n param.name() not in param_shape_backup):\n param_shape_backup[param.name()] = copy.deepcopy(param.shape())\n new_shape = list(param.shape())\n new_shape[0] = pruned_param.shape[0]\n param.set_shape(new_shape)\n _logger.debug(\n '|----------------------------------------+----+------------------------------+------------------------------|'\n )\n _logger.debug('|{:^40}|{:^4}|{:^30}|{:^30}|'.format(\n str(param.name()),\n str(ratio), str(ori_shape), str(param.shape())))\n self.pruned_list[0].append(param.name())\n return pruned_idx", "def neutralise(self):\n smi = self.smiles\n\n patts = [\n # Imidazoles\n ('[n+;H]','n'),\n # Amines\n ('[N+;!H0]','N'),\n # Carboxylic acids and alcohols\n ('[$([O-]);!$([O-][#7])]','O'),\n # Thiols\n ('[S-;X1]','S'),\n # Sulfonamides\n ('[$([N-;X2]S(=O)=O)]','N'),\n # Enamines\n ('[$([N-;X2][C,N]=C)]','N'),\n # Tetrazoles\n ('[n-]','[nH]'),\n # Sulfoxides\n ('[$([S-]=O)]','S'),\n # Amides\n ('[$([N-]C=O)]','N') ]\n\n reactions = [(Chem.MolFromSmarts(x),Chem.MolFromSmiles(y,False)) for x,y in patts]\n\n m = Chem.MolFromSmiles(smi)\n for i,(reactant, product) in enumerate(reactions):\n while m.HasSubstructMatch(reactant):\n rms = AllChem.ReplaceSubstructs(m, reactant, product)\n m = rms[0]\n\n # it doesn't matter is u choose to output a canonical smiles as the\n # sequence of atoms is changed calling `AllChem.ReplaceSubstructs\n self.smiles = 
Chem.MolToSmiles(m, isomericSmiles=False) #, canonical=False)", "def white_balance(image, percentage=0.006):\n\n image_uint8 = (255.0 * image).astype(np.uint8)\n\n pixels_total = image.shape[0] * image.shape[1]\n threshold = percentage * pixels_total\n\n _stretch_values(image_uint8, 0, threshold)\n _stretch_values(image_uint8, 1, threshold)\n _stretch_values(image_uint8, 2, threshold)\n\n return image_uint8 / 255.0", "def remove_background(img):\n \n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n\n img = img.astype(np.uint8)\n # Binarize the image using OTSU's algorithm. This is used to find the center\n # of mass of the image, and find the threshold to remove background noise\n threshold, _ = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n \n # Remove noise - anything higher than the threshold. Note that the image is still grayscale\n img[img > threshold] = 255\n\n return img", "def render_novel_image(self, idx_0, idx_1, ratio, resolution_level):\n pose_refine = camera.lie.se3_to_SE3(self.sdf_network.se3_refine.weight)\n pose_all = camera.pose.compose([pose_refine, self.dataset.pose_all[:, :3, :]])\n rays_o, rays_d = self.dataset.gen_rays_between(idx_0, idx_1, ratio, resolution_level=resolution_level, pose_all=pose_all)\n H, W, _ = rays_o.shape\n rays_o = rays_o.reshape(-1, 3).split(self.batch_size)\n rays_d = rays_d.reshape(-1, 3).split(self.batch_size)\n\n out_rgb_fine = []\n for rays_o_batch, rays_d_batch in zip(rays_o, rays_d):\n background_rgb = torch.ones([1, 3]) if self.use_white_bkgd else None\n\n render_out = self.renderer.render(rays_o_batch,\n rays_d_batch,\n # near,\n # far,\n cos_anneal_ratio=self.get_cos_anneal_ratio(),\n background_rgb=background_rgb)\n\n out_rgb_fine.append(render_out['color_fine'].detach().cpu().numpy())\n\n del render_out\n\n img_fine = (np.concatenate(out_rgb_fine, axis=0).reshape([H, W, 3]) * 256).clip(0, 255).astype(np.uint8)\n return img_fine", "def Contrast(img):\r\n factor = 2 * (np.random.rand() - 0.5) * 128\r\n assert (factor <= 128 and factor >= -128), 'contract factor value wrong'\r\n fvalue = 259.0/255.0 * (factor + 255.0)/(259.0-factor)\r\n img = np.round((img - 128.0)*fvalue + 128.0)\r\n img = np.where(img > 255, 255, img)\r\n img = np.where(img < 0, 0, img)\r\n img = np.uint8(img)\r\n return img", "def NII_ratio_ne(**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n GR = glo.global_results()\n\n line1,line2 = '[NII]122','[NII]205'\n L_line1 = getattr(GR,'L_'+line1+'_sun')\n L_line2 = getattr(GR,'L_'+line2+'_sun')\n # Get ratio where the two samples overlap:\n ratio = L_line1 / L_line2\n ne_mw = getattr(GR,'ne_mw')[ratio != 0]\n ratio = ratio[ratio != 0]\n label = '%s / %s' % (line1,line2)\n\n fig,ax = plt.subplots(figsize=(10,8))\n ax.set_xlabel('log ' + getlabel('ne'))\n ax.set_ylabel(label)\n ax.plot(np.log10(ne_mw), ratio, 'o', color='grey', alpha=0.7) \n xs = np.arange(ax.get_xlim()[0],ax.get_xlim()[1],0.1)\n ax.plot(xs,aux.NII_from_logne(xs),'-b')\n\n if not os.path.isdir(p.d_plot + 'luminosity/'): os.mkdir(p.d_plot + 'luminosity/') \n plt.savefig(p.d_plot + 'luminosity/NII_ratio_ne_%s%s' % (p.sim_name,p.sim_run),dpi=300)", "def conservedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n conservedMetabolismEnzymes = self.conservedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised= 
self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = conservedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolismEnzymes[0].removeAllEnzymesExcept(parentNeofunctionalised.getEnzymes())\n childGraph = conservedMetabolismEnzymes[1].removeAllEnzymesExcept(childNeofunctionalised.getEnzymes())\n \n parentGraph.name = 'Conserved metabolism neofunctionalised enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def make_skip_connection(self, input_channels, output_channels, expand_ratio, p,\n inplace=False):\n hidden_channels = round(input_channels * expand_ratio)\n return nn.Sequential(OrderedDict([\n ('expansion', self.conv_1x1_bn(\n input_channels, hidden_channels)),\n ('dropout', nn.Dropout2d(p, inplace=inplace)),\n ('reduction', nn.Sequential(*[\n nn.Conv2d(hidden_channels, output_channels, 1),\n self.get_bn_module(output_channels),\n ])),\n ]))", "def lostMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False).removeAllECsExcept(lostECs)\n parentGraph.name = 'Lost metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def safe_invert_gains(image, rgb_gain, red_gain, blue_gain):\n assert image.dim() == 3 and image.shape[0] == 3\n\n gains = torch.tensor([1.0 / red_gain, 1.0, 1.0 / blue_gain]) / rgb_gain\n gains = gains.view(-1, 1, 1)\n\n # Prevents dimming of saturated pixels by smoothly masking gains near white.\n gray = image.mean(dim=0, keepdims=True)\n inflection = 0.9\n mask = ((gray - inflection).clamp(0.0) / (1.0 - inflection)) ** 2.0\n\n safe_gains = torch.max(mask + (1.0 - mask) * gains, gains)\n return image * safe_gains" ]
[ "0.4983211", "0.48789826", "0.48116347", "0.4737011", "0.47265878", "0.46818957", "0.4571823", "0.45462266", "0.44600105", "0.4397929", "0.42731524", "0.427132", "0.4269558", "0.42693788", "0.42575642", "0.42499575", "0.42406985", "0.42318156", "0.4226033", "0.4211065", "0.41977745", "0.41921994", "0.41916838", "0.41903993", "0.41874236", "0.41733915", "0.41585425", "0.41452932", "0.41436285", "0.41376895", "0.41347808", "0.41263187", "0.4117085", "0.41090634", "0.41062918", "0.4106135", "0.41030654", "0.40843207", "0.40710133", "0.4059886", "0.40592566", "0.4051465", "0.40478978", "0.4045058", "0.40421006", "0.40378997", "0.40351143", "0.40179488", "0.40128812", "0.40109015", "0.40108427", "0.4007994", "0.40026098", "0.40026098", "0.39926383", "0.3992549", "0.3989411", "0.39856243", "0.39771876", "0.39702955", "0.39698708", "0.39600834", "0.39573964", "0.39472237", "0.39432615", "0.39247945", "0.3916762", "0.3911685", "0.39116737", "0.3910721", "0.39083225", "0.3897332", "0.38896036", "0.38826463", "0.38818133", "0.38767317", "0.38736564", "0.38718987", "0.386939", "0.38662502", "0.38622487", "0.38583618", "0.38577262", "0.38545683", "0.3854304", "0.385341", "0.38528633", "0.3849883", "0.3843832", "0.38431567", "0.3840769", "0.38383228", "0.38360915", "0.38328642", "0.3832035", "0.38267", "0.38162822", "0.38112608", "0.380729", "0.38066646" ]
0.7399279
0
It normalizes the last dimension of an ndarray to sum to 1. It can be used to convert (batches of) vectors to stochastic vectors or (batches of) matrices to right stochastic matrices. Right stochastic matrices are also called transition matrices.
def stochastic(x: np.ndarray) -> np.ndarray:
    n = np.linalg.norm(x, 1, axis=-1, keepdims=True)
    # n = np.sum(x, axis=-1, keepdims=True)  # todo: same result (except dtype), which is faster?
    with np.errstate(invalid="raise"):  # see: `normalized`
        return x / n
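A minimal usage sketch for the stochastic helper above (an illustration with made-up values, not part of the dataset row): for non-negative input, the L1 norm along the last axis equals the row sum, so dividing by it yields rows that sum to 1, i.e. a right stochastic (transition) matrix.

import numpy as np

# Illustrative values (assumed); any non-negative array with the same shape conventions works.
x = np.array([[1.0, 1.0, 2.0],
              [0.5, 0.5, 0.0]])
n = np.linalg.norm(x, 1, axis=-1, keepdims=True)  # == row sums for non-negative x
t = x / n
print(t.sum(axis=-1))  # -> [1. 1.]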
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalize(self,matrix):\n for i in range(self.N):\n matrix[self.N-1][i] = 0\n for i in range(self.n):\n matrix[self.N - 1][self.index(i,i)] = 1\n return matrix", "def normalize(a, axis=None):\n a_sum = a.sum(axis)\n if axis and a.ndim > 1:\n a_sum[a_sum == 0] = 1\n shape = list(a.shape)\n shape[axis] = 1\n a_sum.shape = shape\n\n return a / a_sum", "def normalize(v):\n\tdim = v.shape \n\tfor i in range(0, dim[0]-1):\n\t\tv[i,:,:] = (v[i,:,:].T/np.sum(v[i,:,:],1)).T\n\n\treturn v", "def __normalize_after_fft(arr):\n\n n1, n2 = arr.shape[0], arr.shape[1]\n for i in range(n1):\n for j in range(n2):\n arr[i, j] *= n1 * n2\n\n return arr", "def _normalize(X: np.ndarray) -> np.ndarray:\n # return X * np.sqrt(1 / np.sum(X ** 2, axis=1))[:, None]\n return X * np.sqrt(X.shape[1] / np.sum(X ** 2, axis=1))[:, None]", "def normalize_array(array):\n\n return array / np.sum(array, axis=1)[:, np.newaxis]", "def normalize_vecs(mat):\n #convert to array for operation\n order = len(mat.shape) - 1\n mat = COO(mat)\n row_sums = mat.sum(axis=order)\n mat = DOK(mat)\n for point in mat.data:\n divisor = row_sums[point[:-1]]\n mat[point] = mat[point] / divisor\n mat = COO(mat)\n return mat", "def normalize(self):\n self.desc += \", normalize\"\n self._vecs /= np.linalg.norm(self._vecs, axis=1)[:, np.newaxis]\n self.reindex()", "def _norm_along_last_axis(x):\n return np.sqrt(np.sum(np.square(x), axis=x.ndim - 1))", "def normalize(x):\n\n x_norm = np.linalg.norm(x, axis=1, keepdims=True)\n print(x_norm)\n x = x / x_norm\n ### END\n\n return x", "def norm1(X):\r\n # pass\r\n if X.shape[0]*X.shape[1] == 0:\r\n return 0\r\n return abs(X).sum()\r\n # return LA.norm(X, 1)\r", "def normalise(self):\n return self / self.mean(axis=1).reshape(self.shape[0], 1)", "def normalize(input_matrix):\n\n row_sums = input_matrix.sum(axis=1)\n try:\n assert (np.count_nonzero(row_sums)==np.shape(row_sums)[0]) # no row should sum to zero\n except Exception:\n raise Exception(\"Error while normalizing. 
Row(s) sum to zero\")\n new_matrix = input_matrix / row_sums[:, np.newaxis]\n return new_matrix", "def normalize_data(data=None):\n # Data pre-processing\n n = data.shape[0]\n for i in range(n):\n xx = data[i,:,:]\n xx -= np.mean(xx) # Centering in 0\n xx /= np.linalg.norm(xx) # Normalizing to 1\n data[i] = xx # Affect value\n return data", "def normalize_transition(self):\n self._t /= self._t.sum(1)[:, np.newaxis]", "def l1_normalize(x: np.ndarray) -> np.ndarray: # pylint: disable=invalid-name\n return x / x.sum()", "def normalize(w: torch.Tensor):\n\n if w.dim() > 1:\n return _matrix(w)\n\n return _vector(w)", "def normalise(x, dim=1):\n norm = torch.sqrt( torch.pow(x,2.).sum(dim) )\n if dim>0:\n x /= norm.unsqueeze(dim)\n return x", "def normalise1D(*vector):\n\n vector = np.array(vector).flatten() # 1D vector\n\n norm = np.linalg.norm(vector) # vector norm\n if norm == 0: return vector # vector is 0\n return vector/norm", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)", "def normalizeRows(x):\n N = x.shape[0]\n x /= np.sqrt(np.sum(x ** 2, axis=1)).reshape((N, 1)) + 1e-30\n return x", "def normalizeRows(x):\n\n ### YOUR CODE HERE\n # using l2 norm to normalize\n x = x / np.sqrt(np.sum(x * x, axis=-1, keepdims=True))\n ### END YOUR CODE\n\n return x", "def normalise(dataset):\n # Scale images to the [0, 1] range\n dataset = dataset.astype(\"float32\") / 255\n # Make sure images have shape (28, 28, 1)\n return np.expand_dims(dataset, -1)", "def normalize_vector_array (vector_array ):\r\n norms = np.linalg.norm (vector_array, axis=1 )\r\n norms = np.where (norms == 0, 1, norms ) # these filtered values belong to arrays that already are normalized\r\n\r\n return vector_array / norms.reshape (-1, 1 )", "def BatchNormalize(S):\n mu = np.mean(S, axis=0)\n v = np.mean((S-mu)**2, axis=0)\n S = (S - mu) / np.sqrt(v + epsilon)\n return S", "def normalize(data):\n data = numpy.asmatrix(data)\n std_devs = numpy.std(data, axis=1)\n std_devs[std_devs == 0] = 1 # prevent div by 0\n return (data - numpy.mean(data, axis=1)) / std_devs", "def normalize(array, norm=\"l2\"):\n scaler = Normalizer(copy=True, norm=norm)\n return scaler.fit_transform(array)", "def normalize(inp):\n\n out = inp / np.linalg.norm(inp, axis=1, keepdims=True)\n\n return out", "def normalize(a):\n a = np.array(a)\n return a / np.linalg.norm(a)", "def normalize(X, norm=..., *, axis=..., copy=..., return_norm=...):\n ...", "def normalised(a: np.ndarray, order: int = None, axis: int = -1) -> np.ndarray:\n norm = np.atleast_1d(np.linalg.norm(a, order, axis))\n return a / np.expand_dims(norm, axis)", "def standardize_single_array(x):\n if x is None:\n return None\n if tensor_util.is_tensor(x):\n x_shape_ndims = array_ops.rank(x)\n else:\n x_shape_ndims = len(x.shape)\n\n if (x_shape_ndims == 1 and (expected_shape is None or len(expected_shape) != 1)):\n if tensor_util.is_tensor(x):\n x = array_ops.expand_dims(x, axis=1)\n else:\n x = np.expand_dims(x, 1)\n return x", "def normalize_matrix(matrix, axis=1):\n if len(matrix.shape) == 1:\n # turn vector into matrix with one row\n matrix = matrix[np.newaxis, :]\n divisor = np.linalg.norm(matrix, axis=axis)[:, np.newaxis]\n # only normalize where divisor is not zero\n result = np.divide(matrix, divisor, out=np.zeros(matrix.shape), where=divisor != 0)\n return result", 
"def _normalize(x):\n tol = 1e-10\n dims = x.shape\n\n x = x.flatten()\n inverse = (np.sum(x**2) + tol) ** -.5\n x = x * inverse\n x = np.reshape(x, dims)\n\n return x", "def normalizeRows(x):\n\n ### YOUR CODE HERE\n norm2 = np.linalg.norm(x,2,axis = 1).reshape(x.shape[0],-1)\n x = x/norm2\n ### END YOUR CODE\n\n return x", "def _frz(a):\n if a.ndim == 0:\n a.shape = (1,)\n return a", "def normalise(x):\n x = np.copy(x)\n n_cols = x.shape[1]\n for col_index in range(n_cols):\n col = x[:, col_index]\n factor = np.max(col)\n x[:, col_index] = col / factor\n\n return x", "def normalize(self):\n self.number_of_vectors = self.values.shape[0]\n norm_2 = np.linalg.norm(self.values, axis=1)\n norm_1 = np.sum(self.values_planar, axis=1)\n norm_2 = np.repeat(norm_2, self.number_of_objectives).reshape(\n self.number_of_vectors, self.number_of_objectives\n )\n norm_1 = np.repeat(norm_1, self.number_of_objectives).reshape(\n self.number_of_vectors, self.number_of_objectives\n )\n norm_2[norm_2 == 0] = np.finfo(float).eps\n self.values = np.divide(self.values, norm_2)\n self.values_planar = np.divide(self.values_planar, norm_1)", "def normalize(array):\n\treturn array/np.max(array)", "def max_normalization(array):\n return 1/np.max(array) * array.squeeze(axis=1)", "def normalise(self,data,take_logs:bool=False):\n\n # Normalise vector to sum up to 1\n normalised_vector = data/np.sum(data)\n\n # If take logs is selected, take logs\n if take_logs:\n return np.log(normalised_vector)\n else:\n return normalised_vector", "def StandardizeMatrix(mat):\n nObjs = len(mat)\n avgs = sum(mat, 0) / float(nObjs)\n mat -= avgs\n devs = math.sqrt(sum(mat * mat, 0) / (float(nObjs - 1)))\n try:\n newMat = mat / devs\n except OverflowError:\n newMat = numpy.zeros(mat.shape, 'd')\n for i in range(mat.shape[1]):\n if devs[i] != 0.0:\n newMat[:, i] = mat[:, i] / devs[i]\n return newMat", "def normalize(x, axis=-1):\n x = 1. * x / (torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12)\n return x", "def normalize(x, axis=-1):\n x = 1. * x / (torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12)\n return x", "def normalize(x, axis=-1):\n x = 1. * x / (torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12)\n return x", "def normalize(X):\n\tX = X - np.mean(X,axis=1)[:,np.newaxis]\n\tX = X/np.std(X,axis=0)[np.newaxis,:];\n\tX = X - np.mean(X,axis=0)[np.newaxis,:]\n\treturn X", "def ensemble_one_norm(self):\n return sum(sum(self.ensemble_transition_matrix))", "def normalize(batch_img: np.ndarray) -> np.ndarray:\n batch_img = batch_img.astype('float32')\n return batch_img / 127.5 - 1", "def normalize_data(img):\n nor = np.linalg.norm(img, axis = 1)\n nor = np.reshape(nor, (len(img), 1))\n img = np.divide(img, nor)\n return img", "def normalize(self):\n self.vector /= np.linalg.norm(self.vector)", "def normalize(arr):\n m = np.min(arr)\n arr = arr - m\n M = np.max(arr)\n arr = arr / M\n return arr", "def normalizerows(x):\n # Compute x_norm as the norm 2 of x. 
Use np.linalg.norm(..., ord=2, axis= ..., keepdims=True)\n x_norm = np.linalg.norm(x, ord=2, axis=1, keepdims=True)\n\n #Divide x by norm\n x = x / x_norm\n\n return x", "def batch_norm(input_tensor):\n epsilon = 1e-3\n batch_mean, batch_var = tf.nn.moments(input_tensor, [0])\n input_tensor = tf.nn.batch_normalization(input_tensor, mean=batch_mean, variance=batch_var, offset=None,\n scale=None, variance_epsilon=epsilon)\n\n return input_tensor", "def sf01(arr):\n s = arr.shape\n return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])", "def sf01(arr):\n s = arr.shape\n return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])", "def sf01(arr):\n s = arr.shape\n return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])", "def sf01(arr):\n s = arr.shape\n return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])", "def StandardizeMatrix(mat):\n nObjs = len(mat)\n avgs = sum(mat,0)/float(nObjs)\n mat -= avgs\n devs =sqrt(sum(mat*mat,0)/(float(nObjs-1)))\n try:\n newMat = mat/devs\n except OverflowError:\n newMat = numpy.zeros(mat.shape,'d')\n for i in range(mat.shape[1]):\n if devs[i] != 0.0:\n newMat[:,i] = mat[:,i]/devs[i]\n return newMat", "def normalize(my_vector):\n my_vector = np.array(my_vector)\n size = len(my_vector)\n\n sum_ = sum(my_vector)\n if sum_ != 0.0:\n for i in range(size):\n my_vector[i] = my_vector[i] / sum_\n return my_vector", "def normalize(self):\r\n max = np.amax(self.matrix)\r\n min = np.amin(self.matrix)\r\n\r\n self.matrix = ((self.matrix - min) / (max - min))", "def _r1_normalize(cmat):\n dmat = cmat\n smat = np.diag(dmat) + 1 # in case some label has no correct prediction (0 in diag)\n dim = cmat.shape[0]\n xmat = np.zeros([dim, dim])\n for i in range(dim):\n for j in range(i + 1, dim):\n xmat[i, j] = xmat[j, i] = max(dmat[i, j] / smat[j], dmat[j, i] / smat[i])\n\n # scale matrix to 0-1\n xmat = xmat / np.max(xmat)\n\n return xmat", "def normalize(arr, eps):\n\n norm = cuda.reduce('T x', 'T out',\n 'x * x', 'a + b', 'out = sqrt(a)', 0,\n 'norm_sn')(arr)\n cuda.elementwise('T norm, T eps',\n 'T x',\n 'x /= (norm + eps)',\n 'div_sn')(norm, eps, arr)\n return norm", "def normalize(self):\n self._vectors = [vector.normalized() for vector in self._vectors]", "def normalize(nparray, order=2, axis=0):\n norm = np.linalg.norm(nparray, ord=order, axis=axis, keepdims=True)\n return nparray / (norm + np.finfo(np.float32).eps)", "def normalize_all(self):\n #for i, vector in enumerate(self.real_vectors):\n # self.real_vectors[i] /= np.linalg.norm(vector)\n self.vectors /= np.linalg.norm(self.vectors, axis=1).reshape(-1,1)\n for i, vector in enumerate(self.real_vectors):\n vector.set(self.vectors[i])", "def test_normalize_matrix(self):\n input_matrix = [\n [0, 1.0],\n [1.0, 1.0]\n ]\n\n expected = [\n [0, 1],\n [0.5, 0.5]\n ]\n\n result = self.summarizer.normalize_matrix(input_matrix)\n\n self.assertEqual(expected, result)", "def normalize(self):\n det = self._mat[0][0]*self._mat[1][1] - self._mat[0][1]*self._mat[1][0]\n for i in range(2):\n for j in range(2):\n self._mat[i][j] = (self._mat[i][j])/(np.sqrt(det))", "def normalize_vectors(motion_vectors):\n if np.shape(motion_vectors)[0] == 0:\n return motion_vectors\n else:\n motion_vectors[:, 7] = motion_vectors[:, 7] / motion_vectors[:, 0] # motion_x\n motion_vectors[:, 8] = motion_vectors[:, 8] / motion_vectors[:, 0] # motion_y\n motion_vectors[:, 0] = -1 * np.ones_like(motion_vectors[:, 0])\n return motion_vectors", "def normalize(self, x, axis=-1):\n x = 1. 
* x / (torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12)\n return x", "def _normalize_(x: np.array) -> np.array:\n if x.max() != 0:\n x = x / x.max()\n return np.clip(x, 0, 1)# ensure that no values are >1\n else:\n raise ZeroDivisionError('Image Normalization')", "def normalize(tensor: np.ndarray):\n if len(tensor.shape) < 4:\n tensor = np.expand_dims(tensor, axis=2)\n mean = np.array([tensor[..., chn, :].mean() for chn in range(tensor.shape[2])])\n std = np.array([tensor[..., chn, :].std() for chn in range(tensor.shape[2])])\n return (tensor - mean[:, np.newaxis]) / std[:, np.newaxis]", "def normalize(self):\n d = learning_utils.convert_data_to_2d(self._data)\n d = learning_utils.normalize_2d(d)\n self._data = learning_utils.convert_data_to_1d(d)", "def normalization(imgs):\n\n imgs = np.asarray(imgs).astype(np.float32)\n imgs = np.expand_dims(imgs / 255, axis=-1)\n return imgs", "def norm1(x):\n n, p = x.shape\n if p == 1 or n == 1:\n return np.sum(np.abs(x))\n else:\n return np.max(np.sum(np.abs(x), axis=0))", "def standardise(self):\n if self.vector.shape is ():\n return\n if self.dimensionality() != 1:\n # TODO: implement\n raise NotImplementedError\n max_value = 1.0 * max(self.vector)\n if max_value == 0.0:\n # Nothing to do\n return\n self.vector = self.vector.astype('float64') / max_value", "def normalize_matrix_on_axis(m, axis=0, copy=True):\n if axis == 0:\n ret = m / np.sqrt(np.sum(m ** 2, axis=axis))\n elif axis == 1:\n ret = normalize_matrix_on_axis(m.T).T\n else:\n raise Exception('Only for 2D array.')\n if copy:\n ret = ret.copy()\n return ret", "def denormalize(batch_img: np.ndarray) -> np.ndarray:\n return np.uint8((batch_img + 1) * 127.5)", "def flatten_numpy(ndarray):\n return np.reshape(ndarray, (-1,), 'F')", "def normalize(vectors):\n if len(np.asarray(vectors).shape) == 1:\n return vectors / np.linalg.norm(vectors)\n norm = np.linalg.norm(vectors, axis=1)\n return vectors / norm[:, np.newaxis]", "def normalise(array,tot=1.0):\r\n tot1 = np.sum(np.abs(array)**2)\r\n if tot1 == 0.0 :\r\n print 'bg.normalise : warning sum array = 0'\r\n arrayout = np.copy(array)\r\n else :\r\n arrayout = array * np.sqrt(tot / tot1)\r\n return arrayout", "def test_normalization(self):\n u = np.array([np.array([0.7, 1.2]), np.array([0.5, 1.6])])\n with tf.Session() as sess:\n n = sess.run(AbstractModel.l2_normalization_layer(u, axis=1))\n magnitude = np.linalg.norm(n, axis=1)\n np.testing.assert_allclose(magnitude, np.array([1.0, 1.0]))", "def normalize(X, axis=-1, order=2):\n l2 = np.atleast_1d(np.linalg.norm(X, order, axis))\n l2[l2 == 0] = 1\n return X / np.expand_dims(l2, axis)", "def snorm(x):\n return np.dot(x.flatten().T, x.flatten())", "def normalised(cls, mat, axis=-1, order=2):\n norm = np.linalg.norm(\n mat, axis=axis, ord=order, keepdims=True)\n norm[norm == 0] = 1\n return mat / norm", "def _normalize(a: np.ndarray, u: float=0, s: float=1) -> np.ndarray:\n a_norm = (a - np.mean(a)) / (np.std(a) + STABILITY)\n a_rescaled = a_norm * s + u\n\n return a_rescaled", "def _scale(self, normalize, mat):\n mat = mat.astype(float)\n if normalize:\n mat = sklearn_norm(mat,\n feature_range=(0, 1),\n axis=0,\n copy=True)\n else:\n return mat\n return mat", "def normalize_totensor(self, image):\n x = self.loader2(image)\n x = x.repeat(1, 1, 1, 1)\n if is_cuda: x = x.cuda()\n return x", "def normalize(A: np.array) -> np.array:\n for i in range(A.shape[1]):\n A[:, i] = (A[:, i] - np.min(A[:, i])) / (np.max(A[:, i] - np.min(A[:, i])))\n return A", "def normalize(self):\n self._data /= 
self.norm()", "def _normalize_tensor(input_tensor):\n\n rms_tensor = K.sqrt(K.mean(K.square(input_tensor)))\n return input_tensor / (rms_tensor + K.epsilon())", "def normFloatArray(imgIn):\n imgOut = imgIn.copy()\n if imgIn.max()==imgIn.min():\n imgOut = np.zeros(imgIn.shape)\n elif len(imgIn.shape)==2: \n imgOut = (imgOut - imgOut.min())/(imgOut.max()-imgOut.min())\n elif len(imgIn.shape)==3:\n for c in range(3):\n imgOut[:,:,c] = (imgOut[:,:,c] - imgOut[:,:,c].min())/(imgOut[:,:,c].max()-imgOut[:,:,c].min())\n return imgOut.astype(np.float32)", "def normalize(v):\n return np.array(v) / np.linalg.norm(v)", "def normalize_col(input_matrix):\n\n col_sums = np.nan_to_num(input_matrix).sum(axis=0, keepdims=True)\n\n #new_matrix = input_matrix / col_sums if np.isscalar(col_sums) else input_matrix / col_sums[np.newaxis, :]\n new_matrix = np.divide(input_matrix, col_sums)\n return np.nan_to_num(new_matrix)", "def batch_norm(x: tf.Tensor) -> tf.Tensor:\n return slim.batch_norm(x, activation_fn=tf.nn.relu, scope='postnorm')", "def reshape_normalise(img):\n\t# The image shape is expected to match the input of VGG19\n\timg = np.resize(img, (1, CONFIG.IMAGE_HEIGHT, CONFIG.IMAGE_WIDTH, CONFIG.COLOR_CHANNELS)).astype('float32')\n\timg -= CONFIG.MEAN_PIXEL\n\treturn img", "def L1Norm(X):\n return max(np.sum(X,axis=0))", "def norm_layer( x, training, name):\n top = tf.layers.batch_normalization( x, \n axis=3, # channels last \n training=training,\n name=name )\n return top", "def normalized(a, axis=-1, order=2):\n l2 = np.atleast_1d(np.linalg.norm(a, order, axis))\n l2[l2==0] = 1\n return a / np.expand_dims(l2, axis)", "def normalize_sum_to_unity(x):\n if x.ndim == 3:\n return np.nan_to_num(x / x.sum(-1)[..., np.newaxis])\n else:\n return np.nan_to_num(x / x.sum(-1).reshape(-1, 1))" ]
[ "0.68129104", "0.67557216", "0.6609631", "0.6585958", "0.645604", "0.6451531", "0.6302512", "0.62421376", "0.6205158", "0.61722827", "0.6167662", "0.61229944", "0.61031413", "0.6083917", "0.6082263", "0.60746324", "0.6066852", "0.60581243", "0.605747", "0.6040707", "0.6040707", "0.6039398", "0.6037889", "0.6030601", "0.6026608", "0.60131353", "0.60006994", "0.600018", "0.5993755", "0.5976607", "0.5976116", "0.5956681", "0.5954011", "0.5952519", "0.594333", "0.5935401", "0.5931775", "0.592838", "0.5923514", "0.5917637", "0.5891393", "0.58901954", "0.58832467", "0.58765024", "0.58765024", "0.58765024", "0.5859", "0.5857119", "0.58567584", "0.5852832", "0.5841847", "0.58311296", "0.5830953", "0.582469", "0.5823693", "0.5823693", "0.5823693", "0.5823693", "0.58236617", "0.58172536", "0.58067536", "0.5803455", "0.5783423", "0.5779725", "0.5769518", "0.5758921", "0.5756704", "0.5738898", "0.5737258", "0.57294136", "0.57260704", "0.571716", "0.5714913", "0.57047087", "0.5698091", "0.56964105", "0.5693688", "0.5691492", "0.5677209", "0.5659388", "0.56591886", "0.56589526", "0.56459117", "0.5645611", "0.5645183", "0.5642363", "0.5625274", "0.5610875", "0.56068593", "0.5606339", "0.56026673", "0.55993414", "0.5598316", "0.55837214", "0.55784625", "0.5576462", "0.55743724", "0.5574222", "0.5570814", "0.5570245" ]
0.59728706
31
Viterbi algorithm for finding the optimal path. One square transition matrix can be specified.
def viterbi_dense(
    p_emit: np.ndarray,
    p_trans: np.ndarray,
    p_trans0: Optional[np.ndarray] = None,
    mask: Optional[np.ndarray] = None
) -> np.ndarray:
    batch_size, T, N = p_emit.shape
    if mask is None:
        mask = np.ones((batch_size, T), dtype=p_trans.dtype)
    if p_trans0 is None:
        p_trans0 = np.zeros(N, dtype=p_emit.dtype)
    return _viterbi_dense_masked(p_emit, p_trans, p_trans0, mask)
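The document above delegates the per-sequence work to _viterbi_dense_masked, which is not shown in this row. For reference, a self-contained log-space Viterbi decoder for a single (T, N) emission matrix looks like the sketch below; this is a generic textbook version, assumed for illustration, not the row's actual helper.

import numpy as np

def viterbi_reference(log_emit, log_trans, log_trans0=None):
    # log_emit: (T, N) per-step state scores, log_trans: (N, N) transition scores,
    # log_trans0: (N,) initial-state scores. Returns the argmax state path of length T.
    T, N = log_emit.shape
    if log_trans0 is None:
        log_trans0 = np.zeros(N)
    score = log_trans0 + log_emit[0]        # best score of a path ending in each state
    backptr = np.zeros((T, N), dtype=int)
    for t in range(1, T):
        cand = score[:, None] + log_trans   # cand[i, j] = best path through state i, then i -> j
        backptr[t] = cand.argmax(axis=0)
        score = cand.max(axis=0) + log_emit[t]
    path = np.empty(T, dtype=int)
    path[-1] = score.argmax()
    for t in range(T - 2, -1, -1):
        path[t] = backptr[t + 1, path[t + 1]]
    return path

Applied per batch element to log-probabilities (ignoring masked timesteps), this reproduces what a dense, masked batch decoder computes.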
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def original_solution():\n matrix = get_data()\n # Construct Graph\n G = nx.DiGraph()\n rows, cols = len(matrix), len(matrix[0])\n for r in xrange(rows):\n for c in xrange(cols):\n if 0 < c:\n G.add_edge(r*cols + c, r*cols + c - 1, weight=matrix[r][c-1])\n if c < cols-1:\n G.add_edge(r*cols + c, r*cols + c + 1, weight=matrix[r][c+1])\n if 0 < r:\n G.add_edge(r*cols + c, (r-1)*cols + c, weight=matrix[r-1][c])\n if r < rows-1:\n G.add_edge(r*cols + c, (r+1)*cols + c, weight=matrix[r+1][c])\n # Calculate shortest path\n path = nx.shortest_path(G, 0, rows*cols-1, weighted=True)\n \n # Get cost for path\n s = 0\n for p in path:\n c = p % cols\n r = (p - c) / rows\n s += matrix[r][c]\n return s", "def viterbi_path(prior, transmat, observ_likelihood):\n T = observ_likelihood.shape[-1]\n N = observ_likelihood.shape[0]\n\n path = numpy.zeros(T, dtype=numpy.int32)\n global_score = numpy.zeros(shape=(N,T))\n predecessor_state_index = numpy.zeros(shape=(N,T), dtype=numpy.int32)\n\n t = 1\n global_score[:, 0] = prior * observ_likelihood[:, 0]\n # need to normalize the data\n global_score[:, 0] = global_score[:, 0] /sum(global_score[:, 0] )\n \n for t in range(1, T):\n for j in range(N):\n temp = global_score[:, t-1] * transmat[:, j] * observ_likelihood[j, t]\n global_score[j, t] = max(temp)\n predecessor_state_index[j, t] = temp.argmax()\n\n global_score[:, t] = global_score[:, t] / sum(global_score[:, t])\n\n path[T-1] = global_score[:, T-1].argmax()\n \n for t in range(T-2, -1, -1):\n path[t] = predecessor_state_index[ path[t+1], t+1]\n\n return [path, predecessor_state_index, global_score]", "def viterbi(log_emlik, log_startprob, log_transmat, forceFinalState=True):\n N, M = log_emlik.shape # (# timesteps, # states)\n B = np.zeros((N,M))\n V = np.zeros((N,M)) \n\n # initialisation\n V[0,:] = log_startprob + log_emlik[0,:] \n\n # induction\n for t in range(1,N):\n # vectorise\n x = np.tile(V[t-1,:],(M,1)) + log_transmat.T\n V[t,:] = np.max(x, axis=1) + log_emlik[t,:]\n B[t,:] = np.argmax(x, axis=1)\n\n # recover best path, looking for state sequence S that maximises P(S,X|emission probs)\n # TODO if forceFinalState\n end_state = np.argmax(V[N-1,:]) \n \n viterbi_path = [B[N-1,end_state]]\n viterbi_loglik = np.max(V[N-1,:])\n\n s_star = int(end_state)\n for t in range(N-2,-1,-1):\n s_star = int(B[t+1,s_star]) # optimal state at timestep t\n viterbi_path.append(s_star)\n\n assert len(viterbi_path) == N\n\n return viterbi_loglik, viterbi_path[::-1]", "def find_min_hamiltonian_path(G,weights,probs_instead_of_weights=False):\n\n # Create a new model\n m = Model(\"hamiltonian_cycle\")\n \n # Create variables\n x_vars = {}\n u_vars = {}\n for var1 in permute(G.vertices()):\n for var2 in permute(G.vertices()):\n if var1 != var2:\n x_vars[(var1,var2)] = m.addVar(vtype='B', name=\"x_\"+str(var1)+'_'+str(var2))\n u_vars[var1] = m.addVar(vtype=GRB.INTEGER, name=\"u_\"+str(var1))\n m.update()\n \n for var in G.vertices():\n if var != START_NODE:\n cur_incoming = LinExpr([(1.0,v) for k,v in x_vars.items() if (k[1] == var)])\n #print(cur_incoming)\n m.addConstr(cur_incoming,GRB.EQUAL,1.0)\n \n if var != END_NODE:\n cur_outgoing = LinExpr([(1.0,v) for k,v in x_vars.items() if (k[0] == var)])\n #print(cur_outgoing)\n m.addConstr(cur_outgoing,GRB.EQUAL,1.0)\n \n for var1 in G.vertices():\n for var2 in G.vertices():\n if var1 != var2:\n c = LinExpr([(1.0,u_vars[var1]),(-1.0,u_vars[var2]),(G.num_vertices(),x_vars[(var1,var2)])])\n #print(c)\n m.addConstr(c,GRB.LESS_EQUAL,G.num_vertices()-1)\n \n # Set objective\n 
#try:\n edge_weights = permute(G.get_edge_weights(weights))\n if probs_instead_of_weights:\n all_probs = []\n for v in G.vertices():\n if v != END_NODE:\n batch_scores = [(e,w) for e,w in edge_weights if e[0] == v]\n S = logsumexp([x[1] for x in batch_scores])\n batch_scores = [(e,np.exp(w-S)) for e,w in batch_scores]\n all_probs.extend(batch_scores)\n edge_weights = all_probs\n objective = LinExpr([(weight,x_vars[edge]) for edge,weight in edge_weights])\n #except TypeError:\n # return None\n \n m.setObjective(objective,GRB.MINIMIZE)\n m.update()\n code = m.optimize()\n \n try:\n return [k for k,v in x_vars.items() if v.x > 0.98]\n except GurobiError:\n return None", "def viterbi(self):\n # initialisation\n self.phi = zeros((self.noOfEmmittingStates+2, self.T + 1))\n self.phi[0,0] = 1.0\n for i in range(1,self.noOfEmmittingStates+2):\n self.phi[i,0] = 0.0\n for t in range(1,self.T+1):\n self.phi[0,t] = 0.0\n self.traceback = zeros((self.noOfEmmittingStates+1, self.T+1))\n\n # main recursion\n for t in range(1, self.T + 1):\n for j in range(1, self.noOfEmmittingStates + 1):\n phiTemp = zeros((self.noOfEmmittingStates + 1, 1))\n for k in range(self.noOfEmmittingStates+1):\n phiTemp[k,0] = self.phi[k,t-1] * self.transitionMatrix[k, j-1]\n self.traceback[j-1,t-1] = nonzero(phiTemp == phiTemp.max(0))[0][0]\n self.phi[j, t] = phiTemp.max(0) * self.b[j-1, t-1]\n\n # last column - set states which can't reach term to 0, sub for term\n for j in range(1,self.noOfEmmittingStates + 1):\n if self.transitionMatrix[j,-1] == 0:\n self.phi[j,-1] = 0\n phiTemp = zeros((self.noOfEmmittingStates+1, 1))\n for k in range(self.noOfEmmittingStates + 1):\n phiTemp[k,0] = self.phi[k,-1] * self.transitionMatrix[k,-1]\n self.traceback[-1,-1] = nonzero(phiTemp == phiTemp.max(0))[0][0]\n self.phi[-1,-1] = phiTemp.max(0)", "def run_viterbi(emission_scores, trans_scores, start_scores, end_scores):\r\n L = start_scores.shape[0]\r\n assert end_scores.shape[0] == L\r\n assert trans_scores.shape[0] == L\r\n assert trans_scores.shape[1] == L\r\n assert emission_scores.shape[1] == L\r\n N = emission_scores.shape[0]\r\n\r\n # SHAPES \r\n # N = 5, L = 3\r\n # emission_scores = (5,3), trans_scores = (3,3)\r\n # start_scores = (3,), end_scores = (3,)\r\n\r\n # Creating the transition DP matrix\r\n T = [[0 for _ in range(N)] for _ in range(L)]\r\n backpointers = [[0 for _ in range(N)] for _ in range(L)]\r\n\r\n # Filling the first column\r\n for row in range(L):\r\n T[row][0] = emission_scores[0][row] + start_scores[row] # emission_scores matrix is (N X L)\r\n \r\n # Filling the rest of the transition matrix\r\n for col in range(1, N):\r\n for row in range(L):\r\n prev_list = []\r\n for prev_label in range(L):\r\n prev_list.append(trans_scores[prev_label, row] + T[prev_label][col-1])\r\n T[row][col] = max(prev_list) + emission_scores[col][row] \r\n backpointers[row][col] = np.argmax(prev_list)\r\n\r\n # Filling the last column\r\n for row in range(L):\r\n T[row][N-1] += end_scores[row]\r\n\r\n # print for debug\r\n # print \"T\"\r\n # for i in T:\r\n # print i\r\n \r\n # print \r\n # print\r\n\r\n # print \"B\"\r\n # for i in backpointers:\r\n # print i\r\n\r\n # Finding max score in last column of T matrix\r\n T = np.array(T)\r\n score = np.asscalar(np.max(T[:,N-1]))\r\n location = np.asscalar(np.argmax(T[:,N-1]))\r\n\r\n # Getting best sequence from right to left using backpointers\r\n y = [location]\r\n for col in range(N-1, 0, -1):\r\n y.insert(0, backpointers[location][col])\r\n location = backpointers[location][col]\r\n\r\n 
'''\r\n y = []\r\n for i in xrange(N):\r\n # stupid sequence\r\n y.append(i % L)\r\n # score set to 0\r\n return (0.0, y)\r\n '''\r\n return (score, y)", "def recommend_pathway(user_jobs, job_graph, goal_state, min_likelihood_thr):\r\n user_jobs_for_mdp = [user_jobs[0]]\r\n mdp = MDP(job_graph, user_jobs_for_mdp, goal_state, min_likelihood_thr=min_likelihood_thr)\r\n return mdp.solve_mdp()", "def viterbi(self, idx1_p, idx2_p, idx1_t, idx2_t):\r\n\r\n # length of each string\r\n len_a = idx2_t - idx1_t + 1\r\n len_b = idx2_p - idx1_p + 1\r\n\r\n # dp table\r\n # its contents is edit distance, dx, dy to the best previous path\r\n dp = [[(0, -1, -1) for x in range(len_a+1)] for y in range(len_b+1)]\r\n\r\n # initialize first row and first column\r\n dp[0][0] = [0, 0, 0]\r\n for i in range(1, len_a+1):\r\n dp[0][i] = [i, 0, -1]\r\n\r\n for i in range(1, len_b+1):\r\n dp[i][0] = [i, -1, 0]\r\n\r\n # dp update\r\n for i in range(1, len_b+1):\r\n for j in range(1, len_a+1):\r\n index_a = j-1\r\n index_b = i-1\r\n\r\n cost = self._compute_cost(self.pattern[idx1_p+index_b][0], self.text[idx1_t+index_a][0].lower())\r\n cost_p = 1\r\n cost_t = 1\r\n \r\n dx = 0\r\n dy = 0\r\n mincost = 0\r\n\r\n if dp[i-1][j][0] < dp[i][j-1][0]:\r\n dx = -1\r\n dy = 0\r\n mincost = dp[i-1][j][0] + cost_p\r\n else:\r\n dx = 0\r\n dy = -1\r\n mincost = dp[i][j-1][0] + cost_t\r\n\r\n if dp[i-1][j-1][0] + cost < mincost:\r\n dx = -1\r\n dy = -1\r\n mincost = dp[i-1][j-1][0] + cost\r\n\r\n dp[i][j] = [mincost, dx, dy]\r\n\r\n # backward to get the best_start_index\r\n cx = len_b\r\n cy = len_a\r\n\r\n # if self.align_output:\r\n # align_right_f = open(\"./output/\"+self.label+\"_align_right\", \"w\")\r\n # align_wrong_f = open(wrong_path, \"w\")\r\n # align_output_f = open('./output/'label+'_align_right', 'w', encoding='utf-8')\r\n # align_output_f = open('./output/'label+'_align_wrong', 'w', encoding='utf-8')\r\n # right_align = dict()\r\n # wrong_align = dict()\r\n\r\n while cy != 0:\r\n dx = dp[cx][cy][1]\r\n dy = dp[cx][cy][2]\r\n\r\n if dx == 0 and dy == -1: # deal with deletion\r\n self.text[idx1_t+cy-1][1] = 0\r\n else:\r\n self.text[idx1_t+cy-1][1] = self.pattern[idx1_p+cx-1][1]\r\n self.text[idx1_t+cy-1][2] = self.pattern[idx1_p+cx-1][2]\r\n\r\n # for alignment statistics\r\n # if self.text[idx1_t+cy-1][0] == self.pattern[idx1_p+cx-1][0]:\r\n # word_label = self.text[idx1_t+cy-1][0]\r\n # if word_label in right_align:\r\n # right_align[word_label] += 1\r\n # else:\r\n # right_align[word_label] = 1\r\n # else:\r\n if self.text[idx1_t+cy-1][0] != self.pattern[idx1_p+cx-1][0]:\r\n text_label = self.text[idx1_t+cy-1][0]\r\n pattern_label = self.pattern[idx1_p+cx-1][0]\r\n # if (text_label, pattern_label) in wrong_align:\r\n # wrong_align[(text_label, pattern_label)] += 1\r\n # else:\r\n # wrong_align[(text_label, pattern_label)] = 1\r\n\r\n\r\n cx += dx\r\n cy += dy\r\n\r\n # right_align_s = sorted(right_align.items(), key=operator.itemgetter(1), reverse=True)\r\n # wrong_align_s = sorted(wrong_align.items(), key=operator.itemgetter(1), reverse=True)\r\n\r\n # for r in right_align_s:\r\n # align_right_f.write(\"{}\\t{}\\n\".format(r[0], r[1]))\r\n # for w in wrong_align_s:\r\n # align_wrong_f.write(\"{}\\t{}\\t{}\\n\".format(w[0][0], w[0][1], w[1]))\r\n\r\n # align_right_f.close()\r\n # align_wrong_f.close()\r\n\r\n return self.text", "def viterbi(adj_matrix, label_sequence, starting_vertex):\n\n assert adj_matrix, \"adj_matrix is None or empty.\"\n n = len(adj_matrix) # vertex count.\n for row in adj_matrix:\n 
assert len(row) == n, \"adj_matrix is not square.\"\n\n assert 0 <= starting_vertex <= n - 1, \"starting_vertex out of range.\"\n\n assert label_sequence, \"label_sequence is None or empty.\"\n k = len(label_sequence)\n for l in label_sequence:\n assert isinstance(l, int) and l > 0, \"label ids must be positive integers.\"\n\n p = [[0 for _ in range(0, k)] for _ in range(0, n)]\n for j in range(k - 1, -1, -1):\n for beg in range(0, n):\n for end in range(0, n):\n if not adj_matrix[beg][end]: # No edge from i to r.\n continue\n\n assert isinstance(adj_matrix[beg][end], AdjMatrixElem),\\\n \"adj_matrix[%d][%r] is not an AdjMatrixElem\" % (beg, end)\n elem = adj_matrix[beg][end]\n if elem.label_id != label_sequence[j]:\n continue\n\n later_prob = 1 if j == k - 1 else p[end][j + 1]\n if elem.probability * later_prob > p[beg][j]:\n p[beg][j] = elem.probability * later_prob\n\n if round(p[starting_vertex][0] - 0.0, PROBABILITY_PRECISION) == 0:\n return 0, NO_SUCH_PATH\n\n path = [starting_vertex]\n for j in range(0, k):\n beg = path[j]\n for end in range(0, n):\n later_prob = 1 if j == k - 1 else p[end][j + 1]\n if adj_matrix[beg][end] and adj_matrix[beg][end].label_id == label_sequence[j]\\\n and round(p[beg][j] - adj_matrix[beg][end].probability * later_prob, PROBABILITY_PRECISION) == 0:\n path.append(end)\n break\n\n return p[starting_vertex][0], tuple(path)", "def calculate_path(self):\n #Se repite el ciclo para el número especificado de veces\n for i in range(self.iterations):\n for ant in self.ants:\n ant.setup_ant()\n while not ant.final_node_reached:\n #Seleccion aleatoria del nodo a visitar\n node_to_vist = self.select_next_node(self.map.nodes_array[int(ant.actual_node[0])][int(ant.actual_node[1])])\n #Mover la hormiga al siguiente nodo seleccionado al azar\n ant.move_ant(node_to_visit)\n #Compruebe si se ha alcanzado la solución\n ant.is_final_node_reached()\n #Agregar la ruta resultante a la lista de rutas\n self.add_to_path_results(self.delete_loops(ant.get_visited_nodes()))\n # Habilitar a la hormiga para otra busqueda\n ant.enable_start_new_path()\n \n # Actualizar el nivel global de feromonas\n self.pheromone_update()\n self.best_result = self.paths[0]\n\n #Vaciar la lista de rutas\n self.empty_paths()\n print('Iteration: ', i, 'lenght of the path: ', len(self.best_result))\n return self.best_result", "def time_path_iteration(params=params, S=3, T=50, weight=0.3, tol=1e-12, maxiter=100):\n ss_output = get_SS()\n b_ss = ss_output['b_ss']\n b_init = np.array([0, 0.8 * b_ss[0], 1.1 * b_ss[1]]) # t=0\n\n # Guess transition path, finishes at steady_state\n Kguess = np.linspace(b_init.sum(), ss_output['K_ss'], T)\n\n s = 1\n K_dynamic = Kguess\n b_current = np.zeros((S,T)) # initialize array to store savings decisions\n b_current[:,0] = b_init\n\n # Update b_path until convergence\n its = 0\n ee_diff = 7.0\n while ee_diff > tol and its < maxiter:\n its += 1\n w_dynamic = find_w(L=params['labor_supply'].sum(), K=K_dynamic)\n r_dynamic = find_r(L=params['labor_supply'].sum(), K=K_dynamic)\n for t in range(T-2):\n\n #solve for b32, savings decision of middle-aged in first period\n ee_param = (w_dynamic, r_dynamic, params['labor_supply'], b_current[:,t], s, t)\n b_current[s+1,t+1] = opt.root(ee_err_1, 0, args=ee_param).x\n\n # solve for b22, b33, savings decision of young gen in middle/old generations\n ee_param = (w_dynamic, r_dynamic, params['labor_supply'], b_init, s, t)\n b_current[s,t+1], b_current[s+1, t+2]= opt.root(ee_err_23, [0,0], args=ee_param).x\n # fill in table\n 
b_current[s,T-1] = b_current[s,T-2]\n\n # Check for convergence\n K_prime = b_current.sum(axis=0)\n ee_diff = (K_prime - K_dynamic).max()\n\n# rc_diff = production(K_prime, L=params['labor_supply'].sum())\n# - Ct = (1 + r_dynamic) * ()\n# - np.roll(K_prime, len(K_prime)-1)\n# - (1 - delta) * K_prime\n\n print('Iteration number: ', its, 'Current EE difference: ', ee_diff)\n # update new capital path\n K_dynamic = weight * K_prime + (1-weight) * K_dynamic\n\n fig, ax = plt.subplots(1,1,figsize=(8,6))\n plt.plot(range(T), Kguess, 'r--',lw=0.7, label='Kguess')\n plt.plot(range(T), K_dynamic , label='Capital Path Solution')\n plt.title('Transition Path of Aggregate Capital')\n plt.xlabel('Time period')\n plt.ylabel('Aggregate Capital')\n plt.legend()\n\n fig, ax = plt.subplots(1,1,figsize=(8,6))\n plt.plot(range(T), r_dynamic, 'g-o',label='Interest rate Path Solution')\n plt.title('Transition Path of Aggregate Interest rate')\n plt.xlabel('Time period')\n plt.ylabel('Interest Rate')\n plt.legend()\n\n fig, ax = plt.subplots(1,1,figsize=(8,6))\n plt.plot(range(T), w_dynamic, 'k-o',label='Wage Path Solution')\n plt.title('Transition Path of Wages')\n plt.xlabel('Time period')\n plt.ylabel('Wages')\n plt.legend()\n\n return K_dynamic", "def viterbi_path_log(prior, transmat, observ_likelihood):\n T = observ_likelihood.shape[-1]\n N = observ_likelihood.shape[0]\n\n path = numpy.zeros(T, dtype=numpy.int32)\n global_score = numpy.zeros(shape=(N,T))\n predecessor_state_index = numpy.zeros(shape=(N,T), dtype=numpy.int32)\n\n t = 1\n global_score[:, 0] = prior + observ_likelihood[:, 0]\n # need to normalize the data\n \n for t in range(1, T):\n for j in range(N):\n temp = global_score[:, t-1] + transmat[:, j] + observ_likelihood[j, t]\n global_score[j, t] = max(temp)\n predecessor_state_index[j, t] = temp.argmax()\n\n path[T-1] = global_score[:, T-1].argmax()\n \n for t in range(T-2, -1, -1):\n path[t] = predecessor_state_index[ path[t+1], t+1]\n\n return [path, predecessor_state_index, global_score]", "def OptimalWarpingPath( self, colStart=None ):\n rows = len(self.D)\n cols = len(self.D[0])\n n = rows-1\n m = cols-1\n if colStart:\n m=colStart\n path = [(n,m)]\n while n > 0 or m > 0:\n if n == 0 :\n path.insert(0,(0,m-1))\n m -= 1\n elif m == 0 :\n path.insert(0,(n-1,0))\n n -= 1\n else:\n minStep = min( self.D[n-1][m-1], self.D[n-1][m], self.D[n][m-1] )\n if self.D[n-1][m-1] == minStep:\n path.insert(0,(n-1,m-1))\n n -= 1\n m -= 1\n elif self.D[n-1][m] == minStep:\n path.insert(0,(n-1,m))\n n -= 1\n else: # self.D[n][m-1] == min:\n path.insert(0,(n,m-1))\n m -= 1\n return path, self.CostOfPath( path, self.D )", "def solve_maze(self):\n initial_maze_loc = self.maze.location\n curr_coord = initial_maze_loc\n solution_path_directions = []\n #print(\"in solve_maze:\")\n\n # The agent always chooses the next location with the highest Q value.\n # With this strategy, the agent aims to reach the goal using the\n # most optimal path possible.\n while (self.grid[curr_coord[0]][curr_coord[1]] != 'G' and\n self.grid[curr_coord[0]][curr_coord[1]] != 'E'):\n possible_moves = self.maze.moves()\n\n # Find the next best move.\n best_next_move = (0,0)\n best_next_move_q = float('-inf')\n for move in possible_moves:\n if self.qtable[curr_coord[0]+move[0]][curr_coord[1]+move[1]] >= best_next_move_q:\n best_next_move = move\n best_next_move_q = self.qtable[curr_coord[0]+move[0]][curr_coord[1]+move[1]]\n\n direction = self.maze.moves_to_dirs[best_next_move]\n solution_path_directions.append(direction)\n curr_coord = 
(curr_coord[0]+best_next_move[0], curr_coord[1]+best_next_move[1])\n self.maze.location = curr_coord\n self.maze.location = initial_maze_loc # reset maze location to initial coord.\n\n return solution_path_directions", "def Find_Path(self):\n closed_nodes_map = [] # map of closed (tried-out) nodes\n open_nodes_map = [] # map of open (not-yet-tried) nodes\n dir_map = [] # map of directions\n row = [0] * self.n\n for i in range(self.m): # create 2d arrays\n closed_nodes_map.append(list(row))\n open_nodes_map.append(list(row))\n dir_map.append(list(row))\n \n pq = [[], []] # priority queues of open (not-yet-tried) nodes\n pqi = 0 # priority queue index\n # create the start node and push into list of open nodes\n n0 = node(self.xStart, self.yStart, 0.0, 0.0)\n n0.updatePriority(self.xFinish, self.yFinish)\n heappush(pq[pqi], n0)\n open_nodes_map[self.yStart][self.xStart] = n0.priority # mark it on the open nodes map\n \n # A* search\n while len(pq[pqi]) > 0:\n # get the current node w/ the highest priority\n # from the list of open nodes\n n1 = pq[pqi][0] # top node\n n0 = node(n1.xPos, n1.yPos, n1.distance, n1.priority)\n x = n0.xPos\n y = n0.yPos\n heappop(pq[pqi]) # remove the node from the open list\n open_nodes_map[y][x] = 0\n # mark it on the closed nodes map\n closed_nodes_map[y][x] = 1\n \n # quit searching when the goal state is reached\n if x == self.xFinish and y == self.yFinish:\n # Generate the path from finish to start by following the \n # directions.\n return self.Reconstruct_Path(dir_map)\n \n # generate moves (child nodes) in all possible directions\n for i in range(self.num_directions):\n new_x = x + self.dx[i]\n new_y = y + self.dy[i]\n Flag=True\n if not (new_x < 0 or new_x > self.n-1 or new_y < 0 or new_y > self.m - 1\n or self.MAP[new_y][new_x] == 1 or closed_nodes_map[new_y][new_x] == 1):\n # Check to see if the extended path runs through any obstacles\n if (abs(self.dx[i])>1 or abs(self.dy[i])>1):\n # Need to check that the path does not pass an object\n JumpCells=2*max(abs(self.dx[i]),abs(self.dy[i]))-1\n for K in range(1,JumpCells):\n YPOS=int(round(K*1.0*self.dy[i]/JumpCells))\n XPOS=int(round(K*1.0*self.dx[i]/JumpCells))\n if (self.MAP[y+YPOS][x+XPOS]==1):\n Flag=False\n if Flag: \n # generate a child node\n m0 = node(new_x, new_y, n0.distance, n0.priority)\n m0.calc_cost(self.dx[i], self.dy[i])\n m0.updatePriority(self.xFinish, self.yFinish)\n # if it is not in the open list then add into that\n if open_nodes_map[new_y][new_x] == 0:\n open_nodes_map[new_y][new_x] = m0.priority\n heappush(pq[pqi], m0)\n # mark its parent node direction\n dir_map[new_y][new_x] = (self.num_directions-i-1) % self.num_directions\n elif open_nodes_map[new_y][new_x] > m0.priority:\n # update the priority info\n open_nodes_map[new_y][new_x] = m0.priority\n # update the parent direction info\n dir_map[new_y][new_x] = (self.num_directions-i-1) % self.num_directions\n # replace the node\n # by emptying one pq to the other one\n # except the node to be replaced will be ignored\n # and the new node will be pushed in instead\n while not (pq[pqi][0].xPos == new_x and pq[pqi][0].yPos == new_y):\n heappush(pq[1 - pqi], pq[pqi][0])\n heappop(pq[pqi])\n heappop(pq[pqi]) # remove the wanted node\n # empty the larger size pq to the smaller one\n if len(pq[pqi]) > len(pq[1 - pqi]):\n pqi = 1 - pqi\n while len(pq[pqi]) > 0:\n heappush(pq[1-pqi], pq[pqi][0])\n heappop(pq[pqi]) \n pqi = 1 - pqi\n heappush(pq[pqi], m0) # add the better node instead\n return '','' # no route found", "def 
second_heuristic(self):\r\n directions = [[-1, -1], [-1, 1], [1, 1], [1, -1]]\r\n # aceasta matrice indica valoarea pe care o are mutarea unei piese pe o celula aleasa\r\n # se va aduna la media ponderilor adunate in lista weights\r\n\r\n # mijlocul tablei este punctul cel mai vulnerabil\r\n # in timp ce lateralele sunt sigure,iar linia bazei transforma piesa in rege\r\n\r\n points = [[0, 4, 0, 4, 0, 4, 0, 4],\r\n [4, 0, 3, 0, 3, 0, 3, 0],\r\n [0, 3, 0, 2, 0, 2, 0, 4],\r\n [4, 0, 2, 0, 1, 0, 3, 0],\r\n [0, 3, 0, 1, 0, 2, 0, 4],\r\n [4, 0, 2, 0, 1, 0, 3, 0],\r\n [0, 3, 0, 2, 0, 2, 0, 4],\r\n [4, 0, 4, 0, 4, 0, 4, 0]]\r\n\r\n weights = [0 for i in range(4)]\r\n whites, blacks = 0, 0\r\n for i in range(8):\r\n for j in range(8):\r\n\r\n # numaram discurile de fiecare culoarea\r\n blacks += 1 if self.matrix[i][j] in ['N', 'n'] else 0\r\n whites += 1 if self.matrix[i][j] in ['A', 'a'] else 0\r\n\r\n if self.matrix[i][j] in [self.current_player, self.current_player.upper()]:\r\n\r\n # daca e piesa normala\r\n if self.matrix[i][j] == self.current_player:\r\n weights[0] += 4\r\n\r\n # cat de aproape este piesa de a deveni rege ( nr de linii din tabla - cate mai are pana ajunge pe ultima linie)\r\n\r\n # cu cat se apropie piesa mai multe de a deveni rege, scorul creste( negru - rege pentru i=0, alb -rege pentru i =7)\r\n if self.matrix[i][j] == 'n':\r\n weights[1] += (7 - i)\r\n elif self.matrix[i][j] == 'a':\r\n weights[1] += i\r\n else:\r\n # daca e piesa rege\r\n weights[0] += 8\r\n\r\n # cat de aproape este piesa rege de celelalte piese\r\n for d in directions:\r\n if self.matrix[i][j] == self.current_player.upper():\r\n # gaseste pe diagonala in directia d, o piesa adversara,daca exista\r\n x, y = self.find_piesa(i, j, d)\r\n if x and y:\r\n weights[2] += (x - i) * (x - i) + (y - j) * (y - j)\r\n vx = d[0] + i\r\n vy = d[1] + j\r\n back_x = i - d[0]\r\n back_y = j - d[1]\r\n next_x, next_y = vx + d[0], vy + d[1]\r\n # piesele pe care le poate captura jucatorul, daca e piesa rege are un scor mai mare\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(next_x, next_y) and self.matrix[next_x][next_y] == '.':\r\n if self.matrix[next_x][next_y] == self.opponent().upper():\r\n weights[3] += 7\r\n else:\r\n weights[3] += 4\r\n # piese care pot fi capturate; la fel daca este piesa rege atunci se scade mai mult scorul\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(back_x, back_y) and self.matrix[back_x][back_y] == '.':\r\n if self.matrix[vx][vy] == self.opponent().upper():\r\n weights[3] -= 6\r\n else:\r\n weights[3] -= 3\r\n # adunam piesa la media sumei date pentru a face AI-ul in caz de egalitate a scorului\r\n # sa imi aleaga piesa care ma pozitioneaza mai bine\r\n if self.move:\r\n return sum(weights) / 4 + points[self.move[0]][self.move[1]]\r\n return sum(weights) / 4\r\n\r\n def __str__(self):\r\n s = ' '\r\n for i in range(8):\r\n s += str(i) + ' '\r\n s += '\\n'\r\n for index, line in enumerate(self.matrix):\r\n s += str(chr(index + ord('a'))) + ' '\r\n for el in line:\r\n s += str(el) + ' '\r\n s += '\\n'\r\n\r\n return s", "def fastest_path_estimation(sol):\n\n class Path:\n def __init__(self, places, graph):\n self.g = 0 # current cost\n self.graph = graph\n self.visited = [places[0]] # list of already visited attractions\n self.not_visited = copy.deepcopy(places[1:]) # list of attractions not yet visited\n\n def __lt__(self, other):\n return self.g < 
other.g\n\n def add(self, idx):\n # add the cost\n self.g += self.graph[self.visited[-1], idx]\n # add the to the visited place and remove from the unvisited places\n self.visited.append(idx)\n self.not_visited.remove(idx)\n\n def add_to_heap_queue(path):\n # custom function to add to heap queue sorted by the solution's cost\n heappush(h_queue, path)\n\n if len(sol.not_visited) == 0:\n return 0\n elif len(sol.not_visited) == 1:\n return sol.graph[sol.visited[-1], sol.not_visited[0]]\n\n c = sol.visited[-1]\n pm = sol.not_visited[-1]\n # the heap queue of solution sorted by their cost - change all to tuples with g for dijkstra\n h_queue = []\n\n # the places to use for the graph\n sub_search_places = [c]\n sub_search_places.extend(sol.not_visited)\n\n # push the first \"node\" in the queue\n add_to_heap_queue(Path(sub_search_places, sol.graph))\n while True:\n # take the next solution with the shortest cost\n path = heappop(h_queue)\n # if it contains destination, stop and return that solution\n if pm in path.visited:\n return path.g\n # create a new solution for each neighbor of the current vertex and add it to heap queue\n for place in path.not_visited:\n new_path = copy.deepcopy(path)\n new_path.add(place)\n add_to_heap_queue(new_path)", "def handle_solution(node, start_sq):\n final_route = []\n while True: # Find the best path by backtracking through all the parents, starting with the goal node\n final_route.insert(0, node)\n if node == start_sq:\n break\n node = node.parent\n print('Best path from A to B:')\n print_list(final_route)\n draw_best_route(final_route)", "def solve(self):\n # Use a trivial tour (1-2-3-...-N-1) to set the global upper bound.\n tour = list(range(self._N))\n upper_bound = sum([self._G[i][(i + 1) % self._N] for i in range(self._N)])\n trace = []\n\n # Start from a configuration with a single vertex.\n frontier = [BranchAndBoundConfiguration(self._G, self._N, [0], LOWER_BOUND_METHOD)]\n\n # Set the start time.\n start_time = time.time()\n\n # Branch and bound until the frontier set is empty or the time has expired.\n while frontier and (time.time() - start_time) < self._cutoff_time:\n # Fetch the most promising configuration.\n config = heappop(frontier)\n\n # Expand configuration by appending a vertex to the path.\n for v in range(self._N):\n try:\n expanded_config = config.expand(v)\n except ValueError:\n # Expanded configuration is not valid.\n continue\n if expanded_config.is_solution():\n # Update the global upper bound, if needed.\n this_solution = expanded_config.get_cycle_cost()\n if this_solution < upper_bound:\n # Log it.\n trace.append((time.time() - start_time, this_solution))\n # Update the best solution.\n upper_bound = this_solution\n tour = list(expanded_config.get_path())\n elif expanded_config.get_lower_bound() < upper_bound:\n # Add to the frontier set.\n heappush(frontier, expanded_config)\n return (upper_bound, [self._index_to_id[v] for v in tour], trace)", "def astar_multi(maze):\n # TODO: Write your code here\n gFunction = {}\n frontier = PriorityQueue()\n path = []\n ret = []\n MSTLengths = {}\n edges = {}\n\n objectives = maze.getObjectives()\n start = State(maze.getStart()[0], maze.getStart()[1], objectives)\n gFunction[start] = 0\n frontier.put(start) \n getEdgeWeights(maze, objectives, edges) # init edge weights for MST\n\n while not frontier.empty():\n\n currentState = frontier.get()\n currentCell = currentState.cell()\n objectivesLeft = currentState.objectives()\n\n if objectivesLeft.count(currentCell) != 0:\n 
objectivesLeft.remove(currentCell)\n\n # all objectives found, initialise backtrace and exit loop\n if len(objectivesLeft) == 0:\n path.clear()\n ret.clear()\n path.append(currentState)\n ret.append(currentCell)\n break\n \n # if we have already calculated MST length we can reuse value\n # else calculate MST length for this state and store it.\n length = 0\n if str(objectivesLeft) in MSTLengths:\n length = MSTLengths[str(objectivesLeft)]\n else:\n length = getMSTLength(objectivesLeft.copy(), maze, edges)\n MSTLengths[str(objectivesLeft)] = length\n\n neighbors = maze.getNeighbors(currentCell[0], currentCell[1])\n\n for i in neighbors:\n\n neighbor = State(i[0], i[1], objectivesLeft)\n gVal= gFunction[currentState] + 1\n\n if neighbor not in gFunction or gVal < gFunction[neighbor]:\n\n neighbor.setParent(currentState)\n gFunction[neighbor] = gVal\n\n hFunction = []\n for j in objectivesLeft:\n hFunction.append(abs(j[0] - i[0]) + abs(j[1] - i[1]) + length) # use MST length + manhatten distance to nearest objective as heuristic.\n\n hVal = min(hFunction)\n\n neighbor.setfFunction(gFunction[neighbor] + hVal)\n frontier.put(neighbor)\n\n # backtrace\n while path[0]!= start:\n \n currentCell = path[0]\n path.insert(0, currentCell.parent())\n ret.insert(0, currentCell.parent().cell())\n\n return ret", "def solve(self):\n\t\t# row and column to control the selected corner\n\t\tcurrent_positions = [0, 0]\n\t\tmax_row = self.result_index[self.__ROW_INDEX]-1\n\t\tmax_col = self.result_index[self.__COLUMN_INDEX]-1\n\t\twhile self.matrix[max_row][max_col] != 0:\n\t\t\tcurrent_row = current_positions[self.__ROW_INDEX]\n\t\t\tcurrent_column = current_positions[self.__COLUMN_INDEX]\n\t\t\tmultiplier_selected = self.get_multiplier_with_index(current_row, current_column)\n\t\t\tactual_weight = self.matrix[current_row][current_column]\n\t\t\tactual_result_weight = multiplier_selected[1]\n\t\t\tself.resulting_matrix[current_row][current_column] = actual_weight * actual_result_weight\n\t\t\tself.matrix[max_row][max_col] -= actual_result_weight\n\t\t\tself.matrix[current_row][max_col] -= actual_result_weight\n\t\t\tself.matrix[max_row][current_column] -= actual_result_weight\n\t\t\tcurrent_positions[multiplier_selected[0]] += 1", "def Viterbi(_sentence, _model, _emission_df, _transition_df):\n\n if not _sentence:\n return []\n\n # EXECUTE VITERBI\n states = [state for state, _ in _model.y_count.items()]\n states.remove('__START__')\n states.remove('__STOP__')\n\n # keep table of values\n # (len(states) x len(sentence))\n value_table = [[0 for x in range(len(_sentence) + 1)] for y in range(len(states))]\n\n # keep table of sequences\n sequence_table = [[[] for x in range(len(_sentence))] for y in range(len(states))]\n\n # base case - START to all states\n for i in range(len(states)):\n # transition prob from __START__ to anything\n try:\n transition_prob = _transition_df[('__START__', states[i])]\n except KeyError:\n transition_prob = 0.0\n\n # error occurs here due to empty _sentence\n try:\n emission_prob = _emission_df[(_sentence[0], states[i])]\n except KeyError:\n emission_prob = 0.0\n\n value_table[i][0] = float(transition_prob) * float(emission_prob)\n sequence_table[i][0] = ['__START__', states[i]]\n\n # iterative/recursive case - state to state\n for i in range(1, len(_sentence)):\n\n # storage for prev\n prev_optimal = 0.0\n prev_state_seq = []\n\n for j in range(len(states)):\n try:\n # find e(xi|yj)\n emission_prob = float(_emission_df[(_sentence[i], states[j])])\n except KeyError:\n emission_prob = 
0.0\n\n if prev_optimal == 0.0:\n # find optimal from state to state prob\n for k in range(len(states)):\n test_opti = float(value_table[k][i-1])\n if test_opti >= prev_optimal:\n prev_optimal = test_opti\n prev_state_seq = sequence_table[k][i-1]\n\n # given prev optimal, calculate transition prob\n try:\n # find transition prob from prev optimal state to current\n transition_prob = float(_transition_df[(prev_state_seq[-1], states[j])])\n except KeyError:\n transition_prob = 0.0\n\n prob = prev_optimal * transition_prob * emission_prob\n next_state_seq = prev_state_seq + [states[j]]\n\n value_table[j][i] = prob\n sequence_table[j][i] = next_state_seq\n\n # end case - all states to __STOP__\n for i in range(len(states)):\n try:\n transition_prob = _transition_df[(states[i], '__STOP__')]\n except KeyError:\n transition_prob = 0.0\n\n value_table[i][-1] = float(transition_prob) * float(value_table[i][-2])\n\n # take optimal from table and return optimal val and sequence\n max_val = 0\n result_seq = []\n for i in range(len(states)):\n prob = float(value_table[i][-1]) # take all from last\n if max_val == 0 or prob > max_val:\n max_val = prob\n result_seq = sequence_table[i][-1]\n\n return result_seq[1:]", "def floyd_warshall(A):\n n = A.shape[0]\n \n for k in tqdm(range(1, n+1)):\n for i in range(n):\n for j in range(n):\n A[i,j,k] = min(A[i,j,k-1], A[i,k-1,k-1]+A[k-1,j,k-1])\n \n \n for i in range(n):\n if A[i,i,n] <0:\n min_path = 'Negative cycle'\n return min_path\n min_path = np.min(A[:,:,n])\n \n return min_path", "def viterbi(prob_matrix):\n TINY = 1e-6 # to avoid NaNs in logs\n\n # if prob_matrix is 1D, make it 2D\n if len(np.shape(prob_matrix)) == 1:\n prob_matrix = [prob_matrix]\n \n length = len(prob_matrix)\n\n probs = np.zeros_like(prob_matrix)\n backpt = np.ones_like(prob_matrix, dtype=np.int32) * -1\n \n for i in [0,1,2,3,4]:\n probs[0][i] = np.log(prob_matrix[0][i]+TINY)\n \n # {B, M, E, S} <=== 0:begin, 1:middle, 2:end, 3:single\n for t in range(1, length):\n # E, S -> B | B, M -> M | B, M -> E | E, S -> S\n previous_of = [[0,0], [3,4], [1,2], [1,2], [3,4]]\n for i in range(5):\n prevs = previous_of[i]\n max_id = prevs[np.argmax([probs[t-1][prevs[0]], probs[t-1][prevs[1]]])]\n backpt[t][i] = max_id\n probs[t][i] = np.log(prob_matrix[t][i]+TINY) + probs[t-1][max_id]\n\n seq = np.ones(length, 'int32') * -1\n #print(probs[length-1])\n seq[length-1] = np.argmax(probs[length-1])\n #print(seq[length-1])\n max_prob = probs[length-1][seq[length-1]]\n for t in range(1, length):\n seq[length-1-t] = backpt[length-t][seq[length-t]]\n \n return seq", "def get_shortest_route_floyd(network, start,destination, excludings=[]):\n\n # On récupère la liste des villes\n list_city = network[1].keys()\n \n # Si la ville de départ ou de fin n'existe pas\n if start not in list_city or destination not in list_city:\n return None\n\n # On retire les villes à exclure\n list_city = [x for x in list_city if x not in excludings]\n\n\n # Initialisation de se qu'on a besoin\n matrix = []\n distance = []\n n = len(list_city)\n\n \n # On construit la matrice adjacente où indique la distance si il existe une autoroute entre 2 villes\n for x in range(n): \n matrix.append( [] )\n distance.append( [] )\n for y in range(n):\n road_id = get_road_to(network,list_city[x],list_city[y])\n if road_id != None:\n matrix[x].append( get_length(network,road_id) )\n else:\n matrix[x].append( None )\n distance[x].append( [road_id] ) # Autoroute -> format: ['LA']\n\n\t \n # Algorithme de Floyd\n for k in range(n):\n for i in 
range(n):\n for j in range(n):\n if ( matrix[i][k] != None and matrix[k][j] != None ) and ( ( matrix[i][j] == None ) or ( matrix[i][j] > matrix[i][k] + matrix[k][j] ) ):\n matrix[i][j] = matrix[i][k] + matrix[k][j]\n\t\t \n\t\t # Hors Floyd / Ajout personnel\n if i != k and j != k: # Si i == k ou j == k, cela veut dire qu'on additionne un résultat supplémentaire à la case ij\n distance[i][j] = [] # Sinon ca signifie qu'on a trouvé un chemin plus court, du coup on supprime l'ancien chemin\n distance[i][j].extend( distance[i][k] ) # Chemin d'autoroute parcouru en plus -> format: ['LA','AH']\n distance[i][j].extend( distance[k][j] ) # Chemin d'autoroute parcouru en plus -> format: ['LA','AH']\n\n\t\t \n # On récupère simplement la liste des autoroutes parcourus\n idx_start = list_city.index( start )\n idx_destination = list_city.index( destination )\n distance_minimum = distance[ idx_start ][ idx_destination ]\n\n \n # Si on ne trouve aucune solution, on renvoie None\n if distance_minimum == [None]:\n distance_minimum = None\n \n return distance_minimum", "def objective(V,m,adj):\r\n #number of edges in G = (V,E)\r\n link = 0\r\n for i in range(m):\r\n for j in range(i,m):\r\n if i != j and (V[i],V[j]) in adj or (V[j],V[i]) in adj:\r\n link += 1\r\n #number of expected edges to have a module\r\n edges = factorial(m)/(factorial(m-2)*2)\r\n obj = edges - link\r\n\r\n return obj", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n path_to_point = {}\n cost_to_point = {}\n\n # Get the start node\n start_node = problem.getStartState()\n fringe_node = [start_node]\n path_to_point[start_node] = []\n cost_to_point[start_node] = problem.getCostOfActions(path_to_point[start_node])\n\n goal_found = False\n\n while(not goal_found):\n #for i in range(100): \n nodes_to_expand = set()\n # get max value node in the fringe node\n min_val = float(\"inf\")\n for one_node in fringe_node:\n # Compute the cost to reach a node\n if cost_to_point[one_node] < min_val:\n min_val = cost_to_point[one_node]\n \n for one_node in fringe_node:\n # Compute the cost to reach a node\n if cost_to_point[one_node] == min_val:\n nodes_to_expand.add(one_node)\n fringe_node.remove(one_node)\n\n # Expand the fringe node \n for one_node in nodes_to_expand:\n path_to_parent = path_to_point[one_node]\n for nxt_node in problem.getSuccessors(one_node):\n pos = nxt_node[0]\n mv = nxt_node[1]\n # check if point already present in path to point\n prev_cost = float(\"inf\")\n if pos in cost_to_point:\n prev_cost = cost_to_point[pos]\n new_path = path_to_parent + [mv]\n if prev_cost > problem.getCostOfActions(new_path):\n path_to_point[pos] = new_path\n cost_to_point[pos] = problem.getCostOfActions(new_path)\n fringe_node.append(pos)\n\n # Check if destination is reached in the fringe node\n for one_node in fringe_node:\n if problem.isGoalState(one_node):\n final_node = one_node\n goal_found = True\n break\n \n #print(len(fringe_node))\n print(final_node)\n print(path_to_point[final_node])\n return path_to_point[final_node] \n\n util.raiseNotDefined()", "def calculate_costs(self):\n cost_matrix = self.make_cost_matrix()\n \n if self.greedy:\n # Riesen et al., \"Greedy Graph Edit Distance\"\n costs = []\n psi = []\n \n for row in range(self.N):\n phi = self.M\n row_min = sys.maxint\n for column in range(self.N+self.M):\n if column not in psi:\n if cost_matrix[row, column] < row_min:\n row_min = cost_matrix[row, column]\n phi = column\n \n costs.append(row_min)\n if phi < self.M:\n psi.append(phi)\n \n for row in range(self.N, 
self.N+self.M):\n if (row - self.N) not in psi:\n costs.append(cost_matrix[row, row - self.N])\n else:\n # Riesen & Bunke, \"Approximate graph edit distance computation by means of bipartite graph matching\"\n row_ind, col_ind = optimize.linear_sum_assignment(cost_matrix)\n \n if self.verbose:\n for row, column in (row_ind, col_ind):\n value = cost_matrix[row, column]\n print '%d, %d, %.4f' % (row, column, value)\n \n return row_ind, col_ind, cost_matrix[row_ind, col_ind]", "def compute_adj_matrix_fitness(solution):\n\n solution_fitness = 0.0\n\n for index in range(len(solution)):\n waypoint1 = solution[index - 1]\n waypoint2 = solution[index]\n solution_fitness += adj_matrix[waypoint1, waypoint2]\n\n return solution_fitness", "def viterbi(p_observations_given_state, p_transition, p_initial):\n p_observations_given_state = numpy.asarray(p_observations_given_state)\n p_transition = numpy.asarray(p_transition)\n p_initial = numpy.asarray(p_initial)\n N, S = p_observations_given_state.shape\n assert p_transition.shape in {(S, S), (N-1, S, S)}\n if p_transition.shape == (S, S):\n p_transition = numpy.array([p_transition for i in range(N-1)])\n assert numpy.allclose(numpy.sum(p_transition, axis=2), 1)\n assert p_initial.shape == (S,)\n assert numpy.allclose(numpy.sum(p_initial), 1)\n\n # convert all probabilities to log probabilities so we can sum instead of\n # multiplying, which better controls numerical error.\n err = numpy.seterr(divide='ignore') # allow log(0) to go to -inf, as desired\n lp_observations_given_state = numpy.log(p_observations_given_state)\n lp_transition = numpy.log(p_transition)\n lp_initial = numpy.log(p_initial)\n numpy.seterr(**err)\n\n states = numpy.arange(S)\n # path[i] always contains the maximum likelihood sequence of states ending at state i\n path = [[i] for i in states]\n # lp_state contains the current log probability of being in the state given the sequence\n # of observations thus far considered.\n lp_state = lp_observations_given_state[0] + lp_initial\n\n for lp_obs, lp_trans in zip(lp_observations_given_state[1:], lp_transition):\n # For each observation after the first timepoint, construct an (S, S)\n # shape array where [si, sj] contains the log probability of going from\n # state si to state sj between time t and t+1.\n # Assume we know for each state si prob(si at time t), the probability\n # of being in that state at that time, then we can calculate the probability\n # of being in any given state sj at time t+1:\n # prob(transition from si at time t to sj at time t+1) = prob(si at t) *\n # prob(si->sj between t and t+1) *\n # prob(observation at t+1 given state sj)\n # prob(j at time t+1) = max_i(prob(i at time t -> j at time t+1))\n #\n # Thus we merely need to keep updating our estimates for the probability\n # of being in each state at each time, and keep a list of the path that\n # lead to each state.\n #\n # The actual code in use is 100% equivalent to the code below; however it\n # is rather more efficient.\n #\n # lp_transition_t = numpy.zeros((s, s), dtype=float)\n # new_path = []\n # lp_state = []\n # for s_to in states:\n # best_from_lp = -numpy.inf\n # for s_from in states:\n # lp_transition_t[s_from, s_to] = lp_state[s_from] + lp_trans[s_from, s_to] + lp_obs[s_to]\n # if lp_transition_t[s_from, s_to] > best_from_lp:\n # best_from = s_from\n # best_from_lp = lp_transition_t[s_from, s_to]\n # lp_state.append(best_from_lp)\n # new_path.append(path[best_from] + [s_to])\n # path = new_path\n lp_transition_t = lp_state[:,numpy.newaxis] + lp_trans 
+ lp_obs[numpy.newaxis,:]\n best_from = numpy.argmax(lp_transition_t, axis=0)\n path = [path[s_from]+[s_to] for s_to, s_from in enumerate(best_from)]\n lp_state = lp_transition_t[best_from, states]\n last_state = numpy.argmax(lp_state)\n return numpy.array(path[last_state])", "def IteratePaths(self):\n self.w = self.setwage(self.K, self.N)\n self.r = self.setrate(self.K, self.N)\n self.b = self.benefit(self.N)\n\n a1, aT = [-1,], []\n\n for q in range(self.Nq):\n if q == 0:\n self.apath[-1] = 0.2\n elif q == 1:\n self.apath[-1] = 0.3\n else:\n self.apath[-1] = max(0,aT[-1]-(aT[-1]-aT[-2])*a1[-1]/(a1[-1]-a1[-2]))\n \n self.npath[-1] = 0\n self.cpath[-1] = self.apath[-1]*(1+self.r) + self.b\n\n for y in range(-2,-(self.T+1),-1): # y = -2, -3,..., -60\n self.apath[y], self.npath[y], self.cpath[y] = self.DirectSolve(y)\n\n aT.append(self.apath[-1])\n a1.append(self.apath[-self.T])\n if (fabs(self.apath[-self.T])<self.tol):\n break\n for y in range(-1,-(self.T+1),-1):\n self.upath[y] = self.util(self.cpath[y],self.npath[y])", "def solve_(self, x, y, board, path):\n if self.SOLVED:\n return\n if self.is_done(board):\n self.print_path(path)\n self.SOLVED = True\n return\n for new_x, new_y in self.next_click(x, y, board):\n if new_x is None or new_y is None:\n return\n new_board = self.click(new_x, new_y, board)\n self.solve_(\n x=0, y=0,\n board=new_board,\n path=path + [((new_x, new_y), new_board)]\n )", "def bi_djikstre(connection_mat):\n n = connection_mat.shape[0]\n \n dist_f, prev_f = {}, {}\n Q_f = list(range(n))\n \n dist_b, prev_b = {}, {}\n Q_b = list(range(n))\n \n for i in Q_f:\n dist_f[i] = np.inf\n dist_f[n-2] = 0.0\n \n for i in Q_b:\n dist_b[i] = np.inf\n dist_b[n-1] = 0.0\n \n done_f = []\n done_b = []\n \n while not (set(done_b) & set(done_f)):\n \n for di, dist, prev, Q, done, connections, end in zip(['A', 'B'],[dist_b, dist_f], [prev_b, prev_f], [Q_b, Q_f], [done_b, done_f], [connection_mat.transpose(), connection_mat], [' ','\\n']):\n\n min_dist = min([dist[key] for key in Q])\n u = [key for key in Q if dist[key] == min_dist][0]\n# print(u, di, end=end)\n\n for v in np.nonzero(connections[:, u])[0]:\n# print(np.nonzero(connections[:, u])[0])\n alt = dist[u]+connections[v, u]\n# print(dist)\n# print(dist[u], alt)\n\n if alt < dist[v]:\n dist[v] = alt\n prev[v] = u\n# print('added to prev', di, prev)\n# print('added to dist', di, dist)\n \n done.append(u)\n Q.remove(u)\n \n meeting_point = list(set(done_b) & set(done_f))[0]\n \n# print('Meeting point:', meeting_point)\n\n path_b=[]\n path_f=[]\n\n# path_f.append(u)\n \n u = meeting_point\n \n while u != n-1:\n# print(u)\n u = prev_b[u]\n path_b.append(u)\n \n u = meeting_point\n\n while u != n-2:\n# print(u)\n u = prev_f[u]\n path_f.append(u)\n \n full_path =path_b[::-1]\n full_path.append(meeting_point)\n full_path.extend(path_f)\n \n return full_path", "def heuristic(self):\r\n # 1.\r\n blacks, whites = 0, 0\r\n weights = [0 for _ in range(6)]\r\n directions = [[-1, -1], [-1, 1], [1, 1], [1, -1]]\r\n user_dir = directions[:2] if self.current_player == 'n' else directions[2:]\r\n for i in range(8):\r\n for j in range(8):\r\n blacks += 1 if self.matrix[i][j] in ['N', 'n'] else 0\r\n whites += 1 if self.matrix[i][j] in ['A', 'a'] else 0\r\n if self.matrix[i][j] == self.current_player or self.matrix[i][j] == self.current_player.upper():\r\n\r\n # numarul de piese rege\r\n if self.matrix[i][j] == self.current_player.upper():\r\n weights[1] += 7.75\r\n\r\n # numarul de piese normale\r\n else:\r\n weights[0] += 5\r\n\r\n # numarul de 
piese de pe baseline in functie de tipul de piesa\r\n # conform strategiilor de joc este o strategie buna sa ai cat mai multe\r\n # piesa pe baseline pentru a preveni creare de piese de tip rege ale adversarului\r\n if self.current_player in ['n', 'N']:\r\n if i == 7:\r\n weights[2] += 4\r\n elif self.current_player in ['a', 'A']:\r\n if i == 0:\r\n weights[2] += 4\r\n\r\n # numarul de piese din mijlocul tablei\r\n # la fel este o strategie buna pentru atac\r\n if 3 <= i <= 4 and 3 <= j <= 4:\r\n weights[3] += 2\r\n\r\n # numar piese vulnerabile\r\n # adica piese ce pot fi capturate de oponent la urmatoare tura\r\n for d in user_dir:\r\n\r\n vx = d[0] + i\r\n vy = d[1] + j\r\n back_x = i - d[0]\r\n back_y = j - d[1]\r\n next_x, next_y = vx + d[0], vy + d[1]\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(back_x, back_y) and self.matrix[back_x][back_y] == '.':\r\n weights[4] -= 3\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(next_x, next_y) and self.matrix[next_x][next_y] == '.':\r\n # daca elimin o piesa rege este o mutare mai buna\r\n if self.matrix[vx][vy] == self.opponent().upper():\r\n weights[5] += 10\r\n else:\r\n weights[5] += 7\r\n\r\n diff = (blacks - whites) if self.current_player == 'n' else (whites - blacks)\r\n # cand sunt mai putin piese, AI adopta o tactica mai ofensiva\r\n if blacks + whites <= 10:\r\n return sum(weights) + diff\r\n return sum(weights)", "def solve():\n # the amount of lattice paths from (0, 0) to (n, k) is (n+k) over n (according to Wikipedia)\n return binomial_coefficient(20 + 20, 20)", "def fn(i, j):\n if grid[i][j] <= 0: return 0\n grid[i][j] *= -1 # mark as visited \n ans = 0\n for ii, jj in (i-1, j), (i, j-1), (i, j+1), (i+1, j): \n if 0 <= ii < m and 0 <= jj < n: \n ans = max(ans, fn(ii, jj) - grid[i][j])\n grid[i][j] *= -1 # backtracking \n return ans", "def solve(self):\n start = datetime.now()\n f = self.function\n while not self.converged():\n self.history.append(self.vertices)\n\n #step 1: sort\n self.order_vertices()\n\n #step 3: reflect\n reflected = self.get_reflected_point()\n if f(*self.vertices[0]) < f(*reflected) < f(*self.vertices[-1]):\n self.reflect()\n continue\n\n #step 4: expand\n if self.reflected_is_best():\n expanded = self.get_expanded_point()\n if f(*expanded) < f(*reflected):\n self.expand()\n else:\n self.reflect()\n continue\n\n #step 5: contract\n contracted = self.get_contracted_point()\n if f(*contracted) < f(*self.vertices[-1]):\n self.contract()\n continue\n\n #step 6: shrink\n self.shrink()\n print(\"optimization took {0}\".format(datetime.now()-start))\n return self.history, self.cache.history", "def floydWarshall(graph):\n \"\"\" initializing the solution matrix same as input graph matrix\n OR we can say that the initial values of shortest distances\n are based on shortest paths considerting no \n intermedidate vertices \"\"\"\n V = len(graph[0])\n dist = [[elem for elem in line] for line in graph]\n \n \"\"\" Add all vertices one by one to the set of intermediate\n vertices.\n ---> Before start of a iteration, we have shortest distances\n between all pairs of vertices such that the shortest\n distances consider only the vertices in set \n {0, 1, 2, .. k-1} as intermediate vertices.\n ----> After the end of a iteration, vertex no. k is\n added to the set of intermediate vertices and the \n set becomes {0, 1, 2, .. 
k}\n \"\"\"\n for k in range(V):\n \n # pick all vertices as source one by one\n for i in range(V):\n \n # Pick all vertices as destination for the\n # above picked source\n for j in range(V):\n \n # If vertex k is on the shortest path from \n # i to j, then update the value of dist[i][j]\n dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j])\n\n for line in dist:\n print line\n\n return dist", "def solve(self):\r\n while not self.done():\r\n self.no_open_cells()\r\n self.all_cells_are_mines()\r\n self.no_mines()\r\n if not self.done():\r\n self.obvious_cells()\r\n if not self.done():\r\n made_progress = self.safe_neighbour_difference()\r\n if made_progress:\r\n continue\r\n if not self.done():\r\n made_progress = self.adjacent_combinations()\r\n if made_progress:\r\n continue\r\n return", "def solve_driv(v, ene, s, n, h):\n\n xs = np.array([(k+1)*h for k in range(n)])\n h2 = h*h\n k = np.sqrt(2.0*ene)\n \n vs = [v(x)-ene for x in xs]\n\n mat = laplacian_mat(n) -2.0 * h2 * scipy.sparse.diags(vs, 0) + bc_outgoing_mat(n, h, k)\n vec = np.array([-2.0*h*h*s(x) for x in xs])\n\n ys = scipy.sparse.linalg.spsolve(mat, vec)\n return (xs, ys)", "def solve_maze(self):\r\n # if there is no maze to solve, cut the method\r\n if not self.generated:\r\n return None\r\n\r\n # initialize with empty path at starting cell\r\n self.path = dict()\r\n current = self.start\r\n\r\n # loop until the ending cell is reached\r\n while True:\r\n while True:\r\n # choose valid direction\r\n # must remain in the grid\r\n # also must not cross a wall\r\n dirNum = random.randint(0,3)\r\n adjacent = self.get_next_cell(current,dirNum,1)\r\n if self.is_valid_direction(current,dirNum):\r\n hasWall = (self.grid[adjacent[0]][adjacent[1]] == 0)\r\n if not hasWall:\r\n break\r\n # add cell and direction to path\r\n self.path[current] = dirNum\r\n\r\n # get next cell\r\n current = self.get_next_cell(current,dirNum,2)\r\n if current == self.end: \r\n break # break if ending cell is reached\r\n\r\n # go to start of path\r\n current = self.start\r\n self.solution.append(current)\r\n # loop until end of path is reached\r\n while not (current == self.end):\r\n dirNum = self.path[current] # get direction\r\n # add adjacent and crossed cells to solution\r\n crossed = self.get_next_cell(current,dirNum,1)\r\n current = self.get_next_cell(current,dirNum,2)\r\n self.solution.append(crossed)\r\n self.solution.append(current)\r\n\r\n self.path = dict()", "def iterativeDeepeningSearch(problem):\n \"*** YOUR CODE HERE FOR TASK 1 ***\"\n\n # Retrieve the init state\n # state model ( (position, depth), path, cost)\n initState = ( (problem.getStartState(), 1) , ['Stop'], 0)\n limit = 1\n while True:\n # Initialization each iteration\n open = util.Stack()\n open.push(initState)\n closed = {}\n\n while not open.isEmpty():\n currState = open.pop()\n currPos = currState[0][0]\n currDepth = currState[0][1]\n currPath = currState[1]\n currCost = currState[2]\n\n closed[currPos] = currCost\n if currDepth <= limit:\n successors = problem.getSuccessors(currPos)\n if len(successors) > 0:\n nextDepth = currDepth + 1\n for each in successors:\n nextCost = currCost + each[2]\n nextPath = currPath + [each[1]]\n if each[0] not in closed.keys() or nextCost < closed[each[0]]:\n temp = ( (each[0], nextDepth), nextPath, nextCost)\n open.push(temp)\n if problem.isGoalState(temp[0][0]):\n return nextPath[1:]\n limit += 1", "def TSP(noOfCities, cityIndicesExcluding1, citiesDistance):\r\n\r\n # Initializing the DP Matrix\r\n # Dictionary because the subset is also to be 
saved alongwith end vertex as index of dictionary\r\n dpMatrixDict = {}\r\n\r\n # Base case\r\n dpMatrixDict[ ( 1, ), 1 ] = 0\r\n\r\n # Iterating to solve bigger subproblems using dynamic programming\r\n for subsetSizeWithout1 in range(1, noOfCities):\r\n\r\n # Subset is of size subsetSizeWithout1 + 1\r\n print(\"COMPUTING. ON ITERATION NUMBER : \" + str(subsetSizeWithout1) + \" OUT OF \" + str(noOfCities - 1) )\r\n\r\n # Reducing computation by ignoring off the smaller subproblems solutions no longer required\r\n if subsetSizeWithout1 > 3:\r\n smallerEfficientDpMatrixDict = {}\r\n for key in dpMatrixDict:\r\n if len(key[0]) == subsetSizeWithout1:\r\n smallerEfficientDpMatrixDict[key] = dpMatrixDict[key]\r\n dpMatrixDict = smallerEfficientDpMatrixDict\r\n\r\n # Getting the subsets reuired\r\n sizeSpecificSubsets = getSubsets(cityIndicesExcluding1, subsetSizeWithout1)\r\n\r\n # Base cases\r\n for subset in sizeSpecificSubsets:\r\n dpMatrixDict[subset, 1] = 99999999\r\n\r\n\r\n for subset in sizeSpecificSubsets:\r\n\r\n # Computing through each possible end vertex\r\n for j in subset[1:]:\r\n\r\n # List to store the candidates for minimums\r\n possibilities = []\r\n\r\n # Computing through each possible last hop\r\n for k in subset:\r\n\r\n # Storing possibilities alongwith the end vertex\r\n if k != j:\r\n tupleCopy = tupleCopyWithoutElement(subset, j)\r\n possibilities.append( dpMatrixDict[tupleCopy, k] + citiesDistance[k, j] )\r\n\r\n # Getting the minimum path from the possible minimum candidates\r\n try:\r\n minimumPath = min(possibilities)\r\n dpMatrixDict[subset, j] = minimumPath\r\n except:\r\n continue\r\n\r\n\r\n # List for storing all final possible path candidates containing all the vertices\r\n # and a last hop between the start and end vertex to make a cycle\r\n finalHamiltonianPathCandidates = []\r\n\r\n # Final Set(and/or Subset) including all the vertices\r\n almostCompletePath = tuple(range(1, noOfCities + 1))\r\n\r\n # Adding the last hop of the cycle of hamiltonian path between the end and start vertex\r\n for j in cityIndicesExcluding1:\r\n finalHamiltonianPathCandidates.append( dpMatrixDict[almostCompletePath, j] + citiesDistance[j, 1] )\r\n\r\n # Getting the final minimum solution\r\n hamiltonianPathSolution = min(finalHamiltonianPathCandidates)\r\n\r\n # Printing the solution\r\n print(\"The optimal(minimum) length Hamiltonian path distance is : \" + str( hamiltonianPathSolution ) )\r\n\r\n return", "def solve(self):\n self.m.optimize()\n if self.m.status == GRB.OPTIMAL:\n self.solution = self.sol_as_mat()\n return self.solution", "def solve(self):\n smallest_f = self.get_smallest_f_cost_unvisited_node()\n smallest_f_node = smallest_f[0]\n\n if smallest_f[1] > 1:\n current_node = self.get_smallest_h_cost_unvisited_node()\n else:\n current_node = smallest_f_node\n if current_node.f_cost == self.inf:\n return\n\n self.set_h_cost(current_node)\n self.unvisited_pos.remove(current_node.pos)\n self.visited_pos.append(current_node.pos)\n neighbours = algo_utils.get_neighbours(current_node, self.grid, self.wall_pos)\n\n for neigh in neighbours:\n neighbour_dist = neigh.g_cost\n current_dist = current_node.g_cost\n new_dist = current_dist + 1\n if neighbour_dist < new_dist:\n continue\n neigh.g_cost = new_dist\n self.set_h_cost(neigh)\n mix_neigh = {neigh.pos: neigh.g_cost}\n self.mix.update(mix_neigh)\n mix_current = {current_node.pos: current_node.g_cost}\n self.mix.update(mix_current)\n\n smallest_f = self.get_smallest_f_cost_unvisited_node()\n smallest_f_node = 
smallest_f[0]\n smallest_h_node = self.get_smallest_h_cost_unvisited_node()\n\n if (\n self.end_pos not in self.unvisited_pos\n or algo_utils.get_smallest_g_cost_unvisited_node(\n self.grid, self.unvisited_pos\n ).g_cost\n == self.inf\n ):\n for key, value in self.mix.items():\n self.mix[key] = round((value * 1.0) / self.end_node.g_cost, 3)\n self.backtrack_path(self.end_node)\n else:\n if smallest_f[1] > 1:\n current_node = smallest_h_node\n else:\n current_node = smallest_f_node\n self.solve()", "def extra(maze):\n # TODO: Write your code here\n heuristic_lookup = {} \n objs = maze.getObjectives()\n corner_list = maze.getObjectives()\n start = maze.getStart()\n path = []\n dim = maze.getDimensions()\n visited = {}\n lookup_table = {}\n p_queue = []\n edgeset = []\n mintree = {}\n start_heuristic = 0 + multi_dot_heuristic_query(maze, start, objs, edgeset, mintree) * 2\n heuristic_lookup[(start, tuple(objs))] = start_heuristic\n start_state = state(start, corner_list)\n lookup_table[state(start, corner_list)] = (start_heuristic, 0, state((-2, -2)))\n p_queue.append((start_heuristic, state(start, corner_list)))\n while p_queue:\n pair = p_queue.pop(0)\n visited[pair[1]] = lookup_table.get(pair[1])[2]\n if not pair[1].getlist():\n current_state = pair[1]\n while current_state != start_state:\n path.append(current_state.getpos())\n current_state = visited.get(current_state)\n path.append(start)\n path.reverse()\n return path\n else: \n list_of_neighbors = maze.getNeighbors(pair[1].getpos()[0], pair[1].getpos()[1])\n for coordinates in list_of_neighbors:\n current_state = state(coordinates)\n if coordinates in pair[1].getlist():\n new_list = copy.copy(pair[1].getlist())\n new_list.remove(coordinates)\n current_state = state(coordinates, new_list)\n else:\n current_state = state(coordinates, pair[1].getlist()) \n if current_state in visited:\n continue\n if current_state in lookup_table:\n if (lookup_table.get(current_state)[0], current_state) in p_queue:\n cost = lookup_table.get(pair[1])[1] + 1\n queried_heuristic = 0\n if (current_state.getpos(), tuple(current_state.getlist())) in heuristic_lookup:\n queried_heuristic = heuristic_lookup.get((current_state.getpos(), tuple(current_state.getlist())))\n else:\n queried_heuristic = multi_dot_heuristic_query(maze, current_state.getpos(), current_state.getlist(), edgeset, mintree) * 2\n heuristic_lookup[(current_state.getpos(), tuple(current_state.getlist()))] = queried_heuristic\n heuristic = queried_heuristic + cost\n old_heuristic = lookup_table.get(current_state)[0]\n if heuristic < lookup_table.get(current_state)[0]:\n lookup_table[current_state] = (heuristic, cost, pair[1])\n p_queue.remove((old_heuristic, current_state))\n bisect.insort(p_queue, (heuristic, current_state))\n else:\n cost = lookup_table.get(pair[1])[1] + 1\n queried_heuristic = 0\n if (current_state.getpos(), tuple(current_state.getlist())) in heuristic_lookup:\n queried_heuristic = heuristic_lookup.get((current_state.getpos(), tuple(current_state.getlist()))) \n else:\n queried_heuristic = multi_dot_heuristic_query(maze, current_state.getpos(), current_state.getlist(), edgeset, mintree) * 2\n heuristic_lookup[(current_state.getpos(), tuple(current_state.getlist()))] = queried_heuristic\n heuristic = queried_heuristic + cost\n lookup_table[current_state] = (heuristic, cost, pair[1])\n bisect.insort(p_queue, (heuristic, current_state))\n\n return []", "def solve(mm):\n model = mm.model\n model.optimize()\n\n\n mm.optimal = model.status\n mm.take_snapshot()\n print 
\"\\nSnapshot saved as {}\".format(mm.filename)\n mm.solve_count += 1\n mm.update_filename()\n\n if model.status == gp.GRB.OPTIMAL:\n # Write a csv of the solution data\n write_solution(mm)\n\n\n return True", "def viterbi(self, O):\n\n predecessor = numpy.ones([len(O), len(self)], dtype = int) * -1\n delta = numpy.zeros([len(O), len(self)])\n B = numpy.zeros([len(self), len(O)])\n\n for j in range(len(self.S)):\n delta[0, j] = self.log_P[j] + self.S[j].b(O[0])\n\n for t in range(1, delta.shape[0]):\n for j in range(delta.shape[1]):\n #\n _temp_ = delta[t - 1, :] + self.A.log_transitions[:, j]\n #\n _from_ = numpy.argmax(_temp_)\n predecessor[t, j] = _from_\n delta[t, j] = delta[t - 1, _from_] + self.S[j].b(O[t])\n #\n #\n if self.A.force_to_one_terminal_state:\n _best_ = len(delta[-1]) - 1 # According to Transitions.py the terminal state is the last one\n else:\n _best_ = numpy.argmax(delta[-1, :])\n seq = numpy.ones(len(O)) * -1\n t = len(O) - 1\n i = _best_\n while t > 0:\n seq[t] = i\n i = predecessor[t, i]\n t = t - 1\n #\n return delta[-1, _best_], seq", "def two_opt(route, adjacency_matrix, max_chain_length):\n cost_list = []\n chain = 0\n while chain < max_chain_length:\n for i in range(1, len(route) - 2):\n for j in range(i + 1, len(route)):\n chain += 1\n\n if j - i == 1: continue\n\n cost_list.append(calculate_cost(route,adjacency_matrix)[1])\n\n if cost_change(adjacency_matrix, route[i - 1], route[i], \\\n route[j - 1], route[j]) < -0.001:\n route[i:j] = route[j - 1:i - 1:-1]\n\n if chain == max_chain_length:\n return route, cost_list\n\n return route, cost_list", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n class Node:\n def __init__(self, state, parent, action, pathCost):\n self.state = state\n self.parent = parent\n self.action = action\n self.pathCost = pathCost\n\n def solution(self):\n path = list()\n tempNode = self\n while tempNode.state != problem.getStartState():\n path.insert(0, tempNode.action)\n tempNode = tempNode.parent\n return path\n\n def __eq__(self, other):\n if isinstance(other, Node):\n return self.state == other.state\n\n\n def childNode(successor, parent, action, stepCost):\n pathCost = parent.pathCost + stepCost\n child = Node(successor, parent, action, pathCost)\n return child\n\n initialNode = Node(problem.getStartState(), None, None, 0)\n frontier = util.PriorityQueue() #ucs uses a priority queue\n frontier.push(initialNode, initialNode.pathCost)\n explored = set()\n\n while not frontier.isEmpty() :\n nextNode = frontier.pop() #extract from the start of the queue\n if problem.isGoalState(nextNode.state):\n return nextNode.solution()\n explored.add(nextNode.state)\n for successor, action, stepCost in problem.getSuccessors(nextNode.state):\n child = childNode(successor, nextNode, action, stepCost)\n if child.state not in explored:\n frontier.update(child, child.pathCost) #we only check if state is in explored because update does the other\n return []\n util.raiseNotDefined()", "def viterbi_paths(self, X: List[np.ndarray], **kwargs) -> Tuple[List[np.ndarray], List[np.ndarray]]:", "def _brute_force(self):\n if self.N > 9:\n #print(\"Input set is too big for brute force estimation.\")\n self.best_path = None\n else:\n #print(\"Number of permutations to check: {}\".format(math.factorial(self.N)))\n #init = \n A = self._P + np.finfo(np.float).eps\n A = (A + (1-A).T)/2\n for i in range(A.shape[0]):\n A[i,i] = np.finfo(np.float).eps\n init = (A>0.5).sum(axis=1).argsort()[::-1]\n #--- use log(p(Y=1\\mid s',s)) to shift multiplication to 
sum\n lP = np.log(A)\n for i in range(lP.shape[0]):\n lP[i,i] = 0\n #init_cost = 0\n ##--- lP[x:x+1] está MAL hay que sumar respecto a i+1 en z, no en lP.\n #for i in range(len(init)-1):\n # init_cost += lP[init[i],init[i+1]:].sum()\n z_star = []\n z_cost = -np.inf\n for z in permutations(range(self.N)):\n cost = 0\n for i in range(len(z)-1):\n cost += lP[z[i],z[i+1:]].sum()\n if cost > z_cost:\n z_cost = cost\n z_star = z\n self.best_path = np.array(z_star)", "def create_matrices(maze, reward, penalty_s, penalty_l, prob):\n \n r, c = np.shape(maze)\n states = r*c\n p = prob\n q = (1 - prob)*0.5\n \n # Create reward matrix\n path = maze*penalty_s\n walls = (1 - maze)*penalty_l\n combined = path + walls\n \n combined[-1, -1] = reward\n \n R = np.reshape(combined, states)\n \n # Create transition matrix\n T_up = np.zeros((states, states))\n T_left = np.zeros((states, states))\n T_right = np.zeros((states, states))\n T_down = np.zeros((states, states))\n \n wall_ind = np.where(R == penalty_l)[0]\n\n for i in range(states):\n # Up\n if (i - c) < 0 or (i - c) in wall_ind :\n T_up[i, i] += p\n else:\n T_up[i, i - c] += p\n \n if i%c == 0 or (i - 1) in wall_ind:\n T_up[i, i] += q\n else:\n T_up[i, i-1] += q\n \n if i%c == (c - 1) or (i + 1) in wall_ind:\n T_up[i, i] += q\n else:\n T_up[i, i+1] += q\n \n # Down\n if (i + c) > (states - 1) or (i + c) in wall_ind:\n T_down[i, i] += p\n else:\n T_down[i, i + c] += p\n \n if i%c == 0 or (i - 1) in wall_ind:\n T_down[i, i] += q\n else:\n T_down[i, i-1] += q\n \n if i%c == (c - 1) or (i + 1) in wall_ind:\n T_down[i, i] += q\n else:\n T_down[i, i+1] += q\n \n # Left\n if i%c == 0 or (i - 1) in wall_ind:\n T_left[i, i] += p\n else:\n T_left[i, i-1] += p\n \n if (i - c) < 0 or (i - c) in wall_ind:\n T_left[i, i] += q\n else:\n T_left[i, i - c] += q\n \n if (i + c) > (states - 1) or (i + c) in wall_ind:\n T_left[i, i] += q\n else:\n T_left[i, i + c] += q\n \n # Right\n if i%c == (c - 1) or (i + 1) in wall_ind:\n T_right[i, i] += p\n else:\n T_right[i, i+1] += p\n \n if (i - c) < 0 or (i - c) in wall_ind:\n T_right[i, i] += q\n else:\n T_right[i, i - c] += q\n \n if (i + c) > (states - 1) or (i + c) in wall_ind:\n T_right[i, i] += q\n else:\n T_right[i, i + c] += q\n \n T = [T_up, T_left, T_right, T_down] \n \n return T, R", "def uniformCostSearch(problem):\n # Initialization\n startState = problem.getStartState()\n\n if problem.isGoalState(startState):\n return [] # No action needed\n\n closedSet = set()\n queue = util.PriorityQueue()\n queue.push((startState, None, 0), 0)\n cameFrom = dict() # Stores most efficient previous action\n gScore = dict() # Stores current cost from start\n gScore[startState] = 0\n\n # Search\n while queue.heap: # Do while open set is not empty\n (currentState, action, cost) = queue.pop()\n\n if problem.isGoalState(currentState):\n # Goal reached. 
Construct path\n path = util.Queue() \n \n # Backtrack to start state\n while currentState is not startState and currentState in cameFrom:\n currentState, action = cameFrom[currentState]\n path.push(action)\n\n return path.list\n\n # Expand current state\n closedSet.add(currentState) \n for successor in problem.getSuccessors(currentState):\n successorState, successorAction, successorCost = successor\n \n if successorState in closedSet:\n continue # Skip already expanded states\n \n # Initialize entries not already in dictionaries to a big number\n if currentState not in gScore:\n gScore[currentState] = 999999999999\n if successorState not in gScore:\n gScore[successorState] = 999999999999\n\n # Compare this path to best path\n gTentative = gScore[currentState] + successorCost\n if gTentative >= gScore[successorState]:\n continue # Not a better path\n\n # A better path is found, store this path\n cameFrom[successorState] = (currentState, successorAction)\n gScore[successorState] = gTentative # Store new cost\n # Update the priority queue\n queue.update(successor, gScore[successorState])", "def solution(n, s, a, b, fares):\n\n table = [[float(\"inf\")]*n for _ in range(n)]\n for (c, d, f) in fares:\n table[c-1][d-1] = f\n table[d-1][c-1] = f\n\n for idx in range(n):\n table[idx][idx] = 0\n\n # do floyd to find all shortest paths\n for kdx in range(n):\n for idx in range(n):\n for jdx in range(n):\n table[idx][jdx] = min(table[idx][jdx], table[idx][kdx] + table[kdx][jdx])\n \n# for row in table:\n# print(row)\n \n answer = table[s-1][a-1] + table[s-1][b-1]\n # print(\"seperate:\", answer)\n for idx in range(n):\n # print(\"idx 경유:\", idx, table[s-1][idx] + table[idx][a-1] + table[idx][b-1])\n answer = min(answer, table[s-1][idx] + table[idx][a-1] + table[idx][b-1])\n\n # print(\"answer:\", answer)\n return answer", "def solve(matrix):\n n = len(matrix)\n assert n > 0\n assert len(matrix[0]) == n\n # create 3d matrix\n path = list(list(list(-1\n for _ in range(n))\n for _ in range(n))\n for _ in range(n))\n graph = list(list(list(0.0\n for _ in range(n))\n for _ in range(n))\n for _ in range(n))\n for i in range(n):\n for j in range(n):\n graph[i][j][0] = matrix[i][j]\n path[i][j][0] = i\n return resolve_internal(graph, path)", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n queue = util.PriorityQueue() # PrioritQueue for searshing the graph/ it expand the node with the lowest cost\n visited = [] # Keep track of visited nodes\n path = [] # Keep track of the path\n start =problem.getStartState() # The start node\n\n queue.push((start, path,0), 0) \n \n while not queue.isEmpty():\n (vrtx, path, costparent) = queue.pop() \n if vrtx not in visited: \n if problem.isGoalState(vrtx):\n return [p[1] for p in path]\n\n visited.append(vrtx) \n for successor in problem.getSuccessors(vrtx):\n cost = successor[2]+ costparent\n queue.push((successor[0], path+[successor],cost),cost)\n \n\n util.raiseNotDefined()", "def solve_tour(self):\n\t\tboard = [[-1 for _ in range(self.N)]for _ in range(self.N)]\n\t\tboard[0][0] = 0\n\n\t\tz = self.find_tour(board, 0, 0, 1)\n\t\tif z:\n\t\t\tfor i in range(self.N):\n\t\t\t\tfor j in range(self.N):\n\t\t\t\t\tself.solution.append(board[i][j])\n\t\t\tprint board\n\t\t\treturn self.solution\n\t\t\t\t\n\t\telse:\n\t\t\tprint(\"No solution\")", "def JacobiSolve_Short(A,b,tol=1.0e-6,max_iterations=100,LOUD=False):\n [Nrow, Ncol] = A.shape\n assert Nrow == Ncol\n N = Nrow\n converged = False\n iteration = 1\n x = np.random.rand(N) #random initial guess \n x_new 
= np.zeros(N)\n while not(converged):\n x = x_new.copy() #replace old value\n x_new *= 0 #reset x_new\n #update is (b - whole row * x + diagonal part * x)/diagonal\n x_new = (b - np.dot(A,x)+ A.diagonal()*x)/A.diagonal()\n relative_change = np.linalg.norm(x_new-x)/np.linalg.norm(x_new)\n if (LOUD):\n print(\"Iteration\",iteration,\": Relative Change =\",relative_change)\n if (relative_change < tol) or (iteration >= max_iterations):\n converged = True\n iteration += 1\n return x_new", "def iterations(self):\n i = 0\n stateVectorConv = self.stateVectorConvThreshold * 1.0e6\n n = len(self.model.stateVector)\n self.answer = None\n \n while ((i < self.maxiter) \n and (stateVectorConv > self.stateVectorConvThreshold)\n ):\n \n F, K = self.model()\n \n if np.any(np.isnan(F)) or np.any(np.isnan(K)):\n m = \"Iteration {0} failure of model.\"\n raise OptimalEstimationException(m.format(i))\n \n if self.model.verbose > 0:\n self.model.plot(i+1, stateVectorConv)\n \n try:\n self.DecomposeJacobian(K)\n except np.linalg.LinAlgError:\n m = \"Iteration {0} failure in decomposition.\"\n raise OptimalEstimationException(m.format(i))\n \n statevectorOffset = (self.V.T * self.priorSinvh * \n np.matrix(np.array(self.model.stateVector) - np.array(self.model.prior) ).T)\n measurementOffset = (self.U.T * self.errSinvh * \n np.matrix(self.model.observation - F).T)\n \n newState = np.matrix((self.w * \n (measurementOffset.A1 + \n self.w * statevectorOffset.A1))/(self.w**2+1.0)).T\n newState = self.priorSh * self.V * newState\n newState = newState.A1 + self.model.prior\n \n stateVectorConv = ((np.matrix(newState - self.model.stateVector) * \n self.Sinv * np.matrix(newState - self.model.stateVector).T)/n)[0,0]\n self.model.stateVector = newState\n\n if i == 0:\n \n stateVectorConv = self.stateVectorConvThreshold * 1.0e6\n \n print('cost Function for iteration {}:'.format(i), self.costFunction)\n\n i += 1\n \n F, K = self.model()\n if self.model.verbose > 0:\n self.model.plot(i+1, stateVectorConv)\n \n try:\n self.DecomposeJacobian(K)\n except np.linalg.LinAlgError:\n raise OptimalEstimationException(\"Failure in decomposition.\")\n \n Wplus2 = np.matrix(np.diag(1.0/(self.w**2+1.0)))\n self.model.covariance = (self.priorSh * self.V * Wplus2 * \n self.V.T * self.priorSh)\n \n\n \n return i, stateVectorConv", "def uniform_cost_search(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n fringe = util.PriorityQueue()\r\n path = set()\r\n final = []\r\n acts = dict()\r\n state = problem.get_start_state()\r\n fringe.push(state, 0)\r\n\r\n while (True):\r\n state = fringe.pop()\r\n path.add(state)\r\n states = problem.get_successors(state)\r\n acts[state] = states[:]\r\n if problem.is_goal_state(state):\r\n break\r\n\r\n #states = problem.get_successors(state)\r\n # push into fringe\r\n for stat in states:\r\n if stat[0] not in path:\r\n fringe.push(stat[0], stat[1].piece.get_num_tiles()) #problem.get_cost_of_actions([stat[1]])\r\n\r\n while (True):\r\n if state == problem.get_start_state():\r\n break\r\n for key, val in acts.items():\r\n for va in val:\r\n if va[0] == state:\r\n final.append(va[1])\r\n state = key\r\n break\r\n else:\r\n continue\r\n break\r\n\r\n final.reverse()\r\n\r\n return final", "def __search_path(self, start_node, goal_node):\n\n path = []\n queue = PriorityQueue()\n queue.put((0, start_node))\n visited = set(start_node)\n\n branch = {}\n found = False\n \n while not queue.empty():\n item = queue.get()\n current_cost = item[0]\n current_node = item[1]\n\n if current_node == goal_node: \n found = True\n 
break\n else:\n for next_node in self._route_graph[current_node]:\n cost = self._route_graph.edges[current_node, next_node]['weight']\n new_cost = current_cost + cost + self.__heuristic(next_node, goal_node)\n\n if next_node not in visited: \n visited.add(next_node) \n queue.put((new_cost, next_node))\n\n branch[next_node] = (new_cost, current_node)\n\n path = []\n path_cost = 0\n if found:\n # retrace steps\n path = []\n n = goal_node\n path_cost = branch[n][0]\n while branch[n][1] != start_node:\n path.append(branch[n][1])\n n = branch[n][1]\n path.append(branch[n][1])\n else:\n print(\"Path Not Found\")\n\n return path[::-1], path_cost", "def solve(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n\n\n\n\n path = [starting_car_location]\n dict = {}\n index = 0\n for i in range(len(list_of_locations)):\n if list_of_locations[i] == starting_car_location:\n index = i\n\n path = [index]\n\n G, m = adjacency_matrix_to_graph(adjacency_matrix)\n\n home_indexes = []\n\n for home in list_of_homes:\n for i in range(len(list_of_locations)):\n if list_of_locations[i] == home:\n home_indexes.append(i)\n break\n\n new_adjacency = [[\"x\" for i in range(len(list_of_locations))] for j in range(len(list_of_locations))]\n\n # for sake of figuring out where to walk\n for home in home_indexes:\n di_path = nx.dijkstra_path(G, index, home)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n\n for home1 in home_indexes:\n for home2 in home_indexes:\n if not home1 == home2:\n di_path = nx.dijkstra_path(G, home1, home2)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n\n\n\n G2, m = adjacency_matrix_to_graph(new_adjacency)\n\n all_driving_path = list(nx.dfs_edges(G2))\n\n\n\n\n walking_to = []\n walking_from = {}\n\n for i in range(len(new_adjacency)):\n if i in home_indexes:\n count = 0\n edge_to = 0\n for j in range(len(new_adjacency)):\n if new_adjacency[i][j] != \"x\":\n count += 1\n edge_to = j\n\n #must ensure that this is not a home that we are already dropping someone off at, otherwise it will cut off a line of two homes\n if count == 1 and i != index and i not in walking_from.keys():\n new_adjacency[i][edge_to] = \"x\"\n new_adjacency[edge_to][i] = \"x\"\n walking_to.append(i)\n if edge_to in walking_from:\n walking_from[edge_to] = walking_from[edge_to] + [i]\n else:\n walking_from[edge_to] = [i]\n\n #\n # for i in range(len(all_driving_path) - 1):\n # #if first vertex in edge is the same, we should walk\n # if all_driving_path[i][0] == all_driving_path[i + 1][0]:\n # print(all_driving_path[i][0])\n # print(all_driving_path[i][1])\n # #get rid of only edge connected to this home\n # new_adjacency[all_driving_path[i][0]][all_driving_path[i][1]] = \"x\"\n # new_adjacency[all_driving_path[i][1]][all_driving_path[i][0]] = \"x\"\n # walking_to.append(all_driving_path[i][1])\n # if all_driving_path[i][0] in walking_from:\n # walking_from[all_driving_path[i][0]] = walking_from[all_driving_path[i][0]] + [all_driving_path[i][1]]\n # else:\n # walking_from[all_driving_path[i][0]] = [all_driving_path[i][1]]\n\n\n\n dropoff_locations = list(walking_from.keys())\n for loc in dropoff_locations:\n if loc in home_indexes:\n 
dropoff_locations.remove(loc)\n\n\n for loc in dropoff_locations:\n di_path = nx.dijkstra_path(G, loc, home)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n for loc in dropoff_locations:\n for home in home_indexes:\n di_path = nx.dijkstra_path(G, loc, home)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n\n G2, m = adjacency_matrix_to_graph(new_adjacency)\n # G = G2\n # pos=nx.spring_layout(G2)\n # nx.draw_networkx_nodes(G2,pos)\n # nx.draw_networkx_labels(G2, pos)\n # nx.draw_networkx_edges(G2,pos,width=1.0,alpha=0.5)\n #\n # plt.draw()\n # plt.show()\n\n # condensed shortest paths to edges - use G3 for real\n\n new_adjacency2 = [[\"x\" for i in range(len(list_of_locations))] for j in range(len(list_of_locations))]\n\n for home in home_indexes:\n if home not in walking_to:\n di_path = nx.dijkstra_path(G2, index, home)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n\n for home1 in home_indexes:\n for home2 in home_indexes:\n if not home1 == home2 and home1 not in walking_to and home2 not in walking_to:\n di_path = nx.dijkstra_path(G2, home1, home2)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n for loc in dropoff_locations:\n di_path = nx.dijkstra_path(G2, index, loc)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n for loc in dropoff_locations:\n for home in home_indexes:\n di_path = nx.dijkstra_path(G2, loc, home)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n\n\n\n final_G, m = adjacency_matrix_to_graph(new_adjacency2)\n drive_path = list(nx.dfs_edges(final_G, source=index))\n drive_path.append(index)\n\n mst = nx.minimum_spanning_tree(final_G)\n\n\n\n new_mst = nx.MultiGraph(mst)\n for edge in mst.edges():\n new_mst.add_edge(edge[0], edge[1])\n\n\n if new_mst.degree[index] != 0:\n to_remove = []\n for node in new_mst:\n if (new_mst.degree[node] == 0):\n to_remove.append(node)\n new_mst.remove_nodes_from(to_remove)\n\n eulerian = list(nx.eulerian_circuit(new_mst, index))\n\n path = []\n for edge in eulerian:\n path.append(edge[0])\n\n path.append(eulerian[len(eulerian) - 1][1])\n\n already_seen = []\n to_remove = []\n for i in range(len(path) - 1):\n if path[i] in already_seen:\n to_remove.append(i)\n else:\n 
already_seen.append(path[i])\n\n new_path = []\n for i in range(len(path) - 1):\n if i not in to_remove:\n new_path.append(path[i])\n path = new_path\n print(eulerian)\n else:\n path = [index]\n print(path)\n\n\n\n\n\n\n\n # print(path)\n final_path = []\n for node in path:\n if node == index:\n final_path.append(node)\n # print(\"Index: \", node)\n elif node in home_indexes and node not in walking_to:\n final_path.append(node)\n # print(\"Home but not walking: \", node)\n elif node in dropoff_locations:\n final_path.append(node)\n # print(\"Dropoff loc: \", node)\n final_path.append(index)\n #print(walking_from)\n # print(final_path)\n # nx.draw(mst)\n # plt.draw()\n # plt.show()\n for node in final_path:\n if node in walking_from and node in home_indexes:\n dict[node] = [node] + walking_from[node]\n elif node in home_indexes:\n dict[node] = [node]\n elif node in walking_from:\n dict[node] = walking_from[node]\n\n very_final_path = []\n for i in range(len(final_path) - 1):\n condensed_path = nx.dijkstra_path(G2, final_path[i], final_path[i+1])\n for j in range(len(condensed_path) - 1):\n if condensed_path[j] != condensed_path[j + 1]:\n very_final_path.append(condensed_path[j])\n\n if len(very_final_path) >= 1 and [len(very_final_path) - 1] != index:\n very_final_path.append(index)\n\n if len(very_final_path) == 0:\n very_final_path = [index]\n\n print(very_final_path)\n print(dict)\n\n\n path2 = list(nx.dfs_preorder_nodes(mst, index))\n\n final_path2 = []\n for node in path2:\n if node == index:\n final_path2.append(node)\n # print(\"Index: \", node)\n elif node in home_indexes and node not in walking_to:\n final_path2.append(node)\n # print(\"Home but not walking: \", node)\n elif node in dropoff_locations:\n final_path2.append(node)\n # print(\"Dropoff loc: \", node)\n final_path2.append(index)\n\n\n for node in final_path2:\n if node in walking_from and node in home_indexes:\n dict[node] = [node] + walking_from[node]\n elif node in home_indexes:\n dict[node] = [node]\n elif node in walking_from:\n dict[node] = walking_from[node]\n\n very_final_path2 = []\n for i in range(len(final_path2) - 1):\n condensed_path = nx.dijkstra_path(G2, final_path2[i], final_path2[i+1])\n for j in range(len(condensed_path) - 1):\n if condensed_path[j] != condensed_path[j + 1]:\n very_final_path2.append(condensed_path[j])\n\n if len(very_final_path2) >= 1 and [len(very_final_path2) - 1] != index:\n very_final_path2.append(index)\n\n if len(very_final_path2) == 0:\n very_final_path2 = [index]\n\n opt1 = cost_of_solution(G, very_final_path, dict)\n opt2 = cost_of_solution(G, very_final_path2, dict)\n\n ultra_final_path = []\n if (opt1 <= opt2):\n ultra_final_path = very_final_path\n else:\n ultra_final_path = very_final_path2\n\n return ultra_final_path, dict\n\n pass", "def solve(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n\n loc_map = {}\n drop_off_dict = {}\n num_home_visited = 0\n\n \"\"\"\n for i in range(len(list_of_locations)):\n loc_map[i] = list_of_locations[0]\n \"\"\"\n\n home_indexes = convert_locations_to_indices(list_of_homes, list_of_locations)\n start = list_of_locations.index(starting_car_location)\n graph, msg = adjacency_matrix_to_graph(adjacency_matrix)\n num_homes = len(list_of_homes)\n\n car_path = []\n all_paths = dict(nx.all_pairs_dijkstra(graph))\n visited = set()\n\n #print(start)\n car_path.append(start)\n current_node = start\n\n if start in home_indexes:\n visited.add(start)\n drop_off_dict[start] = [start]\n num_home_visited += 1\n\n 
while num_home_visited < num_homes:\n dist_dict = all_paths.get(current_node)[0]\n paths_dict = all_paths.get(current_node)[1]\n\n dist_dict = {k:v for (k,v) in dist_dict.items() if k not in visited and k in home_indexes}\n min_dist = min(dist_dict.values())\n min_list = [k for k in dist_dict.keys() if dist_dict[k] <= min_dist]\n #print(dist_dict.values())\n target = min_list[0]\n drop_off_dict[target] = [target]\n #print(target+1)\n #print(target)\n car_path.pop()\n car_path.extend(paths_dict[target])\n\n visited.add(target)\n current_node = target\n num_home_visited += 1\n\n paths_dict = all_paths.get(current_node)[1]\n car_path.pop()\n car_path.extend(paths_dict[start])\n #print((drop_off_dict.keys()))\n #car_path = [start, ...., start]\n #drop_off_dict = {drop_off_loc: [home1, home2, ...] }\n\n return car_path, drop_off_dict", "def main():\n\n N, k = map(int, sys.stdin.readline().split())\n initial_state = tuple( map(int, sys.stdin.readline().split()) )\n final_state = tuple( map(int, sys.stdin.readline().split()) )\n\n init_path = [initial_state]\n all_paths = [init_path]\n\n visited_states = set()\n visited_states.add(initial_state)\n\n while all_paths:\n popped_path = all_paths.pop(0)\n current_state = popped_path[-1]\n\n if current_state == final_state:\n solution = popped_path\n break\n\n for state, action in generate_moves(current_state, N, k).items():\n if state not in visited_states:\n visited_states.add(state)\n new_path = popped_path + [action,state]\n all_paths.append(new_path)\n\n display_solution(solution)", "def BFS(maze: list, start: tuple, goal: tuple):\n n = len(maze) # Get the dimension of the maze\n\n #========================================#\n # Some data checking statements\n\n if (not is_valid(start, n)):\n print(\"BFS: Start indices outside maze dimensions\")\n return False\n elif (not is_valid(goal, n)):\n print(\"BFS: Goal indices outside maze dimensions\")\n return False\n\n # End data checking statements\n #========================================#\n\n number_of_nodes_visited = 0\n visited = copy.deepcopy(maze) # We can use a copy of the maze to keep track of visited squares (Considered using a set here, thought that time efficiency was important)\n # visited = list(map(list, maze)) # Alternative to using copy.deepcopy\n\n # Initialize a matrix of the same size as maze where each value is None.\n previous = [[None for i in range(n)] for j in range(n)]\n\n queue = deque() # Define our queue of \"fringe\" squares\n queue.append(start) # Push the start square into our queue\n visited[start[0]][start[1]] = 1 # Set our start to visited\n\n while (len(queue)): # While there exists items in the queue\n current = queue.popleft() # Pop the square at index 0\n number_of_nodes_visited += 1 # Increase number of nodes visited\n\n if (current == goal): # If current is the goal, we found it!\n # We now want to traverse back to make a path using our 'previous' matrix\n path = []\n while (current != None):\n path.append(current)\n current = previous[current[0]][current[1]]\n path.reverse()\n return (True, path, number_of_nodes_visited)\n\n current_i, current_j = current # Unpack the current pair\n \n # Now we want to add all unvisited squares that are possible to get to from the current square\n for i in range(len(nearby_offsets)):\n offset_i, offset_j = nearby_offsets[i]\n possible = (current_i + offset_i, current_j + offset_j)\n # print(f\"Current possible: {possible_i} {possible_j}\") # DEBUG\n if (is_valid(possible, n)): # If the calculated square is within the maze 
matrix\n # If possible has not been visited yet\n if (not visited[possible[0]][possible[1]]):\n queue.append(possible) # Add possible to our queue\n # Set possible to visited\n visited[possible[0]][possible[1]] = 1\n # Set the previous square for possible to the current square\n previous[possible[0]][possible[1]] = current\n # If the while loop goes out, and the queue is empty, then there is no possible path\n return (False, [], number_of_nodes_visited)", "def solve(board):\r\n \r\n #An O(2mn) time solution; the first O(mn) traversal is to preform a bfs on all tiles attached to edge 'O' tiles (can't convert to 'X's); the second is to convert all remaining 'O's into 'X's\r\n \r\n def bfs(curr,r,c):\r\n if not curr: return\r\n prev = len(curr)\r\n for n in range(0,prev):\r\n i,j = curr[n][0],curr[n][1]\r\n board[i][j] = 'A'\r\n for x,y in [(-1, 0), (0, -1), (1, 0), (0, 1)]:\r\n x_n = i+x\r\n y_n = j+y\r\n if x_n >= 0 and x_n < r and y_n >= 0 and y_n < c and board[x_n][y_n] == \"O\":\r\n curr += [(x_n,y_n)]\r\n bfs(curr[prev:],r,c)\r\n\r\n \r\n q,r,c = [],len(board),len(board[0])\r\n if not r or q: return\r\n\r\n for i in range(r):\r\n for j in range(c):\r\n if (i==0 or j==0 or i==r-1 or j==c-1) and board[i][j] == \"O\":\r\n q += [(i,j)]\r\n \r\n bfs(q,r,c)\r\n\r\n for i in range(r):\r\n for j in range(c): \r\n if board[i][j] == \"O\": \r\n board[i][j] = \"X\"\r\n elif board[i][j] == \"A\":\r\n board[i][j] = \"O\"\r\n \r\n return", "def breadthFirstSearchPaths(problem):\n #import pdb;pdb.set_trace()\n frontier = util.Queue()\n start_node = problem.getStartState()\n if problem.isGoalState(start_node):\n return ['Stop']\n frontier.push((start_node,()))\n explored = set()\n while True:\n if frontier.isEmpty():\n return []\n node = frontier.pop()\n\n #explored.add(node[0])\n # exploratory code for SUPER-optimal solution:\n # by saving the path in explored, we assure that we explore the same cell even if\n # two different actions go through it:\n explored.add(node)\n for successor in problem.getSuccessors(node[0]):\n nextState, action, cost = successor\n if nextState in explored or nextState in [f[0] for f in frontier.list]:\n continue\n actions = node[1]\n next_actions = actions + (action,)\n new_node = (nextState, next_actions)\n if problem.isGoalState(new_node[0]):\n return new_node[1]\n frontier.push(new_node)\n #print frontier.list\n return []", "def solve(m):\n\t\n #with the assumption that at least one terminal state is given:\n if(len(m)==2 or len(m)==1): return [1,1]\n \n #Normalizing the in. matrix and identifying the trans./abs. 
states:\n m = normalizeProbabilityMatrix(m)\n t = getTransientStates(m)\n a = getAbsorbingStates(m)\n\t\n if len(a) >0:\n print( str(len(a)) + \" absorbing state\" + (\"\" if len(a)<=1 else \"s\" ))\n else:\n print(\"No absorbing state detected\")\n return\n \n #Getting the matrices Q and R as in the canonical form:\n Q = getQ(m,t)\n R = getR(m,t,a)\n I = getIdentity(len(Q))\n I_Q = subtractMatrices(I, Q)\n \n #Getting the fundamental matrix\n N = invertMatrix(I_Q)\n F = multiplyMatrices(N,R)\n \n #packing the result with a common denominator:\n gcd = getGCD(F[0]).denominator\n res=[]\n sum = 0\n for r in F[0]:\n val = int(r.numerator*(gcd/r.denominator))\n sum+=val\n res.append(val)\n res.append(sum) \n return res", "def TSP_ILP(G):\n V1 = range(len(G))\n n, V = len(G), set(V1)\n model = Model() # binary variables indicating if arc (i,j) is used\n # on the route or not\n x = [[model.add_var(var_type=BINARY) for j in V] for i in V]\n # continuous variable to prevent subtours: each city will have a\n # different sequential id in the planned route except the 1st one\n y = [model.add_var() for i in V]\n # objective function: minimize the distance\n model.objective = minimize(xsum(G[i][j]*x[i][j] for i in V for j in V))\n\n # constraint : leave each city only once\n for i in V:\n model += xsum(x[i][j] for j in V - {i}) == 1\n # constraint : enter each city only once\n for i in V:\n model += xsum(x[j][i] for j in V - {i}) == 1 # subtour elimination\n for (i, j) in product(V - {0}, V - {0}):\n if i != j:\n model += y[i] - (n+1)*x[i][j] >= y[j]-n # optimizing\n\n model.verbose = 0\n model.optimize() # checking if a solution was found\n\n if model.num_solutions:\n nc = 0 # cycle starts from vertex 0\n cycle = [nc]\n while True:\n nc = [i for i in V if x[nc][i].x >= 0.99][0]\n cycle.append(nc)\n if nc == 0:\n break\n\n return (model.objective_value, cycle)", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n # Priority Queue to hold the node along with the path taken from the start node to reach that node\n pqueue = PriorityQueue()\n #Set to hold the node explored.\n explorednode = set()\n # Get the start node.\n startnode = problem.getStartState()\n # Push the starting node on the Queue along with an empty set to know the direction in order to reach the node.\n pqueue.push((startnode,[]),0)\n\n # Loop till the priority queue is empty\n while pqueue.isEmpty() is not True:\n # Pop the currentnode and the direction from the priority queue\n (currentnode,direction) = pqueue.pop()\n # Check if the currentnode is not in the explored node.\n if currentnode not in explorednode:\n # We will now add the node to set of explored node.\n explorednode.add(currentnode)\n # If the node is the goal. 
We made it!!\n if problem.isGoalState(currentnode):\n # The direction holds the way to reach till the goal from the start node.\n return direction\n # Loop for each successor(child) of the current node.\n for (successor, action, stepCost) in problem.getSuccessors(currentnode):\n # Add the successor to the queue along with the path to reach it.\n if successor not in explorednode:\n # Add the successor to the queue along with the path to reach it.\n pqueue.push((successor, direction + [action]), problem.getCostOfActions(direction + [action]))\n util.raiseNotDefined()", "def change_way(coins, opponentLocation, player_location):\n global best_weight, best_path\n dist_matrix, route_matrix = u.update_dists_from_each(dists_matrix, routes_matrix, player_location, mazeMap, coins)\n coins_to_search = get_n_shortest(5, coins, player_location, dists_matrix)\n ennemy_dists = algo.dijkstra(mazeMap, opponentLocation)\n for c in coins_to_search:\n if len(coins_to_search) >= 2 and ennemy_dists[1][c] < dists_matrix[player_location][c]:\n coins_to_search.remove(c)\n break\n best_weight = float(\"inf\")\n best_path = []\n api.debug(coins_to_search)\n exhaustive(coins_to_search, player_location, [], 0, dist_matrix)\n meta_route = [player_location] + best_path\n api.debug(meta_route)\n route = u.location_list_to_route(meta_route, route_matrix)\n \n return coins_to_search, meta_route, route, dist_matrix[player_location][meta_route[1]]", "def backtrace(sequence, M, P, i, j):\n if j <= i:\n return\n\n if M[i][j] == M[i][j-1]:\n backtrace(sequence, M, P, i, j-1)\n\n else:\n for k in range(i, j):\n if costFunction(sequence[k], sequence[j]):\n if k-1 < 0:\n if M[i][j] == M[k+1][j-1]+1:\n if (k, j) not in P:\n P.append((k, j))\n backtrace(sequence, M, P, k+1, j-1)\n if M[i][j] == M[i, k-1] + M[k+1][j-1] + 1:\n if (k, j) not in P:\n P.append((k, j))\n backtrace(sequence, M, P, i, k-1)\n backtrace(sequence, M, P, k+1, j-1)\n break", "def solver(graph,homes,source,home_clusters,all_pairs_distances,all_pairs_shortest_paths):\n\n car_path = [get_car_path(graph,home_clusters,source,all_pairs_distances,all_pairs_shortest_paths, \n source_in_clusters = B1, christofides = B2) for B1 in [False,True] for B2 in [False,True]]\n\n dropoffs = [cluster_solver_utils.nearest_dropoff_efficient(graph,path,homes,all_pairs_distances) for path in car_path]\n cost = [cluster_solver_utils.eval_cost_efficient(graph,car_path[i],dropoffs[i],all_pairs_distances) for i in range(len(car_path))]\n\n minimum_cost = min(cost)\n idx = cost.index(minimum_cost)\n\n return minimum_cost, dropoffs[idx], car_path[idx]", "def find_shortest_path_to_other_vertices_using_Matrix_Multiply(N, v0):\n Nv = N.shape[0]\n N__ = np.copy(N)\n DISTANCES = np.full([Nv], -1, dtype=int)\n DISTANCES[v0] = 0\n max_path_len_possible = Nv - 1\n keep_searching = True\n i = 0\n while i <= max_path_len_possible and keep_searching == True:\n N_ = np.copy(N__)\n all_marked = True\n ii = 0\n while ii < Nv:\n if DISTANCES[ii] == -1 and N__[v0][ii] != 0:\n DISTANCES[ii] = i + 1 # shortest path to node ii from v0\n elif DISTANCES[ii] == -1:\n all_marked = False # not all paths found\n ii = ii + 1\n if all_marked == True:\n keep_searching = False\n N__ = np.dot(N_, N)\n i = i + 1\n\n return DISTANCES", "def hillClimbingSearch_S(problem, userInteraction, beQuiet):\n\n currentState = problem.state\n if not beQuiet:\n problem.visualize(currentState)\n\n # for visualization\n problem.hVals.append(problem.getObjValue(currentState))\n \n steps=0\n while True:\n if 
problem.isGlobalOptimum(currentState):\n return steps, currentState\n neighbours = problem.getNeighbours(currentState)\n runningBest = currentState\n for n in neighbours:\n nObjVal = problem.getObjValue(n)\n runningBestVal = problem.getObjValue(runningBest)\n if problem.isBetter(nObjVal, runningBestVal):\n runningBest = n\n\n if runningBest is currentState:\n # no neighbour is better, optimum reached\n return steps, currentState\n else:\n # jump to best neighbour\n currentState = runningBest\n\n # for visualization later on\n problem.hVals.append(problem.getObjValue(currentState))\n steps+=1\n if not beQuiet:\n if userInteraction:\n input(\"Press enter to continue \")\n problem.visualize(currentState)", "def _matrix_store_smooth_downhill(self):\n \n import time\n from scipy import sparse as sparse\n from scipy.sparse import linalg as linalgs \n \n\n t = time.clock()\n\n\n size = 0\n for nl in self.neighbour_array_lo_hi:\n size += 3 # len(nl)\n\n row_array = np.empty(size, dtype = int)\n col_array = np.empty(size, dtype = int)\n slope_array = np.zeros(size)\n local_slope_array = np.zeros(64)\n\n\n idx=0 \n for row in range(0, len(self.neighbour_array_lo_hi)): \n neighbours = self.neighbour_array_lo_hi[row] \n npoints = self.tri.points[neighbours]\n\n ## work out (downhill) gradient to (max of three) nearby neighbours\n \n\n for col, column in enumerate(neighbours[0:3]): \n \n delta_h = self.height[column] - self.height[row] \n\n\n if delta_h < 0.0:\n delta_s2 = (self.x[column] - self.x[row])**2 + (self.y[column] - self.y[row])**2\n local_slope_array[col] = ( delta_h**2 / delta_s2 )**5\n\n elif delta_h == 0.0 and self.bmask[row] == False:\n local_slope_array[col] = 1.0e-20\n\n else:\n local_slope_array[col] = 1.0e-20 \n \n # Normalise this so that it conserves mass (note - low points will have no contributions here !) \n \n norm = local_slope_array[0:len(neighbours)].sum()\n if norm != 0.0:\n norm = 1.0 / norm\n\n for col, column in enumerate(neighbours[0:3]): \n row_array[idx] = row\n col_array[idx] = column \n slope_array[idx] = local_slope_array[col] * norm\n\n idx += 1\n\n # We can re-pack this array into a sparse matrix for v. 
fast computation of downhill operator \n\n slopeCOO = sparse.coo_matrix( (slope_array, (row_array, col_array)) ).T\n slopeMat = slopeCOO.tocsr() \n \n print \"SlopeMat.shape \", slopeMat.shape, size\n\n # slopeNormVec = np.array(slopeMat.sum(axis=1)).T[0]\n # slopeNormVec[slopeNormVec != 0.0] = 1.0 / slopeNormVec[slopeNormVec != 0.0]\n # slopeNormMat = sparse.eye(self.tri.npoints)\n # slopeNormMat.setdiag(slopeNormVec)\n # slopeMat = slopeNormMat.dot(slopeMat)\n\n slopeMat.eliminate_zeros()\n self.smoothDownhillMat = slopeMat\n\n return", "def cost_function(H, n_qubits, p, params):\n ini_state=plus_state(n_qubits)\n for i in range(p):\n ini_state=qaoa_step(ini_state,H,n_qubits,params=[params[2*i],params[2*i+1]])\n return ((sparse.spmatrix.getH(ini_state)).dot(H.dot(ini_state))).real, ini_state", "def solve(problem):\n\n # *** YOUR CODE HERE ***\n\n # The core of Iterative Deepening Search are iterations of Depth Limited\n # Search with given increasing depth.\n\n # A recursive version of Depth Limited Search\n def depth_limited_search(problem, limit):\n \"\"\"\n Return a list of nodes we traversed (or None).\n :param problem: the starting set up.\n :param limit: a given numeric depth limit.\n :return: a list of nodes.\n \"\"\"\n\n # in this case, we simply use a list to keep track of nodes we\n # traversed, instead of the data structure, Stack.\n path = list()\n visited = set() # as before, to prevent duplicated nodes\n root = problem.get_initial_state()\n\n def rec_dls(state, action, depth):\n\n visited.add(state)\n\n # if it is a goal\n if problem.goal_test(state):\n path.append((state, action))\n return path\n\n # or if it reaches a certain depth, but not a goal\n elif depth == 0:\n visited.remove(state)\n return None\n\n else:\n path.append([state, action])\n for successor, action, cost in problem.get_successors(state):\n if successor not in visited:\n # recursively expands the deepest node\n res = rec_dls(successor, action, depth-1)\n if res is not None:\n return res\n path.pop()\n visited.remove(state)\n\n # \"Stared From the Bottom\" (root)\n result = rec_dls(root, 'None', limit)\n # return the path if the we DID have achieved something\n if result is not None:\n return path\n\n import sys\n for depth in range(sys.maxsize): # depth from 0 to infinity\n print(\"Lower-bound of the optimal cost is {}\".format(depth))\n res2 = depth_limited_search(problem, depth)\n if res2 is not None:\n action_list = list()\n for move in res2:\n action_list.append(move[1]) # recall index 0 is the parent\n # do not forget a None returned in iteration 0 (with depth 0)\n action_list.remove('None')\n return action_list", "def path_between_states(self):\n\n start_given = (self.row_before, self.col_before) # row, col before state transition\n finish_given = (self.row_after, self.col_after) # row, col after state transition\n\n # find_path based on a* algorithm\n path = find_path(Customer.GRID, start_given, finish_given, Customer.POSSIBLE_MOVES)\n\n # if empty path fillin values to enable next step interpolation into 1s resolution\n if start_given == finish_given:\n path = [(self.row_before, self.col_before), (self.row_after, self.col_after)]\n\n self.path = path", "def MST(waypoints):\n\n n = len(waypoints) - 1\n\n # initialize A, and B matrix\n A = np.zeros((8*n, 8*n))\n B = np.zeros((8*n, 1))\n\n # populate B matrix.\n for i in range(n):\n B[i] = waypoints[i]\n B[i + n] = waypoints[i+1]\n\n # Constraint 1\n for i in range(n):\n A[i][8*i:8*(i+1)] = get_poly_cc(8, 0, 0)\n\n # Constraint 2\n for i in 
range(n):\n A[i+n][8*i:8*(i+1)] = get_poly_cc(8, 0, 1)\n\n # Constraint 3\n for k in range(1, 4):\n A[2*n+k-1][:8] = get_poly_cc(8, k, 0)\n\n # Constraint 4\n for k in range(1, 4):\n A[2*n+3+k-1][-8:] = get_poly_cc(8, k, 1)\n\n # Constraint 5\n for i in range(n-1):\n for k in range(1, 7):\n A[2*n+6 + i*6+k-1][i*8 : (i*8+16)] = np.concatenate((get_poly_cc(8, k, 1), -get_poly_cc(8, k, 0)))\n\n # solve for the coefficients\n Coeff = np.linalg.solve(A, B)\n return Coeff", "def a_star_graph(graph, h, start, goal):\n \n path = []\n queue = PriorityQueue()\n queue.put((0, start))\n visited = set(start)\n\n branch = {}\n found = False\n \n while not queue.empty():\n item = queue.get()\n current_cost = item[0]\n current_node = item[1]\n\n if current_node == goal: \n print('Found a path.')\n found = True\n break\n else:\n for next_node in graph[current_node]:\n cost = graph.edges[current_node, next_node]['weight']\n new_cost = current_cost + cost + h(next_node, goal)\n \n if next_node not in visited: \n visited.add(next_node) \n queue.put((new_cost, next_node))\n \n branch[next_node] = (new_cost, current_node)\n \n path = []\n path_cost = 0\n if found:\n \n # retrace steps\n path = []\n n = goal\n path_cost = branch[n][0]\n while branch[n][1] != start:\n path.append(branch[n][1])\n n = branch[n][1]\n path.append(branch[n][1])\n \n return path[::-1], path_cost", "def solve(list_of_kingdom_names, starting_kingdom, adjacency_matrix, params=[]):\n\n #A = adjacency matrix, u = vertex u, v = vertex v\n def weight(A, u, v):\n return A[u][v]\n\n #A = adjacency matrix, u = vertex u\n def adjacent(A, u):\n L = []\n for x in range(len(A)):\n if A[u][x] > 0 and x != u and A[u][x] != 'x':\n L.insert(0,x)\n return L\n\n #Q = min queue\n def extractMin(Q):\n q = Q[0]\n Q.remove(Q[0])\n return q\n\n #Q = min queue, V = vertex list\n def decreaseKey(Q, K):\n for i in range(len(Q)):\n for j in range(len(Q)):\n if K[Q[i]] < K[Q[j]]:\n s = Q[i]\n Q[i] = Q[j]\n Q[j] = s\n\n #V = vertex list, A = adjacency list, r = root\n def prim(V, A, r):\n u = 0\n v = 0\n\n # initialize and set each value of the array P (pi) to none\n # pi holds the parent of u, so P(v)=u means u is the parent of v\n P=[None]*len(V)\n\n # initialize and set each value of the array K (key) to some large number (simulate infinity)\n K = [999999]*len(V)\n\n # initialize the min queue and fill it with all vertices in V\n Q=[0]*len(V)\n for u in range(len(Q)):\n Q[u] = V[u]\n\n # set the key of the root to 0\n K[r] = 0\n decreaseKey(Q, K) # maintain the min queue\n\n # loop while the min queue is not empty\n while len(Q) > 0:\n u = extractMin(Q) # pop the first vertex off the min queue\n\n # loop through the vertices adjacent to u\n Adj = adjacent(A, u)\n for v in Adj:\n w = weight(A, u, v) # get the weight of the edge uv\n\n # proceed if v is in Q and the weight of uv is less than v's key\n if Q.count(v)>0 and w < K[v]:\n # set v's parent to u\n P[v] = u\n # v's key to the weight of uv\n K[v] = w\n decreaseKey(Q, K) # maintain the min queue\n return P\n\n\n # graph is a list of kingdoms that previous i is the parent of j where j = i + 1 \n graph = prim(adjacency_matrix, list_of_kingdom_names, starting_kingdom)\n\n # key = parent, value = children\n g = {}\n\n for x in range(len(list_of_kingdom_names)):\n g[x] = []\n\n for x in range(len(graph)):\n for y in range(len(graph)):\n if x == graph[y]:\n g[x].append(y) \n\n\n def path(k):\n if not g[k]:\n return [k]\n\n lst = [k]\n\n for child in g[k]:\n lst += path(child) + [k]\n # print(lst)\n\n return 
lst\n\n\n full_path = path(starting_kingdom)\n\n # print(full_path)\n\n\n\n # return closed_walk, conquered_kingdoms", "def optimal_path(self, mission, start, sp):\n mission.add(start)\n while mission.targets[0] and mission.is_active():\n ds = [(sp[start][t], t) for t in mission.targets[0] if t in sp[start]]\n if not ds:\n mission.add(u'-1') # target not connected --> fill with dummies\n continue\n target = min(ds)\n for i in range(target[0] - 1):\n mission.add(u'0')\n mission.add(target[1])\n start = target[1]", "def find_topo_order(s,graph):\n\n ## initialization\n matrix = graph.get_adjacency()\n n, c = matrix.shape\n sym_matrix = np.empty((n,c), dtype=object)\n # cost_matrix = np.zeros((n,c))\n cache = {}\n\n def symbolize(i,j):\n \"given two indices, create a symbolic variable\"\n s = z.Int('edge_{0}{1}'.format(i,j))\n return s\n\n\n def value_of(i,j):\n \"given two indices, return the (i,j)th value in the adjacency matrix\"\n return sym_matrix[i][j]\n\n\n def constraint_1(n,i,j,k):\n y_ij = value_of(i,j)\n y_jk = value_of(j,k)\n y_ik = value_of(i,k)\n\n name = \"c1\" + str((n,i,j,k))\n constraint = (y_ij + y_jk - y_ik) <= 1\n\n # if name not in cache:\n # cache[name] = constraint\n s.assert_and_track(constraint, name)\n\n\n def constraint_2(n,i,j,k):\n y_ij = value_of(i,j)\n y_jk = value_of(j,k)\n y_ik = value_of(i,k)\n\n name = \"c2\" + str((n,i,j,k))\n constraint = (-y_ij - y_jk + y_ik) <= 0\n\n # if name not in cache:\n # cache[name] = constraint\n s.assert_and_track(constraint, name)\n\n\n def constraint_3(symbolic):\n s.add(z.Or([symbolic == 0, symbolic == 1]))\n\n\n def int_formulation(j):\n left = z.Sum([matrix[k][j] * sym_matrix[k][j] for k in range(j)])\n right = z.Sum([matrix[l][j] * (1 - sym_matrix[j][l]) for l in range(j+1, n)])\n\n return [left, right]\n\n\n ## constraint 3, every edge must be a 0 or a 1, we get the 0 or 1 directly\n ## from the adjacency matrix\n ## we do this first so that the sym_matrix is populated\n for n_iter in range(n):\n for j in range(n_iter+1):\n for i in range(j):\n s_edge = symbolize(i,j)\n sym_matrix[i][j] = s_edge\n constraint_3(s_edge)\n\n ## Iteration for triangle inequalities\n for n_iter in range(n):\n for k in range(n_iter+1):\n for j in range(k):\n for i in range(j):\n constraint_1(n_iter,i,j,k)\n constraint_2(n_iter,i,j,k)\n\n\n ## minimization\n o = z.Optimize()\n y = z.Int('y')\n\n y = z.Sum(u.flatten([int_formulation(j) for j in range(n)]))\n o.minimize(y)\n\n result = []\n\n if s.check() == z.sat:\n result = s.model()\n\n return result", "def main():\n\n # The X's represent the boundaries of the maze.\n # Reaching state G results in a reward of +1.\n # Reaching state E results in a reward of -1.\n grid = [ # 4 x 3 maze.\n \"XXXXXX\",\n \"X GX\",\n \"X X EX\",\n \"X X\",\n \"XXXXXX\"\n ]\n\n grid2 = [ # 10 x 8 maze.\n \"XXXXXXXXXXXX\",\n \"X X X\",\n \"X X XXXXXX X\",\n \"X EX\",\n \"XX XXXXXX X\",\n \"X X X X\",\n \"X XX XGXX X\",\n \"X XX X X\",\n \"XXXX XX\",\n \"XXXXXXXXXXXX\"\n ]\n\n maze = Maze(grid, (2, 1))\n maze.display()\n\n agent = QAgent(maze)\n agent.qlearn(250)\n path = agent.solve_maze()\n\n while path:\n move = path.pop(0)\n maze = maze.neighbor(move)\n time.sleep(0.50)\n maze.display()\n\n print(\"path: \" + str(path))\n print(\"Q table:\")\n print(agent.get_qtable_str())", "def alternative_iterative_method(x0, n, gamma, b):\n # Parameters:\n MAX_ITER = 1000\n n2 = n**2\n\n # Creating NxN versions of vector for easier indexing during iteration\n b = b.copy().reshape(n, n)\n b_transposed = b.copy().T\n x0 = 
x0.copy().reshape(n, n)\n x0_transposed = x0.copy().T\n x1 = x0.copy()\n x1_transposed = x0_transposed.copy()\n\n # No need for M, N, only a smaller tridiagonal system:\n H = scipy.sparse.diags((-1, 2, -1), (-1, 0, 1), shape=(n, n), format=\"csr\")\n gammaI = scipy.sparse.diags((gamma,), (0,), shape=(n, n), format=\"csr\")\n M1 = gammaI + H # Corresponds to both (gI + M) & (gI + N) in equations\n M2 = gammaI - H # Corresponds to both (gI - M) & (gI - N) in equations\n\n # Preallocating RHS of equations\n RHS7 = np.zeros((n, n), dtype=np.float64)\n RHS8 = np.zeros((n, n), dtype=np.float64)\n\n k = 0\n while k < MAX_ITER:\n for i in range(n): # Loading RHS values for Equation (7):\n RHS7[:, i] = scipy.sparse.csr_matrix.dot(M2, x0_transposed[i]) + b_transposed[i]\n for i in range(n): # Solving N independent tridig mat systems related to Eq(7):\n x1[i] = scipy.sparse.linalg.spsolve(M1, RHS7[i])\n RHS8[i] = scipy.sparse.csr_matrix.dot(M2, x1[i]) + b[i] # Loading RHS values for Equation (8):\n for i in range(n): # Solving N independent tridig mat systems related to Eq(8):\n x1_transposed[i] = scipy.sparse.linalg.spsolve(M1, RHS8[:, i])\n\n k += 1\n if np.allclose(x1_transposed, x0_transposed, rtol=1e-8):\n break\n x0_transposed = x1_transposed.copy()\n\n res = x1_transposed.T.reshape(n2)\n return res, k", "def find_min_path(s, t, dist):\n\n rows = len(dist) - 1\n cols = len(dist[0]) - 1\n col = cols\n row = rows\n pos_str = \"Position: (row={} col={}) -> (row={} col={})\"\n cst_str = \"Cost: {}\"\n prev_row = row\n prev_col = col\n\n # init sparse path matrix\n sparse_path = [[\" \" for x in range(cols + 1)] for x in range(rows + 1)]\n sparse_path[0][0] = \"0\"\n\n # start with operation at (rows, cols) and work backwards\n sparse_path[rows][cols] = dist[rows][cols]\n\n if verbose == 2:\n print()\n print(\"Initial Minimum Path Matrix:\")\n print_matrix(s, t, sparse_path)\n\n while True:\n\n # bail out if we are in the corner\n if row == 0 and col == 0:\n break\n\n # if we are not at a matrix boundary\n if row != 0 and col != 0: # if at left edge or top row, cannot move diagonally\n\n # diagonal\n if (dist[row - 1][col - 1] == min(dist[row - 1][col],\n dist[row][col - 1],\n dist[row - 1][col - 1])) and (dist[row - 1][col - 1] == dist[row][col] or dist[row - 1][col - 1] == dist[row][col] - 1):\n sparse_path[row - 1][col - 1] = dist[row - 1][col - 1]\n temp_cost = dist[row - 1][col - 1]\n\n # move current cell\n prev_row = row\n prev_col = col\n if col > 0:\n col -= 1\n if row > 0:\n row -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # left\n elif dist[row][col - 1] <= dist[row][col]:\n sparse_path[row][col - 1] = dist[row][col - 1]\n temp_cost = dist[row][col - 1]\n\n # move current cell\n prev_row = row\n prev_col = col\n if col > 0:\n col -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # above\n else:\n sparse_path[row - 1][col] = dist[row - 1][col]\n temp_cost = dist[row - 1][col]\n\n # move current cell\n prev_row = row\n prev_col = col\n if row > 0:\n row -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # if at matrix edge, can only move up\n elif col == 0:\n # above\n sparse_path[row - 1][col] = dist[row - 1][col]\n temp_cost = dist[row - 1][col]\n\n # move current cell\n prev_row = row\n 
prev_col = col\n if row > 0:\n row -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # must be at row boundary, can only move left\n else:\n # left\n if dist[row][col - 1] <= dist[row][col]:\n sparse_path[row][col - 1] = dist[row][col - 1]\n temp_cost = dist[row][col - 1]\n\n # move current cell\n prev_row = row\n prev_col = col\n if col > 0:\n col -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # print matrix\n if verbose == 2:\n print_matrix(s, t, sparse_path)\n\n return sparse_path", "def solute(self, puzzle):\r\n \"\"\"suppose that ax = c, where a is a matrix, c and x are vectors.\"\"\"\r\n \"\"\"The aim is to figure out x, which indicates the solution.\"\"\"\r\n A, a, c = [], [], []\r\n for i in range(puzzle.row):\r\n for j in range(puzzle.column):\r\n # create a puzzle.row * puzzle.column by puzzle.row * puzzle.column matrix.\r\n # each column represents a cell in the puzzle.\r\n # each row represents the changed cell if column c is selected.\r\n if puzzle.lights[i][j] == -1:\r\n c.append(1)\r\n else:\r\n c.append(0)\r\n for m in range(puzzle.row):\r\n for n in range(puzzle.column):\r\n if self.is_adjecent([m, n], [i, j]):\r\n # if [m, n] is adjecent to [i, j], then a[ij][mn] should be 1.\r\n a.append(1)\r\n else:\r\n a.append(0)\r\n a.append(c[i * puzzle.column + j])\r\n A.append(a)\r\n a = []\r\n\r\n self.eliminate(A)\r\n x = [item[len(item) - 1] for item in A]\r\n # x is the last column of A.\r\n # if x[i] is 1, cell i should be selected.\r\n i = 0\r\n for m in range(puzzle.row):\r\n for n in range(puzzle.column):\r\n if x[i] == 1:\r\n puzzle.selection.add((m, n))\r\n i += 1\r\n\r\n return puzzle.selection", "def Cost_of_Hamiltonian_Path(path):\n cost = 0\n for i in range(len(path)-1):\n cost += edge_dic[(path[i], path[i+1])]\n return cost", "def error_estimation_simplex(vertex_vector_h, vertex_chi_sq_h, func):\n # print(\"\\nvertex_vector\")\n # print(vertex_vector_h)\n # print(\"\\nvertex_chi_sq\")\n # print(vertex_chi_sq_h)\n\n # temporary solution\n k, hh = vertex_vector_h.shape # hh = k-1\n theta_0 = vertex_vector_h[0, :]\n m_q = numpy.zeros((k-1, k-1))\n vertex_vector = numpy.zeros(vertex_vector_h.shape, dtype=float)\n vertex_vector[0, :] = theta_0\n max_radius = numpy.zeros(k-1, dtype=float)\n for i in range(1, k):\n theta_i = vertex_vector_h[i, :]\n rand_radius = numpy.abs(theta_i-theta_0)\n max_radius = numpy.max(numpy.vstack([max_radius, rand_radius]), axis=0)\n # print(\"max_radius \", max_radius)\n for i in range(1, k):\n radius_h = numpy.zeros(k-1, dtype=float)\n radius_h[i-1] = max_radius[i-1]\n vertex_vector[i, :] = theta_0+radius_h\n\n l_chi_sq = []\n for i in range(0, k):\n theta_i = vertex_vector_h[i, :]\n chi_sq = func(theta_i)\n l_chi_sq.append(chi_sq)\n vertex_chi_sq = numpy.array(l_chi_sq, dtype=float)\n\n # print(\"hh, k: \", hh, k)\n # print(\"theta_0: \", theta_0)\n chi_sq_0 = vertex_chi_sq[0]\n # print(\"chi_sq_0: \", chi_sq_0)\n v_a = numpy.zeros(k-1)\n m_b = numpy.zeros((k-1, k-1))\n m_q = numpy.zeros((k-1, k-1))\n m_chi_sq_0i = numpy.zeros(k-1)\n # print(\"step 1\")\n for i in range(1, k):\n theta_i = vertex_vector[i, :]\n theta_0i = 0.5*(theta_0+theta_i)\n chi_sq_0i = func(theta_0i)\n # print(\"ii: {:} {:}\".format(i, chi_sq_0i))\n m_chi_sq_0i[i-1] = chi_sq_0i\n m_q[i-1, :] = theta_i-theta_0\n\n # print(\"step 2\")\n for i in range(1, k):\n chi_sq_i = 
vertex_chi_sq[i]\n theta_i = vertex_vector[i, :]\n chi_sq_0i = m_chi_sq_0i[i-1]\n\n a_i = 4.*chi_sq_0i - chi_sq_i - 3.*chi_sq_0\n v_a[i-1] = a_i\n\n b_ii = 2.*(chi_sq_i + chi_sq_0 - 2.*chi_sq_0i)\n m_b[i-1, i-1] = b_ii\n\n for j in range(i+1, k):\n chi_sq_0j = m_chi_sq_0i[j-1]\n theta_j = vertex_vector[j, :]\n theta_ij = 0.5*(theta_i+theta_j)\n chi_sq_ij = func(theta_ij)\n # print(\"ij: {:} {:} {:}\".format(i, j, chi_sq_ij))\n b_ij = 2.*(chi_sq_ij + chi_sq_0 - chi_sq_0i - chi_sq_0j)\n m_b[i-1, j-1] = b_ij\n m_b[j-1, i-1] = b_ij\n # print(\"step 3\")\n m_ib = numpy.linalg.inv(m_b)\n m_qib = numpy.matmul(m_q, m_ib)\n v_qiba = numpy.matmul(m_qib, v_a)\n # theta_min = theta_0 - v_qiba\n m_qibqt = numpy.matmul(m_qib, m_q.transpose())\n m_error = 2.*chi_sq_0*m_qibqt\n\n # print(\"\\nm_q\")\n # print(m_q)\n # print(\"\\nm_b\")\n # print(m_b)\n # print(\"\\nm_ib\")\n # print(m_ib)\n # print(\"\\nv_a\")\n # print(v_a)\n # print(\"\\ntheta_min: \", theta_min)\n # print(\"\\ntheta_0: \", theta_0)\n\n # print(\"\\nm_error: \", m_error)\n # print(50*\"*\")\n return m_error, numpy.abs(v_qiba)", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n from util import PriorityQueue\n\n pq = PriorityQueue()\n visited = []\n start = problem.getStartState()\n mapper = {}\n \n mapper[problem.getStartState()] = None\n pq.push(problem.getStartState(), 1)\n\n while (not pq.isEmpty()):\n point = pq.pop()\n if problem.isGoalState(point):\n current = point\n l = []\n while mapper[current] != None:\n tup = mapper[current]\n l.append(tup[1])\n current = tup[0]\n l.reverse()\n print l\n return l\n #util.raiseNotDefined()\n if not (point in visited):\n visited.append(point)\n succs = problem.getSuccessors(point)\n succs.reverse()\n for child in succs:\n if not (child[0] in mapper):\n pq.push(child[0], child[2]) #child has (xy, direction, weight)\n mapper[child[0]] = point, child[1]\n # util.raiseNotDefined()", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n startState = problem.getStartState()\n fringe = util.PriorityQueue()\n cost = 0 \n visitedNodes = []\n actions = []\n \n \"\"\" \n Format of Priority Queue :\n (item , priority)\n item => state , actions , cost\n priorityQueue.push ( (state , actions , cost) , cost )\n \n \"\"\"\n \n if ( problem.isGoalState(startState) ):\n return actions\n else :\n newNode = startState , actions , cost\n priority = cost\n fringe.push( newNode , priority )\n while ( fringe.isEmpty() == False ):\n currentState , actions , cost = fringe.pop()\n if ( problem.isGoalState(currentState) == True ) :\n #print(\"Final Path : \" + str(actions))\n return actions\n else :\n if ( (currentState in visitedNodes) == False ):\n visitedNodes.append(currentState)\n currentStateSuccessors = problem.getSuccessors(currentState)\n for node in currentStateSuccessors :\n state , action , stateCost = node\n if( ( state in visitedNodes) == False ) :\n newNode = state , actions + [action] , cost + stateCost\n priority = cost + stateCost\n fringe.push( newNode , priority )\n util.raiseNotDefined()", "def a_star(grid, heuristic_func, start, goal):\n\n path = []\n path_cost = 0\n queue = PriorityQueue()\n queue.put((0, start))\n visited = set(start)\n\n branch = {}\n found = False\n\n while not queue.empty():\n item = queue.get()\n current_cost = item[0]\n current_node = item[1]\n\n if current_node == goal:\n print('Found a path.')\n found = True\n break\n else:\n # Get the new vertexes connected to the current vertex\n for a in valid_actions(grid, current_node):\n next_node = 
(current_node[0] + a.delta[0], current_node[1] + a.delta[1])\n new_cost = current_cost + a.cost + heuristic_func(next_node, goal)\n\n if next_node not in visited:\n visited.add(next_node)\n queue.put((new_cost, next_node))\n\n branch[next_node] = (new_cost, current_node, a)\n\n if found:\n # retrace steps\n n = goal\n path_cost = branch[n][0]\n while branch[n][1] != start:\n path.append(branch[n][1])\n n = branch[n][1]\n path.append(branch[n][1])\n\n return path[::-1], path_cost", "def astar(maze):\n # TODO: Write your code here\n gFunction = {}\n frontier = PriorityQueue()\n path = []\n ret = []\n objectives = maze.getObjectives()\n start = State(maze.getStart()[0], maze.getStart()[1], objectives[0])\n gFunction[start] = 0\n frontier.put(start)\n\n while not frontier.empty():\n\n currentState = frontier.get()\n currentCell = currentState.cell()\n\n # objective found, initialise backtrace and exit search\n if maze.isObjective(currentCell[0], currentCell[1]):\n\n path.append(currentState)\n ret.append(currentCell)\n break\n\n neighbors = maze.getNeighbors(currentCell[0], currentCell[1])\n\n for i in neighbors:\n\n neighbor = State(i[0], i[1], objectives[0])\n gVal= gFunction[currentState]+1\n\n # if neighbor is not visited or if we found better path to it, add it to the frontier\n if neighbor not in gFunction or gVal < gFunction[neighbor]:\n neighbor.setParent(currentState)\n gFunction[neighbor] = gVal\n hFunction = abs(objectives[0][0] - i[0]) + abs(objectives[0][1] - i[1]) # use manhatten distance as heuristic\n neighbor.setfFunction(gFunction[neighbor] + hFunction)\n frontier.put(neighbor)\n\n # backtrace\n while path[0]!= start:\n \n currentCell = path[0]\n path.insert(0, currentCell.parent())\n ret.insert(0, currentCell.parent().cell())\n\n return ret", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n n = Directions.NORTH\n e = Directions.EAST\n\n result = []\n qu = util.PriorityQueue()\n visited = set([])\n current = (problem.getStartState(), \"\", 0)\n qu.update(current, 0)\n costs = {}\n parents = {}\n parents[problem.getStartState()] = (problem.getStartState(), \"\")\n\n while not qu.isEmpty():\n cost, current= qu.pop()\n visited.add(current[0])\n\n if problem.isGoalState(current[0]):\n result = current[0]\n break\n\n for each in problem.getSuccessors(current[0]):\n if each[0] not in visited:\n qu.update(each, cost+each[2])\n if each[0] not in costs:\n costs[each[0]] = cost+each[2]\n parents[each[0]] = (current[0], each[1])\n elif costs[each[0]] > cost+each[2]:\n costs[each[0]] = cost + each[2]\n parents[each[0]] = (current[0], each[1])\n\n path = []\n while parents[result][0] != result:\n path.append(parents[result][1])\n result = parents[result][0]\n\n path.reverse()\n result = []\n for each in path:\n if each == \"South\":\n result.append(s)\n elif each == \"West\":\n result.append(w)\n elif each == \"North\":\n result.append(n)\n elif each == \"East\":\n result.append(e)\n\n return result\n util.raiseNotDefined()", "def search(world_state, robot_pose, goal_pose):\n if world_state.shape[0] == 0 or world_state.shape[1] == 0:\n print(\"Error, empty world_state!!!\")\n return None\n if not is_pos_valid(robot_pose, world_state.shape):\n print(\"Error, invalid robot_pose!!!\", robot_pose)\n return None\n if not is_pos_valid(goal_pose, world_state.shape):\n print(\"Error, invalid goal_pose!!!\", goal_pose)\n return None\n\n directions = [(-1, 0), (1, 0), (0, -1), (0, 1)] # orthogonal directions\n 
found = False\n\n x, y = robot_pose\n g = 0\n h = heuristic(robot_pose, goal_pose)\n f = g + h\n open = [[f, x, y]]\n came_from = {}\n came_from[robot_pose] = None\n cost_so_far = {}\n cost_so_far[robot_pose] = 0\n\n while open:\n open.sort() # sort based on f value\n current = open.pop(0)\n\n x, y = current[1:]\n g = cost_so_far[(x, y)]\n\n if (x, y) == goal_pose:\n found = True\n break\n else:\n # find available next positions\n for direction in directions:\n x2 = x + direction[0]\n y2 = y + direction[1]\n\n # check whether x2 and y2 are valid\n if not is_pos_valid((x2, y2), world_state.shape):\n continue\n\n g2 = g + 1\n if world_state[x2, y2] == 0 and ((x2, y2) not in cost_so_far or g2 < cost_so_far[(x2, y2)]):\n\n h2 = heuristic((x2, y2), goal_pose)\n f2 = g2 + h2\n open.append([f2, x2, y2])\n came_from[(x2, y2)] = (x, y)\n cost_so_far[(x2, y2)] = g2\n if found:\n path = [goal_pose]\n current = goal_pose\n while came_from[current]:\n current = came_from[current]\n path.append(current)\n\n path.reverse()\n return path\n\n else:\n return None", "def astar_multi(maze):\n heuristic_lookup = {} \n objs = maze.getObjectives()\n corner_list = maze.getObjectives()\n start = maze.getStart()\n path = []\n dim = maze.getDimensions()\n visited = {}\n lookup_table = {}\n p_queue = []\n edgeset = []\n mintree = {}\n start_heuristic = 0 + multi_dot_heuristic_query(maze, start, objs, edgeset, mintree) \n heuristic_lookup[(start, tuple(objs))] = start_heuristic\n start_state = state(start, corner_list)\n lookup_table[state(start, corner_list)] = (start_heuristic, 0, state((-2, -2)))\n p_queue.append((start_heuristic, state(start, corner_list)))\n while p_queue:\n pair = p_queue.pop(0)\n visited[pair[1]] = lookup_table.get(pair[1])[2]\n if not pair[1].getlist():\n current_state = pair[1]\n while current_state != start_state:\n path.append(current_state.getpos())\n current_state = visited.get(current_state)\n path.append(start)\n path.reverse()\n return path\n else: \n list_of_neighbors = maze.getNeighbors(pair[1].getpos()[0], pair[1].getpos()[1])\n for coordinates in list_of_neighbors:\n current_state = state(coordinates)\n if coordinates in pair[1].getlist():\n new_list = copy.copy(pair[1].getlist())\n new_list.remove(coordinates)\n current_state = state(coordinates, new_list)\n else:\n current_state = state(coordinates, pair[1].getlist()) \n if current_state in visited:\n continue\n if current_state in lookup_table:\n if (lookup_table.get(current_state)[0], current_state) in p_queue:\n cost = lookup_table.get(pair[1])[1] + 1\n queried_heuristic = 0\n if (current_state.getpos(), tuple(current_state.getlist())) in heuristic_lookup:\n queried_heuristic = heuristic_lookup.get((current_state.getpos(), tuple(current_state.getlist())))\n else:\n queried_heuristic = multi_dot_heuristic_query(maze, current_state.getpos(), current_state.getlist(), edgeset, mintree)\n heuristic_lookup[(current_state.getpos(), tuple(current_state.getlist()))] = queried_heuristic\n heuristic = queried_heuristic + cost\n old_heuristic = lookup_table.get(current_state)[0]\n if heuristic < lookup_table.get(current_state)[0]:\n lookup_table[current_state] = (heuristic, cost, pair[1])\n p_queue.remove((old_heuristic, current_state))\n bisect.insort(p_queue, (heuristic, current_state))\n else:\n cost = lookup_table.get(pair[1])[1] + 1\n queried_heuristic = 0\n if (current_state.getpos(), tuple(current_state.getlist())) in heuristic_lookup:\n queried_heuristic = heuristic_lookup.get((current_state.getpos(), 
tuple(current_state.getlist())))\n else:\n queried_heuristic = multi_dot_heuristic_query(maze, current_state.getpos(), current_state.getlist(), edgeset, mintree) \n heuristic_lookup[(current_state.getpos(), tuple(current_state.getlist()))] = queried_heuristic\n heuristic = queried_heuristic + cost\n lookup_table[current_state] = (heuristic, cost, pair[1])\n bisect.insort(p_queue, (heuristic, current_state))\n return []", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n # Get the start node\n start_state = problem.getStartState()\n print(start_state)\n\n # Define a stack\n plan_stack = util.Queue()\n start_plan = [start_state] # node, cost\n plan_stack.push(start_plan)\n\n # Visited nodes\n visited_nodes = set(start_state)\n\n goal_found = False\n\n while not goal_found:\n # Get the plan from the stack\n plan_to_expand = plan_stack.pop()\n node_to_exp = plan_to_expand[-1]\n all_nxt_nodes = problem.getSuccessors(node_to_exp)\n\n # Traverse through all the next nodes\n for nxt_node in all_nxt_nodes:\n nxt_pos = nxt_node[0]\n\n if nxt_pos in visited_nodes: # Check if node is already visited\n continue\n\n visited_nodes.add(nxt_pos) # Add the node to visited nodes\n nxt_plan = plan_to_expand + [nxt_pos] # add node to the plan\n plan_stack.push(nxt_plan) # push the plan into the stack\n goal_found = problem.isGoalState(nxt_pos) # Check if goal is achieved\n if goal_found:\n break\n \n \n print(goal_found)\n print(nxt_plan)\n\n moves = []\n # Convert plan to moves\n for i in range(len(nxt_plan) - 1):\n for nxt_node in problem.getSuccessors(nxt_plan[i]):\n nxt_pos = nxt_node[0]\n nxt_mv = nxt_node[1]\n if nxt_pos == nxt_plan[i+1]:\n moves.append(nxt_mv)\n break\n \n return moves\n\n \n\n # Calculate the minimum plan cost \n #min_val = float(\"inf\")\n #for one_plan in plan_stack:\n # plan_cost = one_plan[1]\n # if plan_cost < min_val:\n # min_val = plan_cost\n\n ## Expand the nodes with minimum plan cost\n #for one_plan in plan_stack:\n # plan_cost = one_plan[1]\n # if plan_cost == min_val:\n # plan_step = one_plan[0] \n # # Expand the last node of plan\n # last_node = plan_step[end]\n # for nxt_node in problem.getSuccessors(last_node):\n\n\n\n util.raiseNotDefined()", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n\n templist=[]\n explored = set()\n fringe = util.PriorityQueue()\n # state, list of directions till now and the cost is pushed in the stack\n # so that algorithm can explore the node with lowest cost first\n fringe.push((problem.getStartState(),templist),1)\n\n while (not fringe.isEmpty()):\n (currentNode,currDir) = fringe.pop()\n\n if problem.isGoalState(currentNode):\n pathToGoal = currDir\n break\n if not (currentNode in explored):\n explored.add(currentNode)\n for childNode in problem.getSuccessors(currentNode):\n # total cost is cost till now plus cost to the child node\n totalCost = childNode[2]+problem.getCostOfActions(currDir)\n fringe.push((childNode[0],currDir+[childNode[1]]),totalCost)\n\n\n\n\n return pathToGoal;", "def solve_system(A, method):\n # find b vector such that Ax = b\n # with x = [0 1 2 ... 
size(m)]\n size = A.shape\n true_x = list(xrange(0, size[1]))\n b = A.dot(true_x)\n\n # solve Ax = b and check solution error\n # diretti\n if method in [sla.spsolve, direttolu]:\n x = method(A, b)\n print(\"\\t\" + method.func_name + \" solved \" + \n str(size))\n return x, sol_error(x, true_x)\n\n # iterativi\n else: \n # per accellerare la convergenza dei metodi iterativi\n # dobbiamo passare un precondizionatore (una matrice M,\n # che approssima l'inversa di A)\n # http://osdir.com/ml/python-scientific-user/2011-06/msg00249.html\n try:\n P = sla.spilu(A, drop_tol=1e-5) \n except Exception as err:\n print(\"\\t\", err)\n print(\"\\tPorta le tue sporche matrici singolari altrove...\")\n return None, \"nan\"\n\n M = sla.LinearOperator(size, P.solve)\n\n global current_x\n current_x = None\n try: \n x, status = method(A, \n b, \n tol=1e-16, \n M=M,\n maxiter=500,\n callback=callback_func)\n except Exception:\n print(\"\\t\" + method.func_name + \" converged on \" + str(size))\n return current_x, sol_error(current_x, true_x)\n\n if status != 0:\n print(\"\\t\" + method.func_name + \" DIDN'T converge on \" +\n str(size) + \" in less than 500 iterations\")\n return current_x, sol_error(x, true_x)\n else:\n print(\"\\t\" + method.func_name + \" converged on \" +\n str(size))\n return current_x, sol_error(x, true_x)" ]
[ "0.6767159", "0.67312855", "0.6485659", "0.6303362", "0.62523675", "0.621314", "0.617608", "0.61201537", "0.60680777", "0.60667634", "0.60551065", "0.6045231", "0.60280347", "0.60061914", "0.59879357", "0.5987022", "0.5967305", "0.59623784", "0.59502643", "0.5880169", "0.58558387", "0.58557796", "0.58411324", "0.5827745", "0.5821433", "0.5812439", "0.579256", "0.57916915", "0.5760538", "0.57485735", "0.57409453", "0.5736113", "0.57177144", "0.5710784", "0.5709437", "0.5707078", "0.5705746", "0.5705576", "0.5705429", "0.57041556", "0.570177", "0.5695601", "0.5695046", "0.5694442", "0.5678556", "0.5671503", "0.567024", "0.56700003", "0.5665382", "0.56644785", "0.5650349", "0.56330186", "0.5623488", "0.56214947", "0.56128454", "0.56049496", "0.5601762", "0.55971634", "0.5588198", "0.5588183", "0.5583177", "0.5578004", "0.5570598", "0.557044", "0.556137", "0.55583966", "0.5557176", "0.5554796", "0.5540937", "0.5530776", "0.5529565", "0.5527532", "0.5526356", "0.5522956", "0.5522155", "0.5520693", "0.5515903", "0.55158514", "0.5512699", "0.5509141", "0.55003715", "0.5499512", "0.5497189", "0.5492085", "0.54867595", "0.54792094", "0.5475156", "0.5474905", "0.54722327", "0.5469906", "0.54674643", "0.5460994", "0.54587525", "0.5457752", "0.5456054", "0.5454824", "0.5452848", "0.54514027", "0.5446475", "0.54350936", "0.5433122" ]
0.0
-1
Viterbi algorithm for finding the optimal path. The number of emission probabilities per index can vary and a separate matrix can be specified for each transition.
def viterbi_sparse(p_emit: Sequence[np.ndarray], p_trans: Sequence[np.ndarray]) -> np.ndarray:
    T = len(p_emit)
    assert T - 1 == len(p_trans)

    trellis = [p_emit[0]]
    states = [None]
    for t in range(1, T):
        weighted_scores = trellis[-1][:, None] + p_trans[t - 1]  # [x, y] scores and p_trans broadcasted
        max_scores = np.amax(weighted_scores, axis=0)  # [y]
        trellis.append(np.add(max_scores, p_emit[t]))  # [y] remember highest score of each path
        states.append(np.argmax(weighted_scores, axis=0))  # [y] remember index of best path
    assert len(trellis) == T and len(states) == T

    tokens = [None] * T  # [T]
    tokens[-1] = np.argmax(trellis[-1], axis=0)  # []
    for t in range(T - 1, 0, -1):
        tokens[t - 1] = states[t][tokens[t]]  # []
    return tokens
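A minimal usage sketch for viterbi_sparse, assuming the emission and transition arrays hold log-probabilities; the toy values and shapes below are illustrative only, chosen so the number of candidate states differs per step and each step boundary gets its own transition matrix.

from typing import Sequence
import numpy as np

# Three steps with 2, 3 and 2 candidate states respectively (log-probabilities).
p_emit = [
    np.log(np.array([0.6, 0.4])),       # step 0
    np.log(np.array([0.1, 0.7, 0.2])),  # step 1
    np.log(np.array([0.5, 0.5])),       # step 2
]
# One transition matrix per step boundary; shapes follow the varying state counts.
p_trans = [
    np.log(np.array([[0.8, 0.1, 0.1],   # 2 x 3: step 0 -> step 1
                     [0.3, 0.3, 0.4]])),
    np.log(np.array([[0.5, 0.5],        # 3 x 2: step 1 -> step 2
                     [0.9, 0.1],
                     [0.2, 0.8]])),
]

best_path = viterbi_sparse(p_emit, p_trans)
print([int(i) for i in best_path])  # -> [1, 1, 0]: the state index with the highest total score at each step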
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def viterbi_path(prior, transmat, observ_likelihood):\n T = observ_likelihood.shape[-1]\n N = observ_likelihood.shape[0]\n\n path = numpy.zeros(T, dtype=numpy.int32)\n global_score = numpy.zeros(shape=(N,T))\n predecessor_state_index = numpy.zeros(shape=(N,T), dtype=numpy.int32)\n\n t = 1\n global_score[:, 0] = prior * observ_likelihood[:, 0]\n # need to normalize the data\n global_score[:, 0] = global_score[:, 0] /sum(global_score[:, 0] )\n \n for t in range(1, T):\n for j in range(N):\n temp = global_score[:, t-1] * transmat[:, j] * observ_likelihood[j, t]\n global_score[j, t] = max(temp)\n predecessor_state_index[j, t] = temp.argmax()\n\n global_score[:, t] = global_score[:, t] / sum(global_score[:, t])\n\n path[T-1] = global_score[:, T-1].argmax()\n \n for t in range(T-2, -1, -1):\n path[t] = predecessor_state_index[ path[t+1], t+1]\n\n return [path, predecessor_state_index, global_score]", "def viterbi(log_emlik, log_startprob, log_transmat, forceFinalState=True):\n N, M = log_emlik.shape # (# timesteps, # states)\n B = np.zeros((N,M))\n V = np.zeros((N,M)) \n\n # initialisation\n V[0,:] = log_startprob + log_emlik[0,:] \n\n # induction\n for t in range(1,N):\n # vectorise\n x = np.tile(V[t-1,:],(M,1)) + log_transmat.T\n V[t,:] = np.max(x, axis=1) + log_emlik[t,:]\n B[t,:] = np.argmax(x, axis=1)\n\n # recover best path, looking for state sequence S that maximises P(S,X|emission probs)\n # TODO if forceFinalState\n end_state = np.argmax(V[N-1,:]) \n \n viterbi_path = [B[N-1,end_state]]\n viterbi_loglik = np.max(V[N-1,:])\n\n s_star = int(end_state)\n for t in range(N-2,-1,-1):\n s_star = int(B[t+1,s_star]) # optimal state at timestep t\n viterbi_path.append(s_star)\n\n assert len(viterbi_path) == N\n\n return viterbi_loglik, viterbi_path[::-1]", "def viterbi(self):\n # initialisation\n self.phi = zeros((self.noOfEmmittingStates+2, self.T + 1))\n self.phi[0,0] = 1.0\n for i in range(1,self.noOfEmmittingStates+2):\n self.phi[i,0] = 0.0\n for t in range(1,self.T+1):\n self.phi[0,t] = 0.0\n self.traceback = zeros((self.noOfEmmittingStates+1, self.T+1))\n\n # main recursion\n for t in range(1, self.T + 1):\n for j in range(1, self.noOfEmmittingStates + 1):\n phiTemp = zeros((self.noOfEmmittingStates + 1, 1))\n for k in range(self.noOfEmmittingStates+1):\n phiTemp[k,0] = self.phi[k,t-1] * self.transitionMatrix[k, j-1]\n self.traceback[j-1,t-1] = nonzero(phiTemp == phiTemp.max(0))[0][0]\n self.phi[j, t] = phiTemp.max(0) * self.b[j-1, t-1]\n\n # last column - set states which can't reach term to 0, sub for term\n for j in range(1,self.noOfEmmittingStates + 1):\n if self.transitionMatrix[j,-1] == 0:\n self.phi[j,-1] = 0\n phiTemp = zeros((self.noOfEmmittingStates+1, 1))\n for k in range(self.noOfEmmittingStates + 1):\n phiTemp[k,0] = self.phi[k,-1] * self.transitionMatrix[k,-1]\n self.traceback[-1,-1] = nonzero(phiTemp == phiTemp.max(0))[0][0]\n self.phi[-1,-1] = phiTemp.max(0)", "def run_viterbi(emission_scores, trans_scores, start_scores, end_scores):\r\n L = start_scores.shape[0]\r\n assert end_scores.shape[0] == L\r\n assert trans_scores.shape[0] == L\r\n assert trans_scores.shape[1] == L\r\n assert emission_scores.shape[1] == L\r\n N = emission_scores.shape[0]\r\n\r\n # SHAPES \r\n # N = 5, L = 3\r\n # emission_scores = (5,3), trans_scores = (3,3)\r\n # start_scores = (3,), end_scores = (3,)\r\n\r\n # Creating the transition DP matrix\r\n T = [[0 for _ in range(N)] for _ in range(L)]\r\n backpointers = [[0 for _ in range(N)] for _ in range(L)]\r\n\r\n # Filling the first column\r\n for 
row in range(L):\r\n T[row][0] = emission_scores[0][row] + start_scores[row] # emission_scores matrix is (N X L)\r\n \r\n # Filling the rest of the transition matrix\r\n for col in range(1, N):\r\n for row in range(L):\r\n prev_list = []\r\n for prev_label in range(L):\r\n prev_list.append(trans_scores[prev_label, row] + T[prev_label][col-1])\r\n T[row][col] = max(prev_list) + emission_scores[col][row] \r\n backpointers[row][col] = np.argmax(prev_list)\r\n\r\n # Filling the last column\r\n for row in range(L):\r\n T[row][N-1] += end_scores[row]\r\n\r\n # print for debug\r\n # print \"T\"\r\n # for i in T:\r\n # print i\r\n \r\n # print \r\n # print\r\n\r\n # print \"B\"\r\n # for i in backpointers:\r\n # print i\r\n\r\n # Finding max score in last column of T matrix\r\n T = np.array(T)\r\n score = np.asscalar(np.max(T[:,N-1]))\r\n location = np.asscalar(np.argmax(T[:,N-1]))\r\n\r\n # Getting best sequence from right to left using backpointers\r\n y = [location]\r\n for col in range(N-1, 0, -1):\r\n y.insert(0, backpointers[location][col])\r\n location = backpointers[location][col]\r\n\r\n '''\r\n y = []\r\n for i in xrange(N):\r\n # stupid sequence\r\n y.append(i % L)\r\n # score set to 0\r\n return (0.0, y)\r\n '''\r\n return (score, y)", "def Viterbi(_sentence, _model, _emission_df, _transition_df):\n\n if not _sentence:\n return []\n\n # EXECUTE VITERBI\n states = [state for state, _ in _model.y_count.items()]\n states.remove('__START__')\n states.remove('__STOP__')\n\n # keep table of values\n # (len(states) x len(sentence))\n value_table = [[0 for x in range(len(_sentence) + 1)] for y in range(len(states))]\n\n # keep table of sequences\n sequence_table = [[[] for x in range(len(_sentence))] for y in range(len(states))]\n\n # base case - START to all states\n for i in range(len(states)):\n # transition prob from __START__ to anything\n try:\n transition_prob = _transition_df[('__START__', states[i])]\n except KeyError:\n transition_prob = 0.0\n\n # error occurs here due to empty _sentence\n try:\n emission_prob = _emission_df[(_sentence[0], states[i])]\n except KeyError:\n emission_prob = 0.0\n\n value_table[i][0] = float(transition_prob) * float(emission_prob)\n sequence_table[i][0] = ['__START__', states[i]]\n\n # iterative/recursive case - state to state\n for i in range(1, len(_sentence)):\n\n # storage for prev\n prev_optimal = 0.0\n prev_state_seq = []\n\n for j in range(len(states)):\n try:\n # find e(xi|yj)\n emission_prob = float(_emission_df[(_sentence[i], states[j])])\n except KeyError:\n emission_prob = 0.0\n\n if prev_optimal == 0.0:\n # find optimal from state to state prob\n for k in range(len(states)):\n test_opti = float(value_table[k][i-1])\n if test_opti >= prev_optimal:\n prev_optimal = test_opti\n prev_state_seq = sequence_table[k][i-1]\n\n # given prev optimal, calculate transition prob\n try:\n # find transition prob from prev optimal state to current\n transition_prob = float(_transition_df[(prev_state_seq[-1], states[j])])\n except KeyError:\n transition_prob = 0.0\n\n prob = prev_optimal * transition_prob * emission_prob\n next_state_seq = prev_state_seq + [states[j]]\n\n value_table[j][i] = prob\n sequence_table[j][i] = next_state_seq\n\n # end case - all states to __STOP__\n for i in range(len(states)):\n try:\n transition_prob = _transition_df[(states[i], '__STOP__')]\n except KeyError:\n transition_prob = 0.0\n\n value_table[i][-1] = float(transition_prob) * float(value_table[i][-2])\n\n # take optimal from table and return optimal val and sequence\n 
max_val = 0\n result_seq = []\n for i in range(len(states)):\n prob = float(value_table[i][-1]) # take all from last\n if max_val == 0 or prob > max_val:\n max_val = prob\n result_seq = sequence_table[i][-1]\n\n return result_seq[1:]", "def viterbi_path_log(prior, transmat, observ_likelihood):\n T = observ_likelihood.shape[-1]\n N = observ_likelihood.shape[0]\n\n path = numpy.zeros(T, dtype=numpy.int32)\n global_score = numpy.zeros(shape=(N,T))\n predecessor_state_index = numpy.zeros(shape=(N,T), dtype=numpy.int32)\n\n t = 1\n global_score[:, 0] = prior + observ_likelihood[:, 0]\n # need to normalize the data\n \n for t in range(1, T):\n for j in range(N):\n temp = global_score[:, t-1] + transmat[:, j] + observ_likelihood[j, t]\n global_score[j, t] = max(temp)\n predecessor_state_index[j, t] = temp.argmax()\n\n path[T-1] = global_score[:, T-1].argmax()\n \n for t in range(T-2, -1, -1):\n path[t] = predecessor_state_index[ path[t+1], t+1]\n\n return [path, predecessor_state_index, global_score]", "def viterbi(prob_matrix):\n TINY = 1e-6 # to avoid NaNs in logs\n\n # if prob_matrix is 1D, make it 2D\n if len(np.shape(prob_matrix)) == 1:\n prob_matrix = [prob_matrix]\n \n length = len(prob_matrix)\n\n probs = np.zeros_like(prob_matrix)\n backpt = np.ones_like(prob_matrix, dtype=np.int32) * -1\n \n for i in [0,1,2,3,4]:\n probs[0][i] = np.log(prob_matrix[0][i]+TINY)\n \n # {B, M, E, S} <=== 0:begin, 1:middle, 2:end, 3:single\n for t in range(1, length):\n # E, S -> B | B, M -> M | B, M -> E | E, S -> S\n previous_of = [[0,0], [3,4], [1,2], [1,2], [3,4]]\n for i in range(5):\n prevs = previous_of[i]\n max_id = prevs[np.argmax([probs[t-1][prevs[0]], probs[t-1][prevs[1]]])]\n backpt[t][i] = max_id\n probs[t][i] = np.log(prob_matrix[t][i]+TINY) + probs[t-1][max_id]\n\n seq = np.ones(length, 'int32') * -1\n #print(probs[length-1])\n seq[length-1] = np.argmax(probs[length-1])\n #print(seq[length-1])\n max_prob = probs[length-1][seq[length-1]]\n for t in range(1, length):\n seq[length-1-t] = backpt[length-t][seq[length-t]]\n \n return seq", "def pathProb(self, path):\n # Establish initial state distribution.\n estState = []\n for s in range(self.P):\n estState.append(self.initial(path[0][0], s))\n logProb = 0\n for step in range(1, len(path)):\n # Calculate a softmax probability that the agent uses each alpha\n # vector, then sort by action.\n lastF = path[step-1][0]\n lastP = path[step-1][1]\n thisF = path[step][0]\n thisP = path[step][1]\n\n # These are log probs.\n actionProbs = [0.0]*self.A\n totalWeight = float('-inf')\n maxScore = float('-inf')\n for action in range(self.A):\n score = self.valueLookAhead(lastF, estState, action)\n maxScore = max(score, maxScore)\n actionProbs[action] = self.tau * score\n totalWeight = logAdd(totalWeight, self.tau * score)\n # Tally up the probability that the agent goes to the correct state.\n pTrans = 0\n actionTable = {}\n for action in range(self.A):\n nextSTable = self.trans(lastF, lastP)[action]\n if not (thisF, thisP) in nextSTable:\n continue\n pThisAction = nextSTable[(thisF, thisP)] * \\\n math.exp(actionProbs[action] - totalWeight)\n actionTable[action] = pThisAction\n pTrans += pThisAction\n if pTrans == 0:\n return float('-inf')\n logProb += math.log(pTrans)\n\n # Choose which action we are taking.\n for action in actionTable:\n actionTable[action] /= pTrans\n thisAction = randomSample(actionTable) #random!\n\n # Update the agent's guess of the hidden states.\n nextEstState = [0.0]*self.P\n thisObs = randomSample(self.obs(lastF, lastP)) #random!\n 
for guessP in range(self.P):\n # What is the probability we are in state guessP?\n pGuessP = estState[guessP] * self.obs(lastF, guessP)[thisObs]\n # Given that we are in state guessP, what is the probability that\n # we move to each new state in P?\n newStates = self.trans(lastF, guessP)[thisAction]\n for newState, prob in newStates.iteritems():\n if newState[0] == thisF:\n nextEstState[newState[1]] += pGuessP * prob\n # Normalize nextEstState.\n estState = [i/sum(nextEstState) for i in nextEstState]\n return logProb", "def viterbi(seq, emission_mat, transition_mat, k_counter):\r\n k_dim = k_counter + NOT_MOTIF_STATES\r\n N = len(seq)\r\n prob_mat = wrap_log(np.zeros([k_dim, N]))\r\n trace_mat = np.zeros([k_dim, N])\r\n prob_mat[0, 0] = wrap_log(1)\r\n for j in range(1, N):\r\n curr_letter = prob_mat[:, j - 1].reshape((-1, 1))\r\n potential_trans = curr_letter + transition_mat\r\n max_values = np.max(potential_trans, axis=0).T\r\n trace_mat[:, j] = np.argmax(potential_trans, axis=0).T\r\n prob_mat[:, j] = max_values + emission_mat[:, emission_dict[seq[j]]]\r\n # begin trace\r\n motif_order = EMPTY_STRING\r\n curr_k = int(np.argmax(prob_mat[:, -1]))\r\n for j in range(N - 1, -1, -1):\r\n last_motif_state = k_dim - STATES_AT_END\r\n if FIRST_MOTIF_STATE <= curr_k <= last_motif_state:\r\n motif_order = MOTIF + motif_order\r\n else:\r\n motif_order = BACKGROUND + motif_order\r\n curr_k = int(trace_mat[curr_k, j])\r\n return motif_order[1:-1]", "def viterbi(p_observations_given_state, p_transition, p_initial):\n p_observations_given_state = numpy.asarray(p_observations_given_state)\n p_transition = numpy.asarray(p_transition)\n p_initial = numpy.asarray(p_initial)\n N, S = p_observations_given_state.shape\n assert p_transition.shape in {(S, S), (N-1, S, S)}\n if p_transition.shape == (S, S):\n p_transition = numpy.array([p_transition for i in range(N-1)])\n assert numpy.allclose(numpy.sum(p_transition, axis=2), 1)\n assert p_initial.shape == (S,)\n assert numpy.allclose(numpy.sum(p_initial), 1)\n\n # convert all probabilities to log probabilities so we can sum instead of\n # multiplying, which better controls numerical error.\n err = numpy.seterr(divide='ignore') # allow log(0) to go to -inf, as desired\n lp_observations_given_state = numpy.log(p_observations_given_state)\n lp_transition = numpy.log(p_transition)\n lp_initial = numpy.log(p_initial)\n numpy.seterr(**err)\n\n states = numpy.arange(S)\n # path[i] always contains the maximum likelihood sequence of states ending at state i\n path = [[i] for i in states]\n # lp_state contains the current log probability of being in the state given the sequence\n # of observations thus far considered.\n lp_state = lp_observations_given_state[0] + lp_initial\n\n for lp_obs, lp_trans in zip(lp_observations_given_state[1:], lp_transition):\n # For each observation after the first timepoint, construct an (S, S)\n # shape array where [si, sj] contains the log probability of going from\n # state si to state sj between time t and t+1.\n # Assume we know for each state si prob(si at time t), the probability\n # of being in that state at that time, then we can calculate the probability\n # of being in any given state sj at time t+1:\n # prob(transition from si at time t to sj at time t+1) = prob(si at t) *\n # prob(si->sj between t and t+1) *\n # prob(observation at t+1 given state sj)\n # prob(j at time t+1) = max_i(prob(i at time t -> j at time t+1))\n #\n # Thus we merely need to keep updating our estimates for the probability\n # of being in each state at 
each time, and keep a list of the path that\n # lead to each state.\n #\n # The actual code in use is 100% equivalent to the code below; however it\n # is rather more efficient.\n #\n # lp_transition_t = numpy.zeros((s, s), dtype=float)\n # new_path = []\n # lp_state = []\n # for s_to in states:\n # best_from_lp = -numpy.inf\n # for s_from in states:\n # lp_transition_t[s_from, s_to] = lp_state[s_from] + lp_trans[s_from, s_to] + lp_obs[s_to]\n # if lp_transition_t[s_from, s_to] > best_from_lp:\n # best_from = s_from\n # best_from_lp = lp_transition_t[s_from, s_to]\n # lp_state.append(best_from_lp)\n # new_path.append(path[best_from] + [s_to])\n # path = new_path\n lp_transition_t = lp_state[:,numpy.newaxis] + lp_trans + lp_obs[numpy.newaxis,:]\n best_from = numpy.argmax(lp_transition_t, axis=0)\n path = [path[s_from]+[s_to] for s_to, s_from in enumerate(best_from)]\n lp_state = lp_transition_t[best_from, states]\n last_state = numpy.argmax(lp_state)\n return numpy.array(path[last_state])", "def viterbi(self, hmm, initial, emissions):\n probabilities = hmm.emission(emissions[0]) * initial\n stack = []\n \n for emission in emissions[5:]:\n trans_probabilities = hmm.transition_probabilities * np.row_stack(probabilities) #Matrix for transition probabilities\n max_col_ixs = np.argmax(trans_probabilities, axis=0)\n probabilities = hmm.emission(emission) * trans_probabilities[max_col_ixs, np.arange(hmm.num_states)] #Probabilities\n stack.append(max_col_ixs) #Store the axis and the data in the stack\n state_seq = [np.argmax(probabilities)] #Store the resulted probabilities\n\n while stack:\n max_col_ixs = stack.pop() #Take out the top data store in stack\n state_seq.append(max_col_ixs[state_seq[-1]])\n state_seq.reverse()\n return state_seq", "def viterbi(adj_matrix, label_sequence, starting_vertex):\n\n assert adj_matrix, \"adj_matrix is None or empty.\"\n n = len(adj_matrix) # vertex count.\n for row in adj_matrix:\n assert len(row) == n, \"adj_matrix is not square.\"\n\n assert 0 <= starting_vertex <= n - 1, \"starting_vertex out of range.\"\n\n assert label_sequence, \"label_sequence is None or empty.\"\n k = len(label_sequence)\n for l in label_sequence:\n assert isinstance(l, int) and l > 0, \"label ids must be positive integers.\"\n\n p = [[0 for _ in range(0, k)] for _ in range(0, n)]\n for j in range(k - 1, -1, -1):\n for beg in range(0, n):\n for end in range(0, n):\n if not adj_matrix[beg][end]: # No edge from i to r.\n continue\n\n assert isinstance(adj_matrix[beg][end], AdjMatrixElem),\\\n \"adj_matrix[%d][%r] is not an AdjMatrixElem\" % (beg, end)\n elem = adj_matrix[beg][end]\n if elem.label_id != label_sequence[j]:\n continue\n\n later_prob = 1 if j == k - 1 else p[end][j + 1]\n if elem.probability * later_prob > p[beg][j]:\n p[beg][j] = elem.probability * later_prob\n\n if round(p[starting_vertex][0] - 0.0, PROBABILITY_PRECISION) == 0:\n return 0, NO_SUCH_PATH\n\n path = [starting_vertex]\n for j in range(0, k):\n beg = path[j]\n for end in range(0, n):\n later_prob = 1 if j == k - 1 else p[end][j + 1]\n if adj_matrix[beg][end] and adj_matrix[beg][end].label_id == label_sequence[j]\\\n and round(p[beg][j] - adj_matrix[beg][end].probability * later_prob, PROBABILITY_PRECISION) == 0:\n path.append(end)\n break\n\n return p[starting_vertex][0], tuple(path)", "def Modified_Viterbi(_sentence, _model, _emission_df, _transition_df, _2nd_order_df):\n\n if not _sentence:\n return []\n\n # EXECUTE VITERBI\n states = [state for state, _ in _model.y_count.items()]\n 
states.remove('__START__')\n states.remove('__STOP__')\n\n # keep table of values\n # (len(states) x len(sentence))\n value_table = [[0 for x in range(len(_sentence) + 1)] for y in range(len(states))]\n\n # keep table of sequences\n sequence_table = [[[] for x in range(len(_sentence))] for y in range(len(states))]\n\n # base case - START to all states, 1st order.\n # 2nd order not possible for base case\n for i in range(len(states)):\n # use 1st order, since 2nd order is non-existent\n # transition prob from __START__ to anything\n try:\n # find transition from start to state\n transition_prob = _transition_df[('__START__', states[i])]\n except KeyError:\n transition_prob = 0.0\n\n # error occurs here due to empty _sentence\n try:\n # Find emission of word from state\n emission_prob = _emission_df[(_sentence[0], states[i])]\n except KeyError:\n emission_prob = 0.0\n\n value_table[i][0] = float(transition_prob) * float(emission_prob)\n sequence_table[i][0] = ['__START__', states[i]]\n\n # iterative/recursive case - 2nd order\n # loop through rest of words in sentence\n for i in range(1, len(_sentence)):\n\n # storage for prev\n prev_optimal = 0.0\n prev_state_seq = []\n\n # loop through states for every word\n for j in range(len(states)):\n try:\n # find e(xi|yj), prob emitting word from current state\n emission_prob = float(_emission_df[(states[j], _sentence[i])])\n except KeyError:\n emission_prob = 0\n\n # find prev_optimal\n if prev_optimal == 0.0:\n for k in range(len(states)):\n test_optimal = float(value_table[k][i-1])\n if test_optimal >= prev_optimal:\n prev_optimal = test_optimal\n prev_state_seq = sequence_table[k][i-1]\n\n prev_1 = prev_state_seq[-1]\n prev_2 = prev_state_seq[-2]\n\n # use 2nd order here - modified\n try:\n transition_prob = float(_2nd_order_df[((prev_2, prev_1), states[j])])\n except KeyError:\n transition_prob = 0.0\n\n prob = prev_optimal * transition_prob * emission_prob\n next_state_seq = prev_state_seq + [states[j]]\n\n value_table[j][i] = prob\n sequence_table[j][i] = next_state_seq\n\n # end case - all states to __STOP__\n for i in range(len(states)):\n prev_state_seq = sequence_table[i][-1]\n prev_1 = prev_state_seq[-1]\n prev_2 = prev_state_seq[-2]\n try:\n transition_prob = float(_2nd_order_df[((prev_2, prev_1), '__STOP__')])\n except KeyError:\n transition_prob = 0.0\n\n value_table[i][-1] = float(transition_prob) * float(value_table[i][-2])\n\n max_val = 0\n result_seq = []\n for i in range(len(states)):\n prob = float(value_table[i][-1]) # take all from last\n if max_val == 0 or prob > max_val:\n max_val = prob\n result_seq = sequence_table[i][-1]\n\n return result_seq[1:]", "def viterbi(self, idx1_p, idx2_p, idx1_t, idx2_t):\r\n\r\n # length of each string\r\n len_a = idx2_t - idx1_t + 1\r\n len_b = idx2_p - idx1_p + 1\r\n\r\n # dp table\r\n # its contents is edit distance, dx, dy to the best previous path\r\n dp = [[(0, -1, -1) for x in range(len_a+1)] for y in range(len_b+1)]\r\n\r\n # initialize first row and first column\r\n dp[0][0] = [0, 0, 0]\r\n for i in range(1, len_a+1):\r\n dp[0][i] = [i, 0, -1]\r\n\r\n for i in range(1, len_b+1):\r\n dp[i][0] = [i, -1, 0]\r\n\r\n # dp update\r\n for i in range(1, len_b+1):\r\n for j in range(1, len_a+1):\r\n index_a = j-1\r\n index_b = i-1\r\n\r\n cost = self._compute_cost(self.pattern[idx1_p+index_b][0], self.text[idx1_t+index_a][0].lower())\r\n cost_p = 1\r\n cost_t = 1\r\n \r\n dx = 0\r\n dy = 0\r\n mincost = 0\r\n\r\n if dp[i-1][j][0] < dp[i][j-1][0]:\r\n dx = -1\r\n dy = 0\r\n mincost = 
dp[i-1][j][0] + cost_p\r\n else:\r\n dx = 0\r\n dy = -1\r\n mincost = dp[i][j-1][0] + cost_t\r\n\r\n if dp[i-1][j-1][0] + cost < mincost:\r\n dx = -1\r\n dy = -1\r\n mincost = dp[i-1][j-1][0] + cost\r\n\r\n dp[i][j] = [mincost, dx, dy]\r\n\r\n # backward to get the best_start_index\r\n cx = len_b\r\n cy = len_a\r\n\r\n # if self.align_output:\r\n # align_right_f = open(\"./output/\"+self.label+\"_align_right\", \"w\")\r\n # align_wrong_f = open(wrong_path, \"w\")\r\n # align_output_f = open('./output/'label+'_align_right', 'w', encoding='utf-8')\r\n # align_output_f = open('./output/'label+'_align_wrong', 'w', encoding='utf-8')\r\n # right_align = dict()\r\n # wrong_align = dict()\r\n\r\n while cy != 0:\r\n dx = dp[cx][cy][1]\r\n dy = dp[cx][cy][2]\r\n\r\n if dx == 0 and dy == -1: # deal with deletion\r\n self.text[idx1_t+cy-1][1] = 0\r\n else:\r\n self.text[idx1_t+cy-1][1] = self.pattern[idx1_p+cx-1][1]\r\n self.text[idx1_t+cy-1][2] = self.pattern[idx1_p+cx-1][2]\r\n\r\n # for alignment statistics\r\n # if self.text[idx1_t+cy-1][0] == self.pattern[idx1_p+cx-1][0]:\r\n # word_label = self.text[idx1_t+cy-1][0]\r\n # if word_label in right_align:\r\n # right_align[word_label] += 1\r\n # else:\r\n # right_align[word_label] = 1\r\n # else:\r\n if self.text[idx1_t+cy-1][0] != self.pattern[idx1_p+cx-1][0]:\r\n text_label = self.text[idx1_t+cy-1][0]\r\n pattern_label = self.pattern[idx1_p+cx-1][0]\r\n # if (text_label, pattern_label) in wrong_align:\r\n # wrong_align[(text_label, pattern_label)] += 1\r\n # else:\r\n # wrong_align[(text_label, pattern_label)] = 1\r\n\r\n\r\n cx += dx\r\n cy += dy\r\n\r\n # right_align_s = sorted(right_align.items(), key=operator.itemgetter(1), reverse=True)\r\n # wrong_align_s = sorted(wrong_align.items(), key=operator.itemgetter(1), reverse=True)\r\n\r\n # for r in right_align_s:\r\n # align_right_f.write(\"{}\\t{}\\n\".format(r[0], r[1]))\r\n # for w in wrong_align_s:\r\n # align_wrong_f.write(\"{}\\t{}\\t{}\\n\".format(w[0][0], w[0][1], w[1]))\r\n\r\n # align_right_f.close()\r\n # align_wrong_f.close()\r\n\r\n return self.text", "def find_min_hamiltonian_path(G,weights,probs_instead_of_weights=False):\n\n # Create a new model\n m = Model(\"hamiltonian_cycle\")\n \n # Create variables\n x_vars = {}\n u_vars = {}\n for var1 in permute(G.vertices()):\n for var2 in permute(G.vertices()):\n if var1 != var2:\n x_vars[(var1,var2)] = m.addVar(vtype='B', name=\"x_\"+str(var1)+'_'+str(var2))\n u_vars[var1] = m.addVar(vtype=GRB.INTEGER, name=\"u_\"+str(var1))\n m.update()\n \n for var in G.vertices():\n if var != START_NODE:\n cur_incoming = LinExpr([(1.0,v) for k,v in x_vars.items() if (k[1] == var)])\n #print(cur_incoming)\n m.addConstr(cur_incoming,GRB.EQUAL,1.0)\n \n if var != END_NODE:\n cur_outgoing = LinExpr([(1.0,v) for k,v in x_vars.items() if (k[0] == var)])\n #print(cur_outgoing)\n m.addConstr(cur_outgoing,GRB.EQUAL,1.0)\n \n for var1 in G.vertices():\n for var2 in G.vertices():\n if var1 != var2:\n c = LinExpr([(1.0,u_vars[var1]),(-1.0,u_vars[var2]),(G.num_vertices(),x_vars[(var1,var2)])])\n #print(c)\n m.addConstr(c,GRB.LESS_EQUAL,G.num_vertices()-1)\n \n # Set objective\n #try:\n edge_weights = permute(G.get_edge_weights(weights))\n if probs_instead_of_weights:\n all_probs = []\n for v in G.vertices():\n if v != END_NODE:\n batch_scores = [(e,w) for e,w in edge_weights if e[0] == v]\n S = logsumexp([x[1] for x in batch_scores])\n batch_scores = [(e,np.exp(w-S)) for e,w in batch_scores]\n all_probs.extend(batch_scores)\n edge_weights = all_probs\n objective = 
LinExpr([(weight,x_vars[edge]) for edge,weight in edge_weights])\n #except TypeError:\n # return None\n \n m.setObjective(objective,GRB.MINIMIZE)\n m.update()\n code = m.optimize()\n \n try:\n return [k for k,v in x_vars.items() if v.x > 0.98]\n except GurobiError:\n return None", "def run_viterbi(emission_scores, trans_scores, start_scores, end_scores):\r\n\r\n L = start_scores.shape[0]\r\n assert end_scores.shape[0] == L\r\n assert trans_scores.shape[0] == L\r\n assert trans_scores.shape[1] == L\r\n assert emission_scores.shape[1] == L\r\n N = emission_scores.shape[0]\r\n\r\n #T - Score matrix same as in assignement pdf\r\n T = np.zeros(shape=(L,N))\r\n #Back pointers - to store the previous best tag for word at (i-1)th position\r\n #that resulted into current best tag for (i)th word \r\n back_pointer = np.full((L,N), -1)\r\n\r\n for i in xrange(L):\r\n emission = emission_scores[0][i]\r\n combined = emission + start_scores[i]\r\n T[i][0] = combined\r\n\r\n # Loop over all the words in a sequesnce\r\n for i in xrange(1, N):\r\n # Loop over all the tags for the word at index i \r\n for j in xrange(L):\r\n # Varibale for maximum tag score from previous word (word at i-1)\r\n tmp_max = float('-inf')\r\n tmp_max_idx = -1\r\n #Emission value of word at idx i from state (i.e tag) j\r\n emission = emission_scores[i][j]\r\n #Loop over all the possibile tags for previous word T[tag (1..L), word at i-1]\r\n #and get max among them. Store the corresponding back pointer for there T[tag (1..L), word at i-1]\r\n for k in xrange(L):\r\n transition = trans_scores[k][j]\r\n prev_path = T[k][i-1]\r\n combined = transition + prev_path\r\n if (tmp_max < combined):\r\n tmp_max = combined\r\n tmp_max_idx = k\r\n\r\n back_pointer[j][i] = tmp_max_idx\r\n T[j][i] = tmp_max + emission\r\n\r\n # Doing this step outside because if N == 1 then above loop will not run\r\n # Variable for maximum tag score\r\n tag_max = float('-inf')\r\n # Variable for back pointer(previous T[tag, word])\r\n tag_max_idx = -1\r\n for i in xrange(L):\r\n T[i][N-1] = T[i][N-1] + end_scores[i]\r\n if (tag_max < T[i][N-1]):\r\n tag_max = T[i][N-1]\r\n tag_max_idx = i\r\n # print(\"Max tag -> \" + str(tag_max_idx))\r\n\r\n #Variable to track the path length - should be equal to N\r\n path_length = 0\r\n #Variable to back track on the tags\r\n tag_idx = tag_max_idx\r\n #Varibale to track the word index in N\r\n word_idx = N-1 \r\n #Path strored using backtracking\r\n y = []\r\n\r\n #Getting the best path using backtracking on back_pointers\r\n while path_length != N-1:\r\n y.append(back_pointer[tag_idx][word_idx])\r\n tag_idx = back_pointer[tag_idx][word_idx]\r\n word_idx = word_idx - 1\r\n path_length = path_length + 1\r\n\r\n #Reversing the backtracked path\r\n y = y[::-1]\r\n #Adding the tag for the last word idx in N\r\n y.append(tag_max_idx)\r\n # print(\"Path -> \" + str(y))\r\n\r\n return (tag_max, y)", "def run_viterbi(emission_scores, trans_scores, start_scores, end_scores):\r\n L = start_scores.shape[0]\r\n assert end_scores.shape[0] == L\r\n assert trans_scores.shape[0] == L\r\n assert trans_scores.shape[1] == L\r\n assert emission_scores.shape[1] == L\r\n N = emission_scores.shape[0]\n \r\n trans_scores += start_scores\n back_ptrs = np.zeros_like(emission_scores,dtype=np.int32)\n emission_scores += start_scores\r\n em_scores = np.zeros_like(emission_scores)\n em_scores[0] = start_scores+emission_scores[0]\n \n for k in range(1,N):\n transition_plus_score =trans_scores+np.expand_dims(em_scores[k-1],1)\n back_ptrs[k] 
=np.argmax(transition_plus_score,0)\n em_scores[k] =np.max(transition_plus_score,0)+emission_scores[k]\n \n v = [np.argmax(end_scores+em_scores[-1])]\n v_score = np.max(end_scores+em_scores[-1])\n\n for back_ptr in reversed(back_ptrs[1:]):\n v.append(back_ptr[v[-1]])\n v.reverse()\n return v_score,v", "def generate_aima_grid():\n\n # https://stats.stackexchange.com/questions/339592/how-to-get-p-and-r-values-for-a-markov-decision-process-grid-world-problem\n\n actions_map = {0: '^', 1: 'V', 2: '<', 3: '>'}\n\n transitions = np.zeros((4, 12, 12)) # (A, S, S)\n transitions[0] = [[0.9, 0.1, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0.1, 0.8, 0.1, 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0.1, 0.8, 0.1, 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0.8, 0., 0., 0., 0.2, 0., 0., 0., 0., 0., 0., 0.],\n [0., 0.8, 0., 0., 0.1, 0., 0.1, 0., 0., 0., 0., 0.],\n [0., 0., 0.8, 0., 0., 0., 0.1, 0.1, 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0.8, 0., 0., 0., 0.1, 0.1, 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0.1, 0.8, 0.1, 0.],\n [0., 0., 0., 0., 0., 0., 0.8, 0., 0., 0.1, 0., 0.1],\n [0., 0., 0., 0., 0., 0., 0., 0.8, 0., 0., 0.1, 0.1]]\n\n transitions[1] = [[0.1, 0.1, 0., 0., 0.8, 0., 0., 0., 0., 0., 0., 0.],\n [0.1, 0.8, 0.1, 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0.1, 0., 0.1, 0., 0., 0.8, 0., 0., 0., 0., 0.],\n [0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0.2, 0., 0., 0., 0.8, 0., 0., 0.],\n [0., 0., 0., 0., 0.1, 0., 0.1, 0., 0., 0.8, 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.1, 0.1, 0., 0., 0.8, 0.],\n [0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0.9, 0.1, 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0.1, 0.8, 0.1, 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.1, 0.8, 0.1],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.1, 0.9]]\n\n transitions[2] = [[0.9, 0., 0., 0., 0.1, 0., 0., 0., 0., 0., 0., 0.],\n [0.8, 0.2, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0.8, 0.1, 0., 0., 0., 0.1, 0., 0., 0., 0., 0.],\n [0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0.1, 0., 0., 0., 0.8, 0., 0., 0., 0.1, 0., 0., 0.],\n [0., 0.1, 0., 0., 0.8, 0., 0., 0., 0., 0.1, 0., 0.],\n [0., 0., 0.1, 0., 0., 0., 0.8, 0., 0., 0., 0.1, 0.],\n [0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0.1, 0., 0., 0., 0.9, 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0.8, 0.2, 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.1, 0., 0., 0.8, 0.1, 0.],\n [0., 0., 0., 0., 0., 0., 0., 0.1, 0., 0., 0.8, 0.1]]\n\n transitions[3] = [[0.1, 0.8, 0., 0., 0.1, 0., 0., 0., 0., 0., 0., 0.],\n [0., 0.2, 0.8, 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0.1, 0.8, 0., 0., 0.1, 0., 0., 0., 0., 0.],\n [0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0.1, 0., 0., 0., 0.8, 0., 0., 0., 0.1, 0., 0., 0.],\n [0., 0.1, 0., 0., 0., 0., 0.8, 0., 0., 0.1, 0., 0.],\n [0., 0., 0.1, 0., 0., 0., 0., 0.8, 0., 0., 0.1, 0.],\n [0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0.1, 0., 0., 0., 0.1, 0.8, 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.2, 0.8, 0.],\n [0., 0., 0., 0., 0., 0., 0.1, 0., 0., 0., 0.1, 0.8],\n [0., 0., 0., 0., 0., 0., 0., 0.1, 0., 0., 0., 0.9]]\n\n rewards = np.asarray((-0.02, -0.02, -0.02, 1, -0.02, -0.02, -0.02, -1, -0.02, -0.02, -0.02, -0.02))\n\n print('\\n***** aima grid world *****\\n')\n print('Transition matrix:', transitions.shape)\n print('Reward matrix:', rewards.shape)\n\n return transitions, rewards", "def 
time_path_iteration(params=params, S=3, T=50, weight=0.3, tol=1e-12, maxiter=100):\n ss_output = get_SS()\n b_ss = ss_output['b_ss']\n b_init = np.array([0, 0.8 * b_ss[0], 1.1 * b_ss[1]]) # t=0\n\n # Guess transition path, finishes at steady_state\n Kguess = np.linspace(b_init.sum(), ss_output['K_ss'], T)\n\n s = 1\n K_dynamic = Kguess\n b_current = np.zeros((S,T)) # initialize array to store savings decisions\n b_current[:,0] = b_init\n\n # Update b_path until convergence\n its = 0\n ee_diff = 7.0\n while ee_diff > tol and its < maxiter:\n its += 1\n w_dynamic = find_w(L=params['labor_supply'].sum(), K=K_dynamic)\n r_dynamic = find_r(L=params['labor_supply'].sum(), K=K_dynamic)\n for t in range(T-2):\n\n #solve for b32, savings decision of middle-aged in first period\n ee_param = (w_dynamic, r_dynamic, params['labor_supply'], b_current[:,t], s, t)\n b_current[s+1,t+1] = opt.root(ee_err_1, 0, args=ee_param).x\n\n # solve for b22, b33, savings decision of young gen in middle/old generations\n ee_param = (w_dynamic, r_dynamic, params['labor_supply'], b_init, s, t)\n b_current[s,t+1], b_current[s+1, t+2]= opt.root(ee_err_23, [0,0], args=ee_param).x\n # fill in table\n b_current[s,T-1] = b_current[s,T-2]\n\n # Check for convergence\n K_prime = b_current.sum(axis=0)\n ee_diff = (K_prime - K_dynamic).max()\n\n# rc_diff = production(K_prime, L=params['labor_supply'].sum())\n# - Ct = (1 + r_dynamic) * ()\n# - np.roll(K_prime, len(K_prime)-1)\n# - (1 - delta) * K_prime\n\n print('Iteration number: ', its, 'Current EE difference: ', ee_diff)\n # update new capital path\n K_dynamic = weight * K_prime + (1-weight) * K_dynamic\n\n fig, ax = plt.subplots(1,1,figsize=(8,6))\n plt.plot(range(T), Kguess, 'r--',lw=0.7, label='Kguess')\n plt.plot(range(T), K_dynamic , label='Capital Path Solution')\n plt.title('Transition Path of Aggregate Capital')\n plt.xlabel('Time period')\n plt.ylabel('Aggregate Capital')\n plt.legend()\n\n fig, ax = plt.subplots(1,1,figsize=(8,6))\n plt.plot(range(T), r_dynamic, 'g-o',label='Interest rate Path Solution')\n plt.title('Transition Path of Aggregate Interest rate')\n plt.xlabel('Time period')\n plt.ylabel('Interest Rate')\n plt.legend()\n\n fig, ax = plt.subplots(1,1,figsize=(8,6))\n plt.plot(range(T), w_dynamic, 'k-o',label='Wage Path Solution')\n plt.title('Transition Path of Wages')\n plt.xlabel('Time period')\n plt.ylabel('Wages')\n plt.legend()\n\n return K_dynamic", "def IteratePaths(self):\n self.w = self.setwage(self.K, self.N)\n self.r = self.setrate(self.K, self.N)\n self.b = self.benefit(self.N)\n\n a1, aT = [-1,], []\n\n for q in range(self.Nq):\n if q == 0:\n self.apath[-1] = 0.2\n elif q == 1:\n self.apath[-1] = 0.3\n else:\n self.apath[-1] = max(0,aT[-1]-(aT[-1]-aT[-2])*a1[-1]/(a1[-1]-a1[-2]))\n \n self.npath[-1] = 0\n self.cpath[-1] = self.apath[-1]*(1+self.r) + self.b\n\n for y in range(-2,-(self.T+1),-1): # y = -2, -3,..., -60\n self.apath[y], self.npath[y], self.cpath[y] = self.DirectSolve(y)\n\n aT.append(self.apath[-1])\n a1.append(self.apath[-self.T])\n if (fabs(self.apath[-self.T])<self.tol):\n break\n for y in range(-1,-(self.T+1),-1):\n self.upath[y] = self.util(self.cpath[y],self.npath[y])", "def compute_transition_matrix(M, triang_distr):\n prob_self = 0.99\n \n A = np.zeros((2*M, 2*M))\n max_step = len(triang_distr) // 2\n\n for i in range(M):\n if i < max_step:\n A[i, 0:i+max_step] = prob_self * triang_distr[max_step - i:-1] / np.sum(triang_distr[max_step - i:-1])\n A[i+M, M:i+M+max_step] = prob_self * triang_distr[max_step - i:-1] / 
np.sum(triang_distr[max_step - i:-1])\n\n if i >= max_step and i < M-max_step:\n A[i, i-max_step:i+max_step+1] = prob_self * triang_distr\n A[i+M, (i+M)-max_step:(i+M)+max_step+1] = prob_self * triang_distr\n\n if i >= M-max_step:\n A[i, i-max_step:M] = prob_self * triang_distr[0:max_step - (i-M)] / np.sum(triang_distr[0:max_step - (i-M)])\n A[i+M, i+M-max_step:2*M] = prob_self * triang_distr[0:max_step - (i - M)] / \\\n np.sum(triang_distr[0:max_step - (i - M)])\n\n A[i, i+M] = 1 - prob_self\n A[i+M, i] = 1 - prob_self\n \n return A", "def viterbi(self, O):\n\n predecessor = numpy.ones([len(O), len(self)], dtype = int) * -1\n delta = numpy.zeros([len(O), len(self)])\n B = numpy.zeros([len(self), len(O)])\n\n for j in range(len(self.S)):\n delta[0, j] = self.log_P[j] + self.S[j].b(O[0])\n\n for t in range(1, delta.shape[0]):\n for j in range(delta.shape[1]):\n #\n _temp_ = delta[t - 1, :] + self.A.log_transitions[:, j]\n #\n _from_ = numpy.argmax(_temp_)\n predecessor[t, j] = _from_\n delta[t, j] = delta[t - 1, _from_] + self.S[j].b(O[t])\n #\n #\n if self.A.force_to_one_terminal_state:\n _best_ = len(delta[-1]) - 1 # According to Transitions.py the terminal state is the last one\n else:\n _best_ = numpy.argmax(delta[-1, :])\n seq = numpy.ones(len(O)) * -1\n t = len(O) - 1\n i = _best_\n while t > 0:\n seq[t] = i\n i = predecessor[t, i]\n t = t - 1\n #\n return delta[-1, _best_], seq", "def viterbi(self, x, do_logging=True, return_omega=False, **args):\n if do_logging:\n logging.debug(\"Started calculating Viterbi path.\")\n N = len(x)\n alpha, c = self.estimate(x, want_alpha=True)\n alpha, c = np.log(alpha), np.log(c)\n alpha = np.array([alpha[n] + c[:n+1].sum() for n in range(N)])\n # ^ log \\alpha (Not \\hat{\\alpha})\n logt, loge = np.log(self._t), np.log(self._e)\n omega = np.log(self._i) + loge[x[0]]\n omega_history = []\n # ^ omega: probability at current position (at position 0 here)\n path = np.array([[i for i in range(self._K)] for n in range(N)])\n # calculate the most probable path at each position of the observation\n for n in range(1, N):\n prob = loge[x[n]] + omega + logt.T\n # NxN matrix row: transition from, col: transition to\n omega = np.max(prob, axis=1)\n omega_history.append(omega)\n # omega is a vector with length N\n path[n] = np.argmax(prob, axis=1)\n # Seek the most likely route (From N-1 to 0)\n route = [np.argmax(omega)]\n omegas = [omega[route[0]]]\n for n in range(N - 2, -1, -1):\n route.append(path[n][route[-1]])\n omegas.append(omega_history[n][route[-1]])\n if do_logging:\n logging.debug(\"Finished calculating Viterbi path.\")\n logging.debug(omega)\n if return_omega:\n return route[::-1], omega.max(), omegas[::-1]\n else:\n return route[::-1], omega.max()", "def beam_search(X, u, w, b, relLabels):\n\n candidate_paths = [[] for _ in range(10)] # contains the candidate label sets\n candidate_vals =[[] for _ in range(10)] # contains the label values (-1/1) for each candidate set\n candidate_scores = [0. 
for _ in range(10)]\n min_score = -1000\n\n iter = 0\n start = 0\n while True:\n # print(\"Iter: \", iter)\n intermediate_paths = {}\n # intermediate_paths_val = []\n interim_scores = []\n hash_table = {}\n\n cnt_paths = 0\n for cp in range(5):\n labels_curr = candidate_paths[cp]\n labels_val_curr = candidate_vals[cp]\n scores_curr = candidate_scores[cp]\n Y = -np.ones((10, 1))\n for lv in range(len(labels_val_curr)):\n Y[labels_curr[lv]] = labels_val_curr[lv]\n\n for l in range(10):\n candidate_interim = labels_curr[:]\n candidate_vals_interim = labels_val_curr[:]\n # if l in labels_curr:\n # continue\n\n temp_relLabels = []\n for lc in range(len(labels_curr)):\n temp_relLabels.extend(relLabels[labels_curr[lc]])\n\n # temp_relLabels = np.array(list(set(temp_relLabels)))\n temp_relLabels = np.array(list(set(relLabels[l]).intersection(set(labels_curr))))\n model_pos = returnModelVal(X, Y, 1.0, u[l], u[l], b[l][0], np.array(temp_relLabels))\n candidate_interim.append(l)\n\n if model_pos < 0:\n # print('hello')\n candidate_vals_interim.append(-1)\n interim_scores.append(-model_pos)\n else:\n candidate_vals_interim.append(1)\n interim_scores.append(model_pos)\n\n hash_table[cnt_paths] = candidate_interim\n intermediate_paths[cnt_paths] = candidate_vals_interim\n cnt_paths += 1\n # For the first iteration, just iterate once - all labels in one iteration\n if start == 0:\n start = 1\n break\n\n temp_paths = intermediate_paths\n interim_zip = zip(intermediate_paths, interim_scores)\n sorted_scores = sorted(interim_zip, key=lambda x: x[1], reverse=True)[:5]\n intermediate_paths, scores = zip(*sorted_scores)\n\n temp_cand = []\n temp_val = []\n for i in range(len(intermediate_paths)):\n temp_cand.append(hash_table[intermediate_paths[i]])\n temp_val.append(temp_paths[intermediate_paths[i]])\n # candidate_scores[i] += scores[i]\n\n candidate_paths = temp_cand\n candidate_vals = temp_val\n print(candidate_paths)\n print(candidate_vals)\n # print(scores)\n # candidate_scores = scores\n\n # Exit condition from loop\n # if max(interim_scores) < min_score:\n # break\n #\n # min_score = min(interim_scores)\n\n iter += 1\n if iter > 5:\n break\n\n candidate_dict = {}\n for i in range(5):\n for c in range(len(candidate_paths[i])):\n if candidate_paths[i][c] not in candidate_dict:\n candidate_dict[candidate_paths[i][c]] = candidate_vals[i][c]\n elif candidate_dict[candidate_paths[i][c]] != 2:\n if candidate_dict[candidate_paths[i][c]] != candidate_vals[i][c]:\n candidate_dict[candidate_paths[i][c]] = 2.\n\n print(candidate_dict)\n exit()\n return candidate_dict", "def viterbi(self, e_phi, states_dict):\n \n states = [\"0\",\"1\",\"2\"]\n \n if not states_dict:\n first_dict = {} \n for state in states: \n S_e = self.score(e_phi, state) \n first_dict[state] = (S_e,([]))\n return [first_dict] \n \n else:\n last_dict = states_dict[-1]\n this_dict = {}\n scores = self.scores(e_phi)\n for (state, S_e) in scores.iteritems():\n max_score=-float('inf')\n max_label = None\n for prev in states:\n (Sprev, (Hprev))=last_dict[prev]\n if not Hprev:\n Hstate = [prev] # no history\n else:\n Hstate = Hprev[1:]+[prev] \n t_phi = tfeats(Hstate, self.order)\n \n partial_score = Sprev+self.score(t_phi, state)\n if max_score < partial_score:\n S_max = partial_score\n max_hstate = Hstate\n # write to dict\n this_dict[state]=(S_max+S_e,(max_hstate))# brakets\n states_dict.append(this_dict)\n return states_dict", "def ant_colony(map, alpha=3, beta=4, m=10, rho=0.2, q=1, its_max=20):\n n = len(map)\n tau = np.ones((n, n))\n eta = 
1/map.D\n for i in range(n):\n eta[i, i] = 0\n paths_array = np.zeros((m, n), int)\n its = 0\n path_best = np.zeros((its_max, n), int)\n distance_best = np.zeros(its_max)\n\n while its < its_max:\n paths_length = np.zeros(m)\n for i in range(m):\n source = np.random.randint(n)\n visited = []\n unvisited = list(range(n))\n node_now = source\n node_next = -1\n paths_array[i, 0] = source\n\n for j in range(1, n):\n visited.append(node_now)\n unvisited.remove(node_now)\n prob_roulette = np.array([0]*n, dtype=float)\n for k in unvisited:\n prob_roulette[k] = (pow(tau[node_now, k], alpha)\n * pow(eta[node_now, k], beta))\n prob_roulette = prob_roulette/sum(prob_roulette)\n cum_roulette = prob_roulette.cumsum()\n cum_roulette -= np.random.uniform(0, 1)\n node_next = list(cum_roulette >= 0).index(True)\n paths_array[i, j] = node_next\n paths_length[i] += map.D[node_now, node_next]\n node_now = node_next\n paths_length[i] += map.D[node_now, source]\n\n if its == 0:\n distance_best[its] = paths_length.min()\n path_best[its] = paths_array[paths_length.argmin()].copy()\n else:\n if distance_best[its-1] < paths_length.min():\n distance_best[its] = distance_best[its-1]\n path_best[its] = path_best[its-1].copy()\n else:\n distance_best[its] = paths_length.min()\n path_best[its] = paths_array[paths_length.argmin()].copy()\n\n add_tau = np.zeros((n, n))\n\n for i in range(m):\n for j in range(n):\n row = paths_array[i, j]\n col = paths_array[i, (j+1) % n]\n add_tau[row][col] += q/paths_length[i]\n\n tau = (1 - rho)*tau + add_tau\n\n its += 1\n\n return Hamiltonian(path_best[-1], map)", "def objective(V,m,adj):\r\n #number of edges in G = (V,E)\r\n link = 0\r\n for i in range(m):\r\n for j in range(i,m):\r\n if i != j and (V[i],V[j]) in adj or (V[j],V[i]) in adj:\r\n link += 1\r\n #number of expected edges to have a module\r\n edges = factorial(m)/(factorial(m-2)*2)\r\n obj = edges - link\r\n\r\n return obj", "def run_viterbi(emission_scores, trans_scores, start_scores, end_scores):\r\n L = start_scores.shape[0]\r\n assert end_scores.shape[0] == L\r\n assert trans_scores.shape[0] == L\r\n assert trans_scores.shape[1] == L\r\n assert emission_scores.shape[1] == L\r\n N = emission_scores.shape[0]\r\n\r\n y = []\r\n dp_scores = []\r\n back_pointer = []\r\n\r\n for i in xrange(N):\r\n dp_scores.append([])\r\n back_pointer.append([])\r\n for j in xrange(L):\r\n if (i == 0):\r\n score = start_scores[j] + emission_scores[0, j]\r\n back = -1\r\n else:\r\n max = dp_scores[i-1][0] + trans_scores[0, j]\r\n back = 0\r\n for k in xrange(L):\r\n if (dp_scores[i-1][k] + trans_scores[k, j] > max):\r\n max = dp_scores[i-1][k] + trans_scores[k, j]\r\n back = k\r\n score = max + emission_scores[i, j]\r\n dp_scores[i].append(score)\r\n back_pointer[i].append(back)\r\n\r\n s = dp_scores[N-1][0] + end_scores[0]\r\n back = 0\r\n for k in xrange(L):\r\n if (dp_scores[N-1][k] + end_scores[k] > s):\r\n s = dp_scores[N-1][k] + end_scores[k]\r\n back = k\r\n\r\n y.append(back)\r\n for i in range(N-1, 0, -1):\r\n y.append(back_pointer[i][back])\r\n back = back_pointer[i][back]\r\n y.reverse()\r\n\r\n return (s, y)", "def calculate_path(self):\n #Se repite el ciclo para el número especificado de veces\n for i in range(self.iterations):\n for ant in self.ants:\n ant.setup_ant()\n while not ant.final_node_reached:\n #Seleccion aleatoria del nodo a visitar\n node_to_vist = self.select_next_node(self.map.nodes_array[int(ant.actual_node[0])][int(ant.actual_node[1])])\n #Mover la hormiga al siguiente nodo seleccionado al azar\n 
ant.move_ant(node_to_visit)\n #Compruebe si se ha alcanzado la solución\n ant.is_final_node_reached()\n #Agregar la ruta resultante a la lista de rutas\n self.add_to_path_results(self.delete_loops(ant.get_visited_nodes()))\n # Habilitar a la hormiga para otra busqueda\n ant.enable_start_new_path()\n \n # Actualizar el nivel global de feromonas\n self.pheromone_update()\n self.best_result = self.paths[0]\n\n #Vaciar la lista de rutas\n self.empty_paths()\n print('Iteration: ', i, 'lenght of the path: ', len(self.best_result))\n return self.best_result", "def get_transition(self, row, col, action, tot_row, tot_col):\n\n '''\n Expand the grid of the environment to handle when the \n agent decides to move in the direction of a wall \n '''\n state_probabilities = np.zeros((int(np.sqrt(self.env.observation_space.n)) + 2, int(np.sqrt(self.env.observation_space.n)) + 2), dtype=float)\n\n if action == 'UP':\n row += 1\n col += 1\n state_probabilities[row - 1, col] = 0.33 #UP\n state_probabilities[row, col - 1 ] = 0.33 #LEFT\n state_probabilities[row, col + 1] = 0.33 # RIGHT\n state_probabilities[row + 1, col] = 0.0 #DOWN\n elif action == 'LEFT':\n row += 1\n col += 1\n state_probabilities[row - 1, col] = 0.33 #UP\n state_probabilities[row, col - 1 ] = 0.33 #LEFT\n state_probabilities[row, col + 1] = 0.0 # RIGHT\n state_probabilities[row + 1, col] = 0.33 #DOWN\n elif action == 'RIGHT':\n row += 1\n col += 1\n state_probabilities[row - 1, col] = 0.33 #UP\n state_probabilities[row, col - 1 ] = 0.0 #LEFT\n state_probabilities[row, col + 1] = 0.33 # RIGHT\n state_probabilities[row + 1, col] = 0.33 #DOWN\n elif action == 'DOWN':\n row += 1\n col += 1\n state_probabilities[row - 1, col] = 0.0 # UP\n state_probabilities[row, col - 1] = 0.33 # LEFT\n state_probabilities[row, col + 1] = 0.33 # RIGHT\n state_probabilities[row + 1, col] = 0.33 # DOWN\n\n for row in range (0, tot_row+1):\n if state_probabilities[row, 0] != 0:\n state_probabilities[row, 1] += state_probabilities[row, 0]\n elif state_probabilities[row, -1] != 0:\n state_probabilities[row, -2] += state_probabilities[row, -1]\n\n for col in range (0, tot_col+1):\n if state_probabilities[0, col] != 0:\n state_probabilities[1, col] += state_probabilities[0, col]\n elif state_probabilities[-1, col] != 0:\n state_probabilities[-2, col] += state_probabilities[-1, col]\n\n return state_probabilities[1: 1+tot_row, 1:1+tot_col]", "def get_transition(self, row, col, action, tot_row, tot_col):\n\n '''\n Expand the grid of the environment to handle when the \n agent decides to move in the direction of a wall \n '''\n state_probabilities = np.zeros((int(np.sqrt(self.env.observation_space.n)) + 2, int(np.sqrt(self.env.observation_space.n)) + 2), dtype=float)\n\n if action == 'UP':\n row += 1\n col += 1\n state_probabilities[row - 1, col] = 0.33 #UP\n state_probabilities[row, col - 1 ] = 0.33 #LEFT\n state_probabilities[row, col + 1] = 0.33 # RIGHT\n state_probabilities[row + 1, col] = 0.0 #DOWN\n elif action == 'LEFT':\n row += 1\n col += 1\n state_probabilities[row - 1, col] = 0.33 #UP\n state_probabilities[row, col - 1 ] = 0.33 #LEFT\n state_probabilities[row, col + 1] = 0.0 # RIGHT\n state_probabilities[row + 1, col] = 0.33 #DOWN\n elif action == 'RIGHT':\n row += 1\n col += 1\n state_probabilities[row - 1, col] = 0.33 #UP\n state_probabilities[row, col - 1 ] = 0.0 #LEFT\n state_probabilities[row, col + 1] = 0.33 # RIGHT\n state_probabilities[row + 1, col] = 0.33 #DOWN\n elif action == 'DOWN':\n row += 1\n col += 1\n state_probabilities[row - 1, col] = 0.0 # 
UP\n state_probabilities[row, col - 1] = 0.33 # LEFT\n state_probabilities[row, col + 1] = 0.33 # RIGHT\n state_probabilities[row + 1, col] = 0.33 # DOWN\n\n for row in range (0, tot_row+1):\n if state_probabilities[row, 0] != 0:\n state_probabilities[row, 1] += state_probabilities[row, 0]\n elif state_probabilities[row, -1] != 0:\n state_probabilities[row, -2] += state_probabilities[row, -1]\n\n for col in range (0, tot_col+1):\n if state_probabilities[0, col] != 0:\n state_probabilities[1, col] += state_probabilities[0, col]\n elif state_probabilities[-1, col] != 0:\n state_probabilities[-2, col] += state_probabilities[-1, col]\n\n return state_probabilities[1: 1+tot_row, 1:1+tot_col]", "def viterbi(obs: List[int], pi: List[float], A: np.ndarray, B: np.ndarray) -> List[int]:\n pi_log = np.log(pi)\n A_log = np.log(A)\n B_log = np.log(B)\n states = A.shape[0]\n n = len(obs)\n\n D_log = np.zeros((states, n))\n backtrack = np.zeros((states, n - 1)).astype(int)\n D_log[:, 0] = pi_log + B_log[:, obs[0]]\n\n for j in range(1, n):\n for i in range(states):\n temp_sum = A_log[:, i] + D_log[:, j - 1]\n D_log[i, j] = np.max(temp_sum) + B_log[i, obs[j]]\n backtrack[i, j - 1] = np.argmax(temp_sum)\n\n state = np.zeros(n).astype(int)\n state[-1] = np.argmax(D_log[:, -1])\n for n in range(n - 2, -1, -1):\n state[n] = backtrack[int(state[n + 1]), n]\n state = state.tolist()\n return state", "def create_matrices(maze, reward, penalty_s, penalty_l, prob):\n \n r, c = np.shape(maze)\n states = r*c\n p = prob\n q = (1 - prob)*0.5\n \n # Create reward matrix\n path = maze*penalty_s\n walls = (1 - maze)*penalty_l\n combined = path + walls\n \n combined[-1, -1] = reward\n \n R = np.reshape(combined, states)\n \n # Create transition matrix\n T_up = np.zeros((states, states))\n T_left = np.zeros((states, states))\n T_right = np.zeros((states, states))\n T_down = np.zeros((states, states))\n \n wall_ind = np.where(R == penalty_l)[0]\n\n for i in range(states):\n # Up\n if (i - c) < 0 or (i - c) in wall_ind :\n T_up[i, i] += p\n else:\n T_up[i, i - c] += p\n \n if i%c == 0 or (i - 1) in wall_ind:\n T_up[i, i] += q\n else:\n T_up[i, i-1] += q\n \n if i%c == (c - 1) or (i + 1) in wall_ind:\n T_up[i, i] += q\n else:\n T_up[i, i+1] += q\n \n # Down\n if (i + c) > (states - 1) or (i + c) in wall_ind:\n T_down[i, i] += p\n else:\n T_down[i, i + c] += p\n \n if i%c == 0 or (i - 1) in wall_ind:\n T_down[i, i] += q\n else:\n T_down[i, i-1] += q\n \n if i%c == (c - 1) or (i + 1) in wall_ind:\n T_down[i, i] += q\n else:\n T_down[i, i+1] += q\n \n # Left\n if i%c == 0 or (i - 1) in wall_ind:\n T_left[i, i] += p\n else:\n T_left[i, i-1] += p\n \n if (i - c) < 0 or (i - c) in wall_ind:\n T_left[i, i] += q\n else:\n T_left[i, i - c] += q\n \n if (i + c) > (states - 1) or (i + c) in wall_ind:\n T_left[i, i] += q\n else:\n T_left[i, i + c] += q\n \n # Right\n if i%c == (c - 1) or (i + 1) in wall_ind:\n T_right[i, i] += p\n else:\n T_right[i, i+1] += p\n \n if (i - c) < 0 or (i - c) in wall_ind:\n T_right[i, i] += q\n else:\n T_right[i, i - c] += q\n \n if (i + c) > (states - 1) or (i + c) in wall_ind:\n T_right[i, i] += q\n else:\n T_right[i, i + c] += q\n \n T = [T_up, T_left, T_right, T_down] \n \n return T, R", "def viterbi(self, observation):\n N=len(observation)\n tab=[[0]*self.nStates for i in range(N)]\n backtrack=[[-1]*self.nStates for i in range(N)]\n if not self.logdomain:\n self.__convert_to_log()\n\n for i in range(self.nStates):\n tab[0][i]=self.e[i][observation[0]]+self.pi[i]\n \n for i in range(1,N):\n for j in 
range(self.nStates):\n smax=-1\n maxval=float('-inf')\n for s in range(self.nStates):\n cs=tab[i-1][s]+self.t[s][j]\n if cs>maxval:\n smax=s\n maxval=cs\n assert(smax>-1 and smax<self.nStates)\n tab[i][j]=self.e[j][observation[i]]+maxval\n backtrack[i][j]=smax\n\n smax=-1\n llike=float('-inf')\n for s in range(self.nStates):\n if llike<tab[N-1][s]:\n llike=tab[N-1][s]\n smax=s\n\n best=[-1]*N\n best[-1]=smax\n for i in range(N-2, -1, -1):\n best[i]=backtrack[i+1][best[i+1]]\n\n return best, llike", "def cost_function(H, n_qubits, p, params):\n ini_state=plus_state(n_qubits)\n for i in range(p):\n ini_state=qaoa_step(ini_state,H,n_qubits,params=[params[2*i],params[2*i+1]])\n return ((sparse.spmatrix.getH(ini_state)).dot(H.dot(ini_state))).real, ini_state", "def viterbi1(self, e_phi, states_dict):\n \n states = [\"0\",\"1\",\"2\"]\n \n if not states_dict:\n first_dict = {} \n for state in states: \n S_e = self.score(e_phi, state) \n first_dict[state] = (S_e,([]))\n return [first_dict] \n \n else:\n last_dict = states_dict[-1]\n this_dict = {}\n for state in states:\n S_e = self.score(e_phi, state)\n max_score=-float('inf')\n max_label = None\n for prev in states:\n (Sprev, (Hprev))=last_dict[prev]\n if not Hprev:\n Hstate = [prev] # no history\n else:\n Hstate = Hprev[1:]+[prev] \n t_phi = tfeats(Hstate, self.order)\n \n partial_score = Sprev+self.score(t_phi, state)\n if max_score < partial_score:\n S_max = partial_score\n max_hstate = Hstate\n # write to dict\n this_dict[state]=(S_max+S_e,(max_hstate))# brakets\n states_dict.append(this_dict)\n return states_dict", "def forward_backward(observations):\n\n # -------------------------------------------------------------------------\n # YOUR CODE GOES HERE\n #\n # observations = [(4, 3), (4, 2), (3, 2), (4, 0), (2, 0), (2, 0), (3, 2), \n # (4, 2), (2, 3), (3, 5)]\n num_time_steps = len(observations)\n forward_messages = [None] * num_time_steps\n forward_messages[0] = prior_matrix\n # # # TODO: Compute the forward messages\n for i,x_i in enumerate(observations):\n if x_i:\n obs_index = obs_state_index_map[x_i]\n pi_0 = forward_messages[i]\n # print(len(B[:obs_index]))\n weights = np.multiply(pi_0, B[:,obs_index])\n # x = sum([A[j,:]* w_i.T for j,w_i in enumerate(weights)])\n else:\n weights = forward_messages[i]\n # print(weights)\n x = sum([A[j,:]* w_i.T for j,w_i in enumerate(weights)])\n if i+1 < len(forward_messages):\n forward_messages[i+1] = x#normalize(x)\n # break\n\n ## forward messages as dictionary\n # for_dict = [None]*num_time_steps\n # for j,f in enumerate(forward_messages):\n # x = Distribution()\n # for i,x_i in enumerate(f):\n # if x_i == 0:\n # continue\n # # print(i,x_i)\n # x[all_possible_hidden_states[i]] = x_i\n # for_dict[j] = x.renormalize()\n # print(for_dict[3])\n\n # print('--------------\\n-----------------\\n')\n\n\n backward_messages = [None] * num_time_steps\n # backward_messages[-1] = [1]*len(prior_matrix)\n message = np.ones(len(all_possible_hidden_states), dtype=np.float64)\n backward_messages[-1] = message/len(all_possible_hidden_states)\n \n# ****\n ## Backwards messages\n for i,x_i in enumerate(reversed(observations)):\n # print(x_i)\n if x_i:\n obs_index = obs_state_index_map[x_i]\n pi = backward_messages[-1-i]\n weights = np.multiply(pi, B[:,obs_index])\n else:\n weights = backward_messages[-1-i]\n # print(i)\n x = sum([A[:,j]*w_i for j,w_i in enumerate(weights)])\n\n if i+1 < len(backward_messages):\n backward_messages[-2-i] = x#normalize(x)\n\n ## backward messages as dictionary\n # back_dict = 
[None]*num_time_steps\n # for j,b in enumerate(backward_messages):\n # x = Distribution()\n # if b == None:\n # continue\n # for i,x_i in enumerate(b):\n # if x_i == 0 or x_i==None:\n # continue\n # # print(i,x_i)\n # x[all_possible_hidden_states[i]] = x_i\n # back_dict[j] = x.renormalize()\n \n # print(back_dict[0])\n # print(A[:10,:10])\n # print('\\n-----------------\\n', B[:10,:10])\n\n # print(backward_messages[2])\n # backward_messages[0] = forward_messages[0]\n # # ## marginals as matrix\n marginals = [None] * num_time_steps \n for i,x_i in enumerate(observations):\n if x_i:\n obs_index = obs_state_index_map[x_i]\n marginals[i] = np.multiply(np.multiply(backward_messages[i],\n forward_messages[i]),\n B[:,obs_index])\n else:\n marginals[i] = np.multiply(backward_messages[i],forward_messages[i])\n # if i == 0:\n # marginals[i] = np.multiply(backward_messages[i], B[:,obs_index])\n # elif i == len(observations)-1:\n # marginals[i] = np.multiply(forward_messages[i], B[:,obs_index])\n # else:\n\n ## marginals as dictionary\n marg_dict = [None]*num_time_steps\n for j,m in enumerate(marginals):\n x = Distribution()\n for i,x_i in enumerate(m):\n if x_i == 0 or x_i==None:\n continue\n x[all_possible_hidden_states[i]] = x_i\n marg_dict[j] = x.renormalize()\n # print(marginals[i])\n # print(A[:10, :10], '\\n')\n # print(B[:10, :10], '\\n')\n # print(marg_dict)\n return marg_dict", "def objective(trial):\n %time\n env = gym.make('Delivery-v0')\n alpha = trial.suggest_discrete_uniform('alpha', 0.3,0.9,0.3)\n gamma = trial.suggest_discrete_uniform('gamma', 0.6, 1,0.1)\n epsilon = trial.suggest_discrete_uniform('epsilon', 0.01, 0.11, 0.04)\n episodes = 1000000\n \n # For plotting metrics\n all_epochs = []\n all_penalties = []\n rewards = []\n \n #Initialize Q table of 22500 x 8 size (22500 states and 8 actions) with all zeroes\n q_table = np.zeros([env.observation_space.n, env.action_space.n]) \n \n for i in range(1, episodes+1):\n state = env.reset()\n episode_rewards = []\n\n epochs, penalties, reward, = 0, 0, 0\n done = False\n\n while not done:\n if random.uniform(0, 1) < epsilon:\n action = env.action_space.sample() # Explore action space randomly\n else:\n action = np.argmax(q_table[state]) # Exploit learned values by choosing optimal values\n\n next_state, reward, done, info = env.step(action) \n\n old_value = q_table[state, action]\n next_max = np.max(q_table[next_state])\n\n new_value = (1 - alpha) * old_value + alpha * (reward + gamma * next_max)\n q_table[state, action] = new_value\n\n if reward == -10:\n penalties += 1\n \n\n state = next_state\n episode_rewards.append(reward)\n epochs += 1\n \n if done == True:\n break \n if epochs == 1000:\n break \n rewards.append(np.sum(episode_rewards))\n \n last_reward = np.mean(rewards)\n # trial.report(-1 * last_reward)\n\n return -1 * last_reward", "def take_step(prob_v, t, xsize, ysize):\n xs = xsize\n ys = ysize\n tmpprobs = []\n for x in range(xs):\n tmpprobs.append([])\n for y in range(ys):\n tmpprobs[x].append([])\n for n in prob_v:\n for cell in n:\n x = cell.x\n y = cell.y\n p = cell.conf_in\n tmpprobs[x][y] = p, None, None\n for n in prob_v:\n for cell in n:\n if cell.started and cell.not_finished:\n x = cell.x\n y = cell.y\n step_prob = cell.conf_out\n for dx in [-1, 0, 1]:\n for dy in [-1, 0, 1]:\n if abs(dx) == abs(dy): # no diags\n continue\n nx = x + dx\n ny = y + dy\n if nx < 0 or \\\n nx >= xs or \\\n ny < 0 or \\\n ny >= ys:\n continue\n # ok, nx, ny on board\n if step_prob > tmpprobs[nx][ny][0]:\n tmpprobs[nx][ny] = step_prob, 
x, y\n for n in prob_v:\n for cell in n:\n x = cell.x\n y = cell.y\n p = cell.conf_in\n if tmpprobs[x][y][0] > p:\n cell.set_prob_in(tmpprobs[x][y][0], t, tmpprobs[x][y][1], tmpprobs[x][y][2])\n return prob_v", "def _updateTransitionMatrix(self):\n N = self.N\n K = self.K\n T= self.T\n\n for i in range(1,self.K+1):\n den = 0\n for t in range(1,self.T):\n for n in range(1,N+1):\n den = den + self.posterior_state_trellis[n][(t,i)]\n \n for j in range(1,self.K+1): \n # For some state i,j\n s = 0\n for n in range(1,N+1): \n for t in range(1,self.T): \n cur_prob = self.posterior_transition_trellis[n][(t,t+1,j,i)]\n s = s+cur_prob\n\n # Compute total \n self.state_transition_mat[(j,i)] = (s/den)", "def solve(self):\n # Use a trivial tour (1-2-3-...-N-1) to set the global upper bound.\n tour = list(range(self._N))\n upper_bound = sum([self._G[i][(i + 1) % self._N] for i in range(self._N)])\n trace = []\n\n # Start from a configuration with a single vertex.\n frontier = [BranchAndBoundConfiguration(self._G, self._N, [0], LOWER_BOUND_METHOD)]\n\n # Set the start time.\n start_time = time.time()\n\n # Branch and bound until the frontier set is empty or the time has expired.\n while frontier and (time.time() - start_time) < self._cutoff_time:\n # Fetch the most promising configuration.\n config = heappop(frontier)\n\n # Expand configuration by appending a vertex to the path.\n for v in range(self._N):\n try:\n expanded_config = config.expand(v)\n except ValueError:\n # Expanded configuration is not valid.\n continue\n if expanded_config.is_solution():\n # Update the global upper bound, if needed.\n this_solution = expanded_config.get_cycle_cost()\n if this_solution < upper_bound:\n # Log it.\n trace.append((time.time() - start_time, this_solution))\n # Update the best solution.\n upper_bound = this_solution\n tour = list(expanded_config.get_path())\n elif expanded_config.get_lower_bound() < upper_bound:\n # Add to the frontier set.\n heappush(frontier, expanded_config)\n return (upper_bound, [self._index_to_id[v] for v in tour], trace)", "def binary_dec(A,n_iter = 1000):\n\n\t### Initialization ###\n\n\tp, q = np.shape(A)\n\t### B : to be changed\n\tB = np.eye(p)\n \t###\n\tC = bin_random_mat(p,q)\n\tlist_dist = []\n\tB_argmin = B\n\tC_argmin = C\n\n\n\n\n\t## temperature ##\n\tT_n = np.log(np.arange(2,n_iter+2,1))\n\t#T_n = np.arange(2,n_iter+2,1)\n\tfor i in range(n_iter):\n\t## update ##\n\t\tC_0 = np.matrix(C)\n\t\tlist_dist =np.append( list_dist, V_potential(np.dot(B,C_0),A) )\n\t\tif V_potential(np.dot(B_argmin,C_argmin),A) == 0:\n\t\t\tbreak\n\t########## transition #############\n\t# Here we take 2 steps independent(for B and for C respectively)\n\t# We could also use metropolis hasting kernel.\n\n\t\tC_iter = np.matrix(Metropolis_transition_C(C))\n\t\n\n\t\tB_iter = B[np.random.permutation(np.arange(p))]\n\t\t\n\t\tif np.random.uniform(0,1,1) < \\\n\t\t\t\tnp.exp(-1./T_n[i]*( V_potential(np.dot(B_iter,C_iter), A)\\\n\t\t\t\t - V_potential(np.dot(B,C_0),A) ) ):\n\t\t\tC = C_iter\n\t\t\tB = B_iter\n\t######### end of transition ##############\n\n\t\t\tif V_potential(np.dot(B,C),A) < np.min(list_dist):\n\t\t\t\t\n\t\t\t\tB_argmin = B\n\t\t\t\tC_argmin = np.matrix(C)\n\t\t\t# print i+1\n\t\t\t# print V_potential(np.dot(B_argmin,C_argmin),A)\n\t\t\t# print C_argmin\n\t\t\t# print '\\n'\n\n\treturn list_dist,B_argmin, C_argmin", "def viterbi_paths(self, X: List[np.ndarray], **kwargs) -> Tuple[List[np.ndarray], List[np.ndarray]]:", "def valueIteration(P,R,gamma,theta,initial_v,max_iter=1e8):\n 
print('Running value iteration ...')\n\n def one_step_lookahead(s, V):\n \"\"\"\n :param state: current state\n :param v: current value estimator\n :return: A, list of optimal action values under current value estimator\n \"\"\"\n num_a = num_actions\n num_S = num_states\n\n A = np.zeros(num_a)\n\n for a in range(num_a):\n for s_prime in range(num_S):\n A[a] += P[s, a, s_prime] * (R[s, a, s_prime] + gamma * V[s_prime])\n return A\n \n # initialization\n v = initial_v \n num_states, num_actions = P.shape[:2]\n k = 0 \n best_actions = [0] * num_states\n delta = 1000\n\n while delta > theta and k <= max_iter:\n delta = 0\n k += 1\n for s in range(num_states):\n action_values = one_step_lookahead(s, v)\n best_action_value = np.max(action_values)\n delta = max(delta, np.abs(best_action_value - v[s]))\n v[s] = best_action_value\n print(delta)\n\n for s in range(num_states):\n A = one_step_lookahead(s, v)\n best_actions[s] = np.argmax(A)\n\n\n print('number of iterations:', k)\n return best_actions, v", "def initialise(self, observation):\n # Initialise viterbi, including\n # transition from <s> to observation\n # use costs (-log-base-2 probabilities)\n # TODO\n # empty everything\n self.viterbi = dict()\n self.backpointer = dict()\n # lambda expression of the sum of negative log probs\n cost = lambda p, q: - float(p + q)\n # The Viterbi table should be m*n where m is the number of states\n # and n is the number of words.\n # Initialliy, for each state, we calculate the emission probability\n # (the prob of observation given the state), and the transition\n # probability (state given the start symbol), sum the negative logs of\n # them to get the corresponding cost.\n # I chose to use dict() to implement the Viterbi table because it supports\n # a pair of keys, i.e. [state, t]\n for i in range(len(self.states)):\n state = self.states[i]\n p_obs_given_pos = self.emission_PD[state].logprob(observation)\n p_pos_given_start = self.transition_PD['<s>'].logprob(state)\n self.viterbi[state, 0] = cost(p_obs_given_pos, p_pos_given_start)\n\n # Initialise backpointer\n # TODO\n # Initialise the backpointer by filling in m 0s. 
Again, use the pair\n # key: [state, t].\n self.backpointer[state, 0] = 0", "def task2_extra2():\n N = 0\n lam = 0\n L = 10\n h = 0.001\n tau = 0.000099\n aa = numpy.array([0.25*a for a in range((L-1)*4)])\n x = numpy.linspace(-L, L, int(2*L/h) + 1)\n Vm = V1D(lam, x)\n # eps=int(0.1*len(x))\n\n iterss = []\n for a in aa:\n print(a)\n state = phi(N, x-a)\n\n iters = 0\n while True:\n prob = numpy.abs(state)**2\n mid = int(2*L/h) // 2\n # if max(prob) in prob[mid-eps:mid+eps]:\n if numpy.argmax(prob) <= mid:\n print(iters)\n iterss.append(iters)\n break\n\n state[0] = 0\n state[-1] = 0\n state = implicit_scheme_step(state, tau, h, Vm)\n iters += 1\n\n fig = plt.figure()\n plt.title(\"Iterations of Gaussian travel to center ($L={}$)\".format(L))\n plt.xlabel(\"$a$\")\n plt.ylabel(\"Time\")\n plt.plot(aa, tau*numpy.array(iterss))\n plt.show()\n fig.savefig(\"naloga2_iters_of_gaussian_travel_fixedL={}.pdf\".format(L), bbox_inches=\"tight\")", "def screening_graph_estimate(S, lambdaL, p, maxdf, idx_scr, threshold=1e-4, max_iter=10000):\n nlambda = lambdaL.shape[0]\n nscr = idx_scr.shape[0]\n x = np.zeros(p * maxdf * nlambda)\n col_cnz = np.zeros(p + 1).astype(int)\n row_idx = np.zeros(p * maxdf * nlambda).astype(int)\n idx_a = np.zeros(nscr).astype(int)\n w1 = np.zeros(p)\n\n cnz = 0\n for m in range(p):\n idx_i = np.copy(idx_scr[:, m])\n w0 = np.zeros(p)\n size_a = 0\n\n for i in range(nlambda):\n ilambda = lambdaL[i]\n gap_ext = 1\n iter_ext = 0\n while gap_ext > 0 and iter_ext < max_iter:\n size_a_prev = size_a\n for j in range(nscr):\n w_idx = idx_i[j]\n if w_idx != -1:\n r = S[m, w_idx]\n for k in range(size_a):\n rss_idx = idx_a[k]\n r -= S[w_idx, rss_idx] * w0[rss_idx]\n\n if abs(r) > ilambda:\n if r >= 0:\n w1[w_idx] = r - ilambda\n else:\n w1[w_idx] = r + ilambda\n idx_a[size_a] = w_idx\n size_a += 1\n idx_i[j] = -1\n else:\n w1[w_idx] = 0\n w0[w_idx] = w1[w_idx]\n\n gap_ext = size_a - size_a_prev\n\n gap_int = 1\n iter_int = 0\n while gap_int > threshold and iter_int < max_iter:\n tmp1 = 0\n tmp2 = 1e-4\n for j in range(size_a):\n w_idx = idx_a[j]\n r = S[m, w_idx] + w0[w_idx]\n\n for k in range(size_a):\n rss_idx = idx_a[k]\n r -= S[w_idx, rss_idx] * w0[rss_idx]\n\n if abs(r) > ilambda:\n if r >= 0:\n w1[w_idx] = r - ilambda\n else:\n w1[w_idx] = r + ilambda\n tmp2 += abs(w1[w_idx])\n else:\n w1[w_idx] = 0\n tmp1 += abs(w1[w_idx] - w0[w_idx])\n w0[w_idx] = w1[w_idx]\n gap_int = tmp1 / tmp2\n iter_int += 1\n iter_ext += 1\n\n for j in range(size_a):\n w_idx = idx_a[j]\n x[cnz] = w1[w_idx]\n row_idx[cnz] = i * p + w_idx\n cnz += 1\n col_cnz[m + 1] = cnz\n\n return col_cnz, row_idx, x", "def second_heuristic(self):\r\n directions = [[-1, -1], [-1, 1], [1, 1], [1, -1]]\r\n # aceasta matrice indica valoarea pe care o are mutarea unei piese pe o celula aleasa\r\n # se va aduna la media ponderilor adunate in lista weights\r\n\r\n # mijlocul tablei este punctul cel mai vulnerabil\r\n # in timp ce lateralele sunt sigure,iar linia bazei transforma piesa in rege\r\n\r\n points = [[0, 4, 0, 4, 0, 4, 0, 4],\r\n [4, 0, 3, 0, 3, 0, 3, 0],\r\n [0, 3, 0, 2, 0, 2, 0, 4],\r\n [4, 0, 2, 0, 1, 0, 3, 0],\r\n [0, 3, 0, 1, 0, 2, 0, 4],\r\n [4, 0, 2, 0, 1, 0, 3, 0],\r\n [0, 3, 0, 2, 0, 2, 0, 4],\r\n [4, 0, 4, 0, 4, 0, 4, 0]]\r\n\r\n weights = [0 for i in range(4)]\r\n whites, blacks = 0, 0\r\n for i in range(8):\r\n for j in range(8):\r\n\r\n # numaram discurile de fiecare culoarea\r\n blacks += 1 if self.matrix[i][j] in ['N', 'n'] else 0\r\n whites += 1 if self.matrix[i][j] in ['A', 'a'] else 0\r\n\r\n if 
self.matrix[i][j] in [self.current_player, self.current_player.upper()]:\r\n\r\n # daca e piesa normala\r\n if self.matrix[i][j] == self.current_player:\r\n weights[0] += 4\r\n\r\n # cat de aproape este piesa de a deveni rege ( nr de linii din tabla - cate mai are pana ajunge pe ultima linie)\r\n\r\n # cu cat se apropie piesa mai multe de a deveni rege, scorul creste( negru - rege pentru i=0, alb -rege pentru i =7)\r\n if self.matrix[i][j] == 'n':\r\n weights[1] += (7 - i)\r\n elif self.matrix[i][j] == 'a':\r\n weights[1] += i\r\n else:\r\n # daca e piesa rege\r\n weights[0] += 8\r\n\r\n # cat de aproape este piesa rege de celelalte piese\r\n for d in directions:\r\n if self.matrix[i][j] == self.current_player.upper():\r\n # gaseste pe diagonala in directia d, o piesa adversara,daca exista\r\n x, y = self.find_piesa(i, j, d)\r\n if x and y:\r\n weights[2] += (x - i) * (x - i) + (y - j) * (y - j)\r\n vx = d[0] + i\r\n vy = d[1] + j\r\n back_x = i - d[0]\r\n back_y = j - d[1]\r\n next_x, next_y = vx + d[0], vy + d[1]\r\n # piesele pe care le poate captura jucatorul, daca e piesa rege are un scor mai mare\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(next_x, next_y) and self.matrix[next_x][next_y] == '.':\r\n if self.matrix[next_x][next_y] == self.opponent().upper():\r\n weights[3] += 7\r\n else:\r\n weights[3] += 4\r\n # piese care pot fi capturate; la fel daca este piesa rege atunci se scade mai mult scorul\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(back_x, back_y) and self.matrix[back_x][back_y] == '.':\r\n if self.matrix[vx][vy] == self.opponent().upper():\r\n weights[3] -= 6\r\n else:\r\n weights[3] -= 3\r\n # adunam piesa la media sumei date pentru a face AI-ul in caz de egalitate a scorului\r\n # sa imi aleaga piesa care ma pozitioneaza mai bine\r\n if self.move:\r\n return sum(weights) / 4 + points[self.move[0]][self.move[1]]\r\n return sum(weights) / 4\r\n\r\n def __str__(self):\r\n s = ' '\r\n for i in range(8):\r\n s += str(i) + ' '\r\n s += '\\n'\r\n for index, line in enumerate(self.matrix):\r\n s += str(chr(index + ord('a'))) + ' '\r\n for el in line:\r\n s += str(el) + ' '\r\n s += '\\n'\r\n\r\n return s", "def compute_cost_matrix(self):\n\n if rank == 0:\n #do random sampling of a parameters\n if self.sampling == \"LHS\":\n lhs = Lhs(lhs_type=\"classic\", criterion=None)\n param_samples = lhs.generate(self.sample_space, self.niters)\n elif self.sampling == \"rsampling\":\n param_samples = self.sample_space.rvs(self.niters)\n elif self.sampling == \"Sobol\":\n sobol = Sobol()\n param_samples = sobol.generate(self.sample_space.dimensions, self.niters)\n \n # generate param samples split\n niters_rank0 = self.niters//size + self.niters % size\n niters_rank = self.niters//size\n count_scatter = [niters_rank0]\n count_scatter.extend((size-2)*[niters_rank])\n count_scatter = np.cumsum(count_scatter)\n\n param_samples_split = np.split(param_samples,count_scatter)\n else:\n param_samples_split = None\n \n #scatter parameter samples data\n param_samps = comm.scatter(param_samples_split,root=0)\n\n # initialize data\n param_samples_dict_rank = {qoi_name:[] for qoi_name in self.funcnames}\n param_samples_diff_dict_rank = {qoi_name:[] for qoi_name in self.funcnames}\n jac_dict_rank = {qoi_name:[] for qoi_name in self.funcnames}\n qoi_dict_rank = {qoi_name:[] for qoi_name in self.funcnames}\n\n \n\n # evaluate QoI at random sampling\n for 
sample in param_samps: \n qoi_sample, jac_sample = self.jac(sample).values()\n # store output\n for qoi_name in self.funcnames:\n if not (jac_sample[qoi_name] is None):\n param_samples_dict_rank[qoi_name].append(jac_sample[qoi_name])\n jac_dict_rank[qoi_name].append(jac_sample[qoi_name])\n qoi_dict_rank[qoi_name].append(qoi_sample[qoi_name])\n else:\n param_samples_diff_dict_rank[qoi_name].append(sample)\n\n # gather data\n param_samples = None\n param_samples_diff_int = None\n jac_dict = None\n qoi_dict= None\n\n param_samples_dict = comm.gather(param_samples_dict_rank, root=0)\n params_samples_diff_dict = comm.gather(param_samples_diff_dict_rank, root=0)\n jac_dict = comm.gather(jac_dict_rank, root=0)\n qoi_dict = comm.gather(qoi_dict_rank, root=0)\n\n # format gathered data\n if rank == 0:\n #flatten data\n param_samples_dict_flattened = {qoi_name:[] for qoi_name in self.funcnames}\n param_samples_diff_dict_flattened = {qoi_name: [] for qoi_name in self.funcnames}\n jac_dict_flattened = {qoi_name: [] for qoi_name in self.funcnames}\n qoi_dict_flattened = {qoi_name: [] for qoi_name in self.funcnames}\n\n for cpurank in range(size):\n for qoi_name in self.funcnames:\n param_samples_dict_flattened[qoi_name].extend(param_samples_dict[cpurank][qoi_name]) \n param_samples_diff_dict_flattened[qoi_name].extend(params_samples_diff_dict[cpurank][qoi_name])\n jac_dict_flattened[qoi_name].extend(jac_dict[cpurank][qoi_name])\n qoi_dict_flattened[qoi_name].extend(qoi_dict[cpurank][qoi_name])\n\n #compute outer product\n jac_outer_dict = {qoi_name: [] for qoi_name in self.funcnames}\n nfuncs_dict = {qoi_name: 0 for qoi_name in self.funcnames}\n\n for qoi_name in self.funcnames:\n for i in range(len(jac_dict_flattened[qoi_name])):\n jac_sample = jac_dict_flattened[qoi_name][i]\n jac_outer_dict[qoi_name].append(np.outer(jac_sample,jac_sample))\n nfuncs_dict[qoi_name] += 1\n\n # compute cost matrix and norm convergence\n cost_matrix_dict = {}\n cost_matrix_cumul_dict = {}\n norm_convergence_dict = {}\n\n for qoi_name in self.funcnames:\n cost_cumsum = np.cumsum(jac_outer_dict[qoi_name],axis=0)/np.arange(1,nfuncs_dict[qoi_name]+1)[:,None,None]\n cost_matrix_cumul_dict[qoi_name] = cost_cumsum\n cost_matrix_dict[qoi_name] = cost_cumsum[-1,:,:]\n norm_convergence_dict[qoi_name] = np.linalg.norm(cost_cumsum,ord='fro',axis=(1,2))\n\n # compute variance matrix\n variance_matrix_dict = {}\n for qoi_name in self.funcnames:\n variance_mat = np.sum((jac_outer_dict[qoi_name]-cost_matrix_dict[qoi_name])**2/(nfuncs_dict[qoi_name]-1),axis=0) \n variance_matrix_dict[qoi_name] = variance_mat\n\n param_results = {\"PARAM_SAMPLES\": param_samples_dict_flattened,\n \"DIFFICULT_PARAM_SAMPLES\": param_samples_diff_dict_flattened}\n\n fun_results = {\"NUMBER_OF_FUNCTION_SUCCESS\": nfuncs_dict,\n \"NORM_OF_SEQ_OF_CUMUL_SUMS\": norm_convergence_dict,\n \"SEQ_OF_CUMUL_SUMS\": cost_matrix_cumul_dict, \n \"VARIANCE_OF_ENTRIES\": variance_matrix_dict,\n \"FINAL_COST_MATRIX\":cost_matrix_dict}\n\n return {'PARAMETER_RESULTS': param_results, 'FUNCTION_RESULTS': fun_results}", "def task2_extra():\n N = 0\n lam = 0\n Ls = numpy.array([2*L for L in range(1,23)])\n h = 0.01\n tau = 0.000099\n\n iterss = []\n\n for L in Ls:\n a = L // 2\n print(L)\n x = numpy.linspace(-L, L, int(2*L/h) + 1)\n # eps = int(0.1 * len(x))\n\n Vm = V1D(lam, x)\n state = phi(N, x-a)\n\n iters = 0\n while True:\n prob = numpy.abs(state)**2\n mid = int(2*L/h) // 2\n # if max(prob) in prob[mid-eps:mid+eps]:\n if numpy.argmax(prob) <= mid:\n print(iters)\n 
iterss.append(iters)\n break\n\n state[0] = 0\n state[-1] = 0\n state = implicit_scheme_step(state, tau, h, Vm)\n iters += 1\n\n fig = plt.figure()\n plt.title(\"Iterations of Gaussian travel to center\")\n plt.xlabel(\"$L$\")\n plt.ylabel(\"Time\")\n plt.plot(Ls, tau*numpy.array(iterss))\n plt.show()\n fig.savefig(\"naloga2_iters_of_gaussian_travel.pdf\", bbox_inches=\"tight\")", "def solve(n_vec, m_vec, p_vec, repeat, dns_level, seed, solver='gurobi'):\n\n print(\"Solving random problems with solver %s\\n\" % solver)\n\n # Define statistics to record\n std_solve_time = np.zeros(len(n_vec))\n avg_solve_time = np.zeros(len(n_vec))\n min_solve_time = np.zeros(len(n_vec))\n max_solve_time = np.zeros(len(n_vec))\n\n n_prob = len(n_vec)\n\n # Store also OSQP time\n if solver == 'miosqp':\n # Add OSQP solve times statistics\n avg_osqp_solve_time = np.zeros(len(n_vec))\n\n # reset random seed\n np.random.seed(seed)\n\n for i in range(n_prob):\n\n # Get dimensions\n n = n_vec[i]\n m = m_vec[i]\n p = p_vec[i]\n\n print(\"problem n = %i, m = %i, p = %i\" % (n, m, p))\n\n # Define vector of cpu times\n solve_time_temp = np.zeros(repeat)\n\n # Store also OSQP time\n if solver == 'miosqp':\n osqp_solve_time_temp = np.zeros(repeat)\n\n for j in tqdm(range(repeat)):\n # for j in range(repeat):\n\n # Generate random vector of indeces\n i_idx = np.random.choice(np.arange(0, n), p, replace=False)\n\n # Generate random Matrices\n Pt = spa.random(n, n, density=dns_level)\n P = spa.csc_matrix(np.dot(Pt, Pt.T))\n q = sp.randn(n)\n A = spa.random(m, n, density=dns_level)\n u = 2 + sp.rand(m)\n l = -2 + sp.rand(m)\n\n # Enforce [0, 1] bounds on variables\n i_l = np.zeros(p)\n i_u = np.ones(p)\n # A, l, u = miosqp.add_bounds(i_idx, 0., 1., A, l, u)\n\n if solver == 'gurobi':\n # Solve with gurobi\n prob = mpbpy.QuadprogProblem(P, q, A, l, u, i_idx, i_l, i_u)\n res_gurobi = prob.solve(solver=mpbpy.GUROBI,\n verbose=False, Threads=1)\n if res_gurobi.status != 'optimal':\n import ipdb\n ipdb.set_trace()\n solve_time_temp[j] = 1e3 * res_gurobi.cputime\n\n elif solver == 'miosqp':\n # Define problem settings\n miosqp_settings = {\n # integer feasibility tolerance\n 'eps_int_feas': 1e-03,\n # maximum number of iterations\n 'max_iter_bb': 1000,\n # tree exploration rule\n # [0] depth first\n # [1] two-phase: depth first until first incumbent and then best bound\n 'tree_explor_rule': 1,\n # branching rule\n # [0] max fractional part\n 'branching_rule': 0,\n 'verbose': False,\n 'print_interval': 1}\n\n osqp_settings = {'eps_abs': 1e-03,\n 'eps_rel': 1e-03,\n 'eps_prim_inf': 1e-04,\n 'verbose': False}\n\n model = miosqp.MIOSQP()\n model.setup(P, q, A, l, u, i_idx, i_l, i_u,\n miosqp_settings,\n osqp_settings)\n res_miosqp = model.solve()\n\n # DEBUG (check if solutions match)\n # prob = mpbpy.QuadprogProblem(P, q, A, l, u, i_idx, i_l, i_u)\n # res_gurobi = prob.solve(solver=mpbpy.GUROBI, verbose=False)\n # if (np.linalg.norm(res_gurobi.x - res_miosqp.x) /\n # np.linalg.norm(res_gurobi.x)) > 1e-02:\n # import ipdb; ipdb.set_trace()\n#\n # import ipdb; ipdb.set_trace()\n\n if res_miosqp.status != miosqp.MI_SOLVED:\n import ipdb\n ipdb.set_trace()\n \n # Solution time \n solve_time_temp[j] = 1e3 * res_miosqp.run_time\n\n # Store OSQP time in percentage\n if solver == 'miosqp':\n osqp_solve_time_temp[j] = \\\n 100 * (res_miosqp.osqp_solve_time / res_miosqp.run_time)\n\n # Get time statistics\n std_solve_time[i] = np.std(solve_time_temp)\n avg_solve_time[i] = np.mean(solve_time_temp)\n max_solve_time[i] = 
np.max(solve_time_temp)\n min_solve_time[i] = np.min(solve_time_temp)\n\n # Store also OSQP time\n if solver == 'miosqp':\n avg_osqp_solve_time[i] = np.mean(osqp_solve_time_temp)\n\n # Create pandas dataframe for the results\n df_dict = {'n': n_vec,\n 'm': m_vec,\n 'p': p_vec,\n 't_min': min_solve_time,\n 't_max': max_solve_time,\n 't_avg': avg_solve_time,\n 't_std': std_solve_time}\n\n # Store also OSQP time\n if solver == 'miosqp':\n df_dict.update({'t_osqp_avg': avg_osqp_solve_time})\n\n timings = pd.DataFrame(df_dict)\n\n return timings", "def example_3():\n\n # maze = klyubin_world()\n maze = mazeworld.door_world()\n emptymaze = MazeWorld(maze.height, maze.width)\n # maze = mazeworld.tunnel_world()\n n_step = 3\n start = time.time()\n initpos = np.random.randint(maze.dims[0], size=2)\n initpos = [1,4]\n s = maze._cell_to_index(initpos)\n T = emptymaze.compute_model()\n B = maze.compute_model()\n E = maze.compute_empowerment(n_step = n_step).reshape(-1)\n n_s, n_a, _ = T.shape\n agent = EmpowermentMaximiser(alpha=0.1, gamma=0.9, T = T, n_step=n_step, n_samples=1000, det=1.)\n steps = int(10000) \n visited = np.zeros(maze.dims)\n tau = np.zeros(steps)\n D_emp = np.zeros(steps)\n D_mod = n_s*n_a*np.ones(steps)\n for t in range(steps):\n # append data for plotting \n tau[t] = agent.tau\n D_emp[t] = np.mean((E - agent.E)**2)\n D_mod[t] = D_mod[t] - np.sum(np.argmax(agent.T, axis=0) == np.argmax(B, axis=0))\n a = agent.act(s)\n pos = maze._index_to_cell(s)\n visited[pos[0],pos[1]] += 1\n s_ = maze.act(s,list(maze.actions.keys())[a])\n agent.update(s,a,s_)\n s = s_\n print(\"elapsed seconds: %0.3f\" % (time.time() - start) )\n plt.figure(1)\n plt.title(\"value map\")\n Vmap = np.max(agent.Q, axis=1).reshape(*maze.dims)\n maze.plot(colorMap= Vmap )\n plt.figure(2)\n plt.title(\"subjective empowerment\")\n maze.plot(colorMap= agent.E.reshape(*maze.dims))\n plt.figure(3)\n plt.title(\"tau\")\n plt.plot(tau)\n plt.figure(4)\n plt.scatter(agent.E, visited.reshape(n_s))\n plt.xlabel('true empowerment')\n plt.ylabel('visit frequency')\n plt.figure(5)\n plt.title(\"visited\")\n maze.plot(colorMap=visited.reshape(*maze.dims))\n fig, ax1 = plt.subplots()\n red = 'tab:red'\n ax1.set_xlabel('time')\n ax1.set_ylabel('MSE of empowerment map', color=red)\n ax1.plot(D_emp, color=red)\n ax1.tick_params(axis='y', labelcolor=red)\n ax2 = ax1.twinx() \n ax2.set_ylabel('Model disagreement', color='tab:blue') \n ax2.plot(D_mod, color='tab:blue')\n ax2.tick_params(axis='y', labelcolor='tab:blue')\n plt.show()", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n path_to_point = {}\n cost_to_point = {}\n\n # Get the start node\n start_node = problem.getStartState()\n fringe_node = [start_node]\n path_to_point[start_node] = []\n cost_to_point[start_node] = problem.getCostOfActions(path_to_point[start_node])\n\n goal_found = False\n\n while(not goal_found):\n #for i in range(100): \n nodes_to_expand = set()\n # get max value node in the fringe node\n min_val = float(\"inf\")\n for one_node in fringe_node:\n # Compute the cost to reach a node\n if cost_to_point[one_node] < min_val:\n min_val = cost_to_point[one_node]\n \n for one_node in fringe_node:\n # Compute the cost to reach a node\n if cost_to_point[one_node] == min_val:\n nodes_to_expand.add(one_node)\n fringe_node.remove(one_node)\n\n # Expand the fringe node \n for one_node in nodes_to_expand:\n path_to_parent = path_to_point[one_node]\n for nxt_node in problem.getSuccessors(one_node):\n pos = nxt_node[0]\n mv = nxt_node[1]\n # check if point 
already present in path to point\n prev_cost = float(\"inf\")\n if pos in cost_to_point:\n prev_cost = cost_to_point[pos]\n new_path = path_to_parent + [mv]\n if prev_cost > problem.getCostOfActions(new_path):\n path_to_point[pos] = new_path\n cost_to_point[pos] = problem.getCostOfActions(new_path)\n fringe_node.append(pos)\n\n # Check if destination is reached in the fringe node\n for one_node in fringe_node:\n if problem.isGoalState(one_node):\n final_node = one_node\n goal_found = True\n break\n \n #print(len(fringe_node))\n print(final_node)\n print(path_to_point[final_node])\n return path_to_point[final_node] \n\n util.raiseNotDefined()", "def inverse_q_learning(feature_matrix,nA, gamma, transitions, alpha_r, alpha_q, alpha_sh, epochs, real_distribution):\n nS = feature_matrix.shape[0]\n\n \n # initialize tables for reward function, value functions and state-action visitation counter.\n r = np.zeros((nS, nA))\n q = np.zeros((nS, nA))\n q_sh = np.zeros((nS, nA))\n state_action_visitation = np.zeros((nS, nA))\n\n for i in range(epochs):\n if i%10 == 0:\n print(\"Epoch %s/%s\" %(i+1, epochs))\n \n for traj in transitions:\n for (s, a, _, ns) in traj:\n state_action_visitation[s][a] += 1\n d = False # no terminal state\n\n # compute shifted q-function.\n q_sh[s, a] = (1-alpha_sh) * q_sh[s, a] + alpha_sh * (gamma * (1-d) * np.max(q[ns]))\n \n # compute log probabilities.\n sum_of_state_visitations = np.sum(state_action_visitation[s])\n log_prob = np.log((state_action_visitation[s]/sum_of_state_visitations) + epsilon)\n \n # compute eta_a and eta_b for Eq. (9).\n eta_a = log_prob[a] - q_sh[s][a]\n other_actions = [oa for oa in range(nA) if oa != a]\n eta_b = log_prob[other_actions] - q_sh[s][other_actions]\n sum_oa = (1/(nA-1)) * np.sum(r[s][other_actions] - eta_b)\n\n # update reward-function.\n r[s][a] = (1-alpha_r) * r[s][a] + alpha_r * (eta_a + sum_oa)\n\n # update value-function.\n q[s, a] = (1-alpha_q) * q[s, a] + alpha_q * (r[s, a] + gamma * (1-d) * np.max(q[ns]))\n s = ns\n\n # compute Boltzmann distribution.\n boltzman_distribution = []\n for s in range(nS):\n boltzman_distribution.append([])\n for a in range(nA):\n boltzman_distribution[-1].append(np.exp(q[s][a]))\n boltzman_distribution = np.array(boltzman_distribution)\n boltzman_distribution /= np.sum(boltzman_distribution, axis=1).reshape(-1, 1)\n return q, r, boltzman_distribution", "def marcovNuc (i = random.choice(stateSpace), step = 100):\n # matrix of transition probabilities\n #matrix = [[0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25]] \n matrix = [[0.4, 0.3, 0.2, 0.1], [0.4, 0.3, 0.2, 0.1], [0.4, 0.3, 0.2, 0.1], [0.4, 0.3, 0.2, 0.1]] \n step += 1 # add one to the range because we remove it at the end\n sims = [] # List to hold the results of the Marcov chain\n sims.append(i) # append the seed value to the sims list\n for x in range(step):\n \n if sims[-1] == 'A':\n w = np.random.random() # Random number generator\n # the next set of if statements determine where the random number \n # sits on the number line of probabilities\n if matrix[0][0] > w:\n sims.append('A')\n elif matrix[0][1] + matrix[0][0] > w:\n sims.append('C')\n elif matrix[0][2] + matrix[0][1] + matrix[0][0] > w:\n sims.append('G')\n else:\n sims.append('T')\n elif sims[-1] == 'C':\n x = np.random.random()\n if matrix[1][0] > x:\n sims.append('A')\n elif matrix[1][1] + matrix[1][0] > x:\n sims.append('C')\n elif matrix[1][2] + matrix[1][1] + matrix[1][0] > x:\n sims.append('G')\n else:\n 
sims.append('T')\n \n elif sims[-1] == 'G':\n y = np.random.random()\n if matrix[2][0] > y:\n sims.append('A')\n elif matrix[2][1] + matrix[2][0] > y:\n sims.append('C')\n elif matrix[2][2] + matrix[2][1] + matrix[2][0] > y:\n sims.append('G')\n else:\n sims.append('T')\n\n else:\n z = np.random.random()\n if matrix[3][0] > z:\n sims.append('A')\n elif matrix[3][1] + matrix[3][0] > z:\n sims.append('C')\n elif matrix[3][2] + matrix[3][1] + matrix[3][0] > z:\n sims.append('G')\n else:\n sims.append('T')\n\n return sims[1:-1] # remove the initial value (the seed)", "def backward(log_emlik, log_startprob, log_transmat):\n N, M = log_emlik.shape\n backward_prob = np.zeros((N,M))\n\n backward_prob[N-1, :] = 0.0\n\n for i in range(N-2,-1,-1):\n for k in range(M):\n # probability of transitioning from k to state l * probability of emitting symbol at state l at ts i+1 * recursive backward probability\n backward_prob[i,k] = logsumexp(log_transmat[k,:] + log_emlik[i+1,:] + backward_prob[i+1,:])\n\n return backward_prob", "def prior_params_tree(self):\n id = {name:i for i, name in enumerate(list(self.tree.keys()))}\n n_nodes = len(id)\n dist_mx = np.zeros((n_nodes, n_nodes))\n\n for node1, edges in self.tree.items():\n for node2, dist in edges.dist:\n dist_mx[id[node1], id[node2]] = dist\n dist_mx[id[node2], id[node1]] = dist\n\n # while np.count_nonzero(dist_mx) < (n_nodes ** 2 - n_nodes):\n for _ in range(20):\n for i, j in combinations(range(n_nodes), 2):\n if dist_mx[i,j] > 0:\n continue\n row_i = dist_mx[i]\n row_j = dist_mx[j]\n value = (row_i + row_j) * (row_i > 0) * (row_j > 0)\n dist_mx[i, j] = dist_mx[j, i] = - max(np.unique(value))\n dist_mx = np.abs(dist_mx)\n\n evolve_rate = []\n for node1, node2 in combinations(self.m_cov.keys(), 2):\n mx_cov_dist = np.abs(self.m_cov[node1] - self.m_cov[node2])\n elements = mx_cov_dist[np.triu_indices(len(mx_cov_dist))]\n norm_elements = elements / dist_mx[id[node2], id[node1]]\n evolve_rate += list(norm_elements)\n\n\n\n df = np.mean([p.shape[0] for _, p in self.m_profiles.items()])\n p_theta_alpha = df/2\n # p_theta_alpha = 4\n p_theta_beta = np.percentile(evolve_rate, 75) * (p_theta_alpha - 1)\n # print(p_theta_alpha, p_theta_beta)\n return p_theta_alpha, p_theta_beta", "def original_solution():\n matrix = get_data()\n # Construct Graph\n G = nx.DiGraph()\n rows, cols = len(matrix), len(matrix[0])\n for r in xrange(rows):\n for c in xrange(cols):\n if 0 < c:\n G.add_edge(r*cols + c, r*cols + c - 1, weight=matrix[r][c-1])\n if c < cols-1:\n G.add_edge(r*cols + c, r*cols + c + 1, weight=matrix[r][c+1])\n if 0 < r:\n G.add_edge(r*cols + c, (r-1)*cols + c, weight=matrix[r-1][c])\n if r < rows-1:\n G.add_edge(r*cols + c, (r+1)*cols + c, weight=matrix[r+1][c])\n # Calculate shortest path\n path = nx.shortest_path(G, 0, rows*cols-1, weighted=True)\n \n # Get cost for path\n s = 0\n for p in path:\n c = p % cols\n r = (p - c) / rows\n s += matrix[r][c]\n return s", "def astar_multi(maze):\n # TODO: Write your code here\n gFunction = {}\n frontier = PriorityQueue()\n path = []\n ret = []\n MSTLengths = {}\n edges = {}\n\n objectives = maze.getObjectives()\n start = State(maze.getStart()[0], maze.getStart()[1], objectives)\n gFunction[start] = 0\n frontier.put(start) \n getEdgeWeights(maze, objectives, edges) # init edge weights for MST\n\n while not frontier.empty():\n\n currentState = frontier.get()\n currentCell = currentState.cell()\n objectivesLeft = currentState.objectives()\n\n if objectivesLeft.count(currentCell) != 0:\n 
objectivesLeft.remove(currentCell)\n\n # all objectives found, initialise backtrace and exit loop\n if len(objectivesLeft) == 0:\n path.clear()\n ret.clear()\n path.append(currentState)\n ret.append(currentCell)\n break\n \n # if we have already calculated MST length we can reuse value\n # else calculate MST length for this state and store it.\n length = 0\n if str(objectivesLeft) in MSTLengths:\n length = MSTLengths[str(objectivesLeft)]\n else:\n length = getMSTLength(objectivesLeft.copy(), maze, edges)\n MSTLengths[str(objectivesLeft)] = length\n\n neighbors = maze.getNeighbors(currentCell[0], currentCell[1])\n\n for i in neighbors:\n\n neighbor = State(i[0], i[1], objectivesLeft)\n gVal= gFunction[currentState] + 1\n\n if neighbor not in gFunction or gVal < gFunction[neighbor]:\n\n neighbor.setParent(currentState)\n gFunction[neighbor] = gVal\n\n hFunction = []\n for j in objectivesLeft:\n hFunction.append(abs(j[0] - i[0]) + abs(j[1] - i[1]) + length) # use MST length + manhatten distance to nearest objective as heuristic.\n\n hVal = min(hFunction)\n\n neighbor.setfFunction(gFunction[neighbor] + hVal)\n frontier.put(neighbor)\n\n # backtrace\n while path[0]!= start:\n \n currentCell = path[0]\n path.insert(0, currentCell.parent())\n ret.insert(0, currentCell.parent().cell())\n\n return ret", "def compute_adj_matrix_fitness(solution):\n\n solution_fitness = 0.0\n\n for index in range(len(solution)):\n waypoint1 = solution[index - 1]\n waypoint2 = solution[index]\n solution_fitness += adj_matrix[waypoint1, waypoint2]\n\n return solution_fitness", "def _brute_force(self):\n if self.N > 9:\n #print(\"Input set is too big for brute force estimation.\")\n self.best_path = None\n else:\n #print(\"Number of permutations to check: {}\".format(math.factorial(self.N)))\n #init = \n A = self._P + np.finfo(np.float).eps\n A = (A + (1-A).T)/2\n for i in range(A.shape[0]):\n A[i,i] = np.finfo(np.float).eps\n init = (A>0.5).sum(axis=1).argsort()[::-1]\n #--- use log(p(Y=1\\mid s',s)) to shift multiplication to sum\n lP = np.log(A)\n for i in range(lP.shape[0]):\n lP[i,i] = 0\n #init_cost = 0\n ##--- lP[x:x+1] está MAL hay que sumar respecto a i+1 en z, no en lP.\n #for i in range(len(init)-1):\n # init_cost += lP[init[i],init[i+1]:].sum()\n z_star = []\n z_cost = -np.inf\n for z in permutations(range(self.N)):\n cost = 0\n for i in range(len(z)-1):\n cost += lP[z[i],z[i+1:]].sum()\n if cost > z_cost:\n z_cost = cost\n z_star = z\n self.best_path = np.array(z_star)", "def iterations(self):\n i = 0\n stateVectorConv = self.stateVectorConvThreshold * 1.0e6\n n = len(self.model.stateVector)\n self.answer = None\n \n while ((i < self.maxiter) \n and (stateVectorConv > self.stateVectorConvThreshold)\n ):\n \n F, K = self.model()\n \n if np.any(np.isnan(F)) or np.any(np.isnan(K)):\n m = \"Iteration {0} failure of model.\"\n raise OptimalEstimationException(m.format(i))\n \n if self.model.verbose > 0:\n self.model.plot(i+1, stateVectorConv)\n \n try:\n self.DecomposeJacobian(K)\n except np.linalg.LinAlgError:\n m = \"Iteration {0} failure in decomposition.\"\n raise OptimalEstimationException(m.format(i))\n \n statevectorOffset = (self.V.T * self.priorSinvh * \n np.matrix(np.array(self.model.stateVector) - np.array(self.model.prior) ).T)\n measurementOffset = (self.U.T * self.errSinvh * \n np.matrix(self.model.observation - F).T)\n \n newState = np.matrix((self.w * \n (measurementOffset.A1 + \n self.w * statevectorOffset.A1))/(self.w**2+1.0)).T\n newState = self.priorSh * self.V * newState\n newState = 
newState.A1 + self.model.prior\n \n stateVectorConv = ((np.matrix(newState - self.model.stateVector) * \n self.Sinv * np.matrix(newState - self.model.stateVector).T)/n)[0,0]\n self.model.stateVector = newState\n\n if i == 0:\n \n stateVectorConv = self.stateVectorConvThreshold * 1.0e6\n \n print('cost Function for iteration {}:'.format(i), self.costFunction)\n\n i += 1\n \n F, K = self.model()\n if self.model.verbose > 0:\n self.model.plot(i+1, stateVectorConv)\n \n try:\n self.DecomposeJacobian(K)\n except np.linalg.LinAlgError:\n raise OptimalEstimationException(\"Failure in decomposition.\")\n \n Wplus2 = np.matrix(np.diag(1.0/(self.w**2+1.0)))\n self.model.covariance = (self.priorSh * self.V * Wplus2 * \n self.V.T * self.priorSh)\n \n\n \n return i, stateVectorConv", "def stationary_dist(a, sleep_probs):\n if len(a) - 1 != len(sleep_probs):\n raise ValueError('There should be exactly one sink vertex.')\n\n # n is the number of non-sink vertices\n n = len(a) - 1\n # deg is a list of degrees of the non-sink vertices\n deg = [len(a[v]) for v in range(n)]\n\n # t is a list of states in the transition matrix.\n # Initialize t with the state where all non-sink vertices have 1 active particle.\n t = [[1] * n]\n # print(t)\n t_absorb_idx = [] # list of indices of t that correspond to absorbing states\n # m is a Markov transition matrix between states, implemented as a list of lists in row-major order.\n m = [[0]]\n # q is a queue of current states to check.\n # Initialize q with the state where non-sink vertices have 1 active particle.\n # Each state in q includes at the end its index in t.\n init_state_q = [1] * n\n init_state_q.append(0)\n q = deque([init_state_q])\n while q:\n state = q.popleft()\n # Pick the vertex v to fire.\n # Fire the (only) vertex with 2 active particles; otherwise, fire the first vertex with 1 active particle.\n v = 0\n while v < n:\n if state[v] == 2:\n break\n v += 1\n if v == n:\n v = 0\n while v < n:\n if state[v] == 1:\n break\n v += 1\n if v == n:\n # The state is absorbing.\n t_absorb_idx.append(state[n])\n m[state[n]][state[n]] = 1\n else:\n if state[v] == 2:\n # The vertex v has 2 active particles.\n temp_state = copy.copy(state)\n t_idx = temp_state.pop()\n\n # Consider the active particle trying to fall asleep (which leaves the state unchanged).\n m[t_idx][t_idx] = sleep_probs[v]\n\n temp_state[v] = 1\n # Consider the active particle at v jumping to a neighbor.\n for i in a[v]:\n if i != n:\n # The active particle at v jumps to a non-sink vertex.\n temp_state_i_old = temp_state[i]\n if temp_state[i] == 's':\n temp_state[i] = 2\n else:\n temp_state[i] += 1\n try:\n # Check if we transition to a state that we have seen before (in t).\n new_idx = t.index(temp_state)\n except ValueError:\n # We transition to a new state.\n new_state = copy.copy(temp_state)\n t.append(new_state)\n new_idx = len(t) - 1\n new_state_q = copy.copy(new_state)\n new_state_q.append(new_idx)\n q.append(new_state_q)\n for row in m:\n row.append(0)\n m.append([0] * len(t))\n m[t_idx][new_idx] = (1 - sleep_probs[v]) / deg[v]\n temp_state[i] = temp_state_i_old\n else:\n # The active particle at v jumps to the sink.\n try:\n new_idx = t.index(temp_state)\n except ValueError:\n new_state = copy.copy(temp_state)\n t.append(new_state)\n new_idx = len(t) - 1\n new_state_q = copy.copy(new_state)\n new_state_q.append(new_idx)\n q.append(new_state_q)\n for row in m:\n row.append(0)\n m.append([0] * len(t))\n m[t_idx][new_idx] = (1 - sleep_probs[v]) / deg[v]\n\n if state[v] == 1:\n # The vertex v 
has 1 active particle.\n temp_state = copy.copy(state)\n t_idx = temp_state.pop()\n\n # Consider the active particle at v falling asleep.\n temp_state[v] = 's'\n try:\n new_idx = t.index(temp_state)\n except ValueError:\n new_state = copy.copy(temp_state)\n t.append(new_state)\n new_idx = len(t) - 1\n new_state_q = copy.copy(new_state)\n new_state_q.append(new_idx)\n q.append(new_state_q)\n for row in m:\n row.append(0)\n m.append([0] * len(t))\n m[t_idx][new_idx] = sleep_probs[v]\n\n temp_state[v] = 0\n # Consider the active particle at v jumping to a neighbor.\n for i in a[v]:\n if i != n:\n temp_state_i_old = temp_state[i]\n if temp_state[i] == 's':\n temp_state[i] = 2\n else:\n temp_state[i] += 1\n try:\n new_idx = t.index(temp_state)\n except ValueError:\n new_state = copy.copy(temp_state)\n t.append(new_state)\n new_idx = len(t) - 1\n new_state_q = copy.copy(new_state)\n new_state_q.append(new_idx)\n q.append(new_state_q)\n for row in m:\n row.append(0)\n m.append([0] * len(t))\n m[t_idx][new_idx] = (1 - sleep_probs[v]) / deg[v]\n temp_state[i] = temp_state_i_old\n else:\n try:\n new_idx = t.index(temp_state)\n except ValueError:\n new_state = copy.copy(temp_state)\n t.append(new_state)\n new_idx = len(t) - 1\n new_state_q = copy.copy(new_state)\n new_state_q.append(new_idx)\n q.append(new_state_q)\n for row in m:\n row.append(0)\n m.append([0] * len(t))\n m[t_idx][new_idx] = (1 - sleep_probs[v]) / deg[v]\n\n # From m, t, and t_absorb_idx, we can calculate the probabilities of ending up at each absorbing state.\n # the transition matrix between transient states\n m_trans_trans = \\\n sympy.Matrix([[row[j] for j in range(len(m)) if j not in t_absorb_idx]\n for row in [m[i] for i in range(len(m)) if i not in t_absorb_idx]])\n # the transition matrix from transient states to absorbing states\n m_trans_absorb = \\\n sympy.Matrix([[row[j] for j in t_absorb_idx]\n for row in [m[i] for i in range(len(m)) if i not in t_absorb_idx]])\n\n t0 = time.process_time()\n print(\"Time to compute transition matrix: \" + str(t0))\n\n # The probabilities of ending up at each absorbing state are given by the row vector\n # ((I - m_trans_trans)^T \\ e1)^T * m_trans_absorb\n # The linear solve is the most time-consuming part of the program.\n # We use solver() (which seems to be faster than sympy.linsolve() and sympy.solve()):\n ell = len(t) - len(t_absorb_idx)\n mat = sympy.SparseMatrix(ell, ell, {})\n for i in range(ell):\n mat[i, i] = 1\n for i in range(ell):\n for j in range(ell):\n if m_trans_trans[i, j] != 0:\n mat[i, j] -= m_trans_trans[i, j]\n dist = solver.inverse(mat, [0], list(range(ell))) * m_trans_absorb\n\n t1 = time.process_time() - t0\n print(\"Time to compute final answer: \" + str(t1))\n\n return [t[i] for i in t_absorb_idx], dist", "def get_viterbi_pairwise_potentials(self):\n all_labels = self.labels.idx_to_tag\n num_labels = len(all_labels)\n transition_matrix = torch.zeros([num_labels, num_labels])\n\n for i, previous_label in all_labels.items():\n for j, label in all_labels.items():\n # I labels can only be preceded by themselves or\n # their corresponding B tag.\n if i != j and label[0] == 'I' and not previous_label == 'B' + label[1:]:\n transition_matrix[i, j] = float(\"-inf\")\n return transition_matrix", "def kViterbi(crf_obj, emissions, topK, mask):\n assert topK >=1\n if topK == 1:\n seq_length = emissions.size(1)\n best_path = crf_obj.decode(emissions, mask)\n observation = pad_seq(best_path, seq_length, crf_obj.batch_first, 0)\n best_probs = calculate_prob_byObser(crf_obj, 
emissions, observation, mask)\n return best_path, best_probs.squeeze()\n\n crf_obj._validate(emissions, mask=mask)\n if mask is None:\n mask = emissions.new_ones(emissions.shape[:2], dtype=torch.uint8)\n if crf_obj.batch_first:\n emissions = emissions.transpose(0, 1)\n mask = mask.transpose(0, 1)\n normalizer = crf_obj._compute_normalizer(emissions, mask)\n # ===============start main part========================\n # emissions: (seq_length, batch_size, num_tags)\n # mask: (seq_length, batch_size)\n assert emissions.dim() == 3 and mask.dim() == 2\n assert emissions.shape[:2] == mask.shape\n assert emissions.size(2) == crf_obj.num_tags\n assert mask[0].all()\n\n seq_length, batch_size = mask.shape\n\n # Start transition and first emission\n # score is a tensor of size(batch_size, num_tags, topK) where for each\n # batch, value at tags i and top j stores the scores of the j-th best tag\n # sequence so far that ends with tag i\n #\n # pre_states saves the previous tag where the j-th best path that ends with tag i currently\n score = emissions.new_zeros((seq_length, batch_size, crf_obj.num_tags, topK))\n score[0,:,:,0] = crf_obj.start_transitions + emissions[0] # batch x num_tags\n\n pre_states = np.zeros((seq_length, batch_size, crf_obj.num_tags, topK), int)\n for i in range(crf_obj.num_tags):\n for b in range(batch_size):\n for k in range(topK):\n pre_states[0,b,i,k] = i # should be start transition\n\n # The ranking of multiple paths through same state\n rank = np.zeros((seq_length, batch_size, crf_obj.num_tags, topK), int)\n for t in range(1, seq_length):\n next_score_list = []\n for k in range(topK):\n broadcast_score = score[t-1,:,:,k].unsqueeze(2) #(batch_size, num_tags, 1)\n broadcast_emissions = emissions[t].unsqueeze(1) #(batch_size, 1, num_tags)\n\n # Compute the score tensor of size (batch_size, num_tags, num_tags)\n # where for each sample, entry at row i and column j stores\n # the sum of scores of all possible tag sequences so far that end\n # with transitioning from tag i to tag j and emitting\n # shape: (batch_size, num_tags, num_tags)\n next_score = broadcast_score + crf_obj.transitions + broadcast_emissions\n next_score_list.append(next_score)\n\n for b in range(batch_size):\n if mask[t,b]:\n for cur_state in range(crf_obj.num_tags):\n h = []\n for pre_state in range(crf_obj.num_tags):\n for k in range(topK):\n heapq.heappush(h, (-1*next_score_list[k][b, pre_state, cur_state], pre_state))\n\n # Get the sorted list\n h_sorted = [heapq.heappop(h) for _ in range(topK)] #get topK path into cur_state\n # We need to keep a ranking if a path crosses a state more than once\n rankDict = dict()\n # Retain the topK scoring paths\n for k in range(topK):\n score[t, b, cur_state, k] = score[t, b, cur_state, k] + (h_sorted[k][0].data * -1)\n pre_states[t, b, cur_state, k] = h_sorted[k][1]\n state = h_sorted[k][1]\n if state in rankDict:\n rankDict[state] = rankDict[state]+1\n else:\n rankDict[state] = 0\n rank[t, b, cur_state, k] = rankDict[state]\n else:\n for cur_state in range(crf_obj.num_tags):\n for k in range(topK):\n score[t, b, cur_state, k]=score[t-1, b, cur_state, k]\n\n\n batch_path = []\n batch_path_prob = []\n seq_ends = mask.long().sum(dim=0) - 1 # seq_len x batch # assume seq_ends=8, seq_len=9\n for b in range(batch_size):\n h = []\n for cur_state in range(crf_obj.num_tags):\n for k in range(topK):\n heapq.heappush(h, ( -1 * (score[seq_ends[b], b, cur_state, k]+crf_obj.end_transitions[cur_state]),\n cur_state, k))\n h_sorted = [heapq.heappop(h) for _ in range(topK)]\n k_list 
= np.zeros((topK, seq_ends[b]+1), int) # k x 9\n k_list_probs = list()\n for k in range(topK):\n prob = h_sorted[k][0]\n state = h_sorted[k][1]\n rankK = h_sorted[k][2]\n\n k_list_probs.append((prob*-1)-(normalizer[b]))\n k_list[k][seq_ends[b]] = state # assign index 8 == last one\n for t in range(seq_ends[b]-1, -1, -1): # t = 7,6,5,4,3,2,1,0\n nextState = k_list[k][t+1]\n preState = pre_states[t+1, b, nextState, rankK]\n k_list[k][t] = preState\n rankK = rank[t+1,b,nextState,rankK]\n batch_path.append(k_list.tolist())\n batch_path_prob.append(k_list_probs)\n if crf_obj.batch_first:\n batch_probs = recalculate_probs(crf_obj, batch_path, emissions.transpose(0,1), mask.transpose(0,1), topK)\n else:\n batch_probs = recalculate_probs(crf_obj, batch_path, emissions, mask, topK)\n return batch_path, batch_probs", "def graph_estimate(S, lambdaL, p, maxdf, threshold=1e-4, max_iter=10000):\n nlambda = lambdaL.shape[0]\n x = np.zeros(p * maxdf * nlambda)\n col_cnz = np.zeros(p + 1).astype(int)\n row_idx = np.zeros(p * maxdf * nlambda).astype(int)\n idx_a = np.zeros(p).astype(int)\n w1 = np.zeros(p)\n\n cnz = 0\n for m in range(p):\n idx_i = np.ones(p).astype(int)\n idx_i[m] = 0\n w0 = np.zeros(p)\n size_a = 0\n\n for i in range(nlambda):\n ilambda = lambdaL[i]\n gap_ext = 1\n iter_ext = 0\n while gap_ext != 0 and iter_ext < max_iter:\n size_a_prev = size_a\n for j in range(p):\n if idx_i[j] == 1:\n r = S[m, j]\n for k in range(size_a):\n rss_idx = idx_a[k]\n r -= S[j, rss_idx] * w0[rss_idx]\n\n if abs(r) > ilambda:\n if r >= 0:\n w1[j] = r - ilambda\n else:\n w1[j] = r + ilambda\n idx_a[size_a] = j\n size_a += 1\n idx_i[j] = 0\n else:\n w1[j] = 0\n\n w0[j] = w1[j]\n\n gap_ext = size_a - size_a_prev\n\n gap_int = 1\n iter_int = 0\n while gap_int > threshold and iter_int < max_iter:\n tmp1 = 0\n tmp2 = 0\n for j in range(size_a):\n w_idx = idx_a[j]\n r = S[m, w_idx] + w0[w_idx]\n\n for k in range(size_a):\n rss_idx = idx_a[k]\n r -= S[w_idx, rss_idx] * w0[rss_idx]\n\n if abs(r) > ilambda:\n if r >= 0:\n w1[w_idx] = r - ilambda\n else:\n w1[w_idx] = r + ilambda\n tmp2 += abs(w1[w_idx])\n else:\n w1[w_idx] = 0\n\n tmp1 += abs(w1[w_idx] - w0[w_idx])\n w0[w_idx] = w1[w_idx]\n gap_int = tmp1 / tmp2\n iter_int += 1\n\n junk_a = 0\n for j in range(size_a):\n w_idx = idx_a[j]\n if w1[w_idx] == 0:\n junk_a += 1\n idx_i[w_idx] = 1\n else:\n idx_a[j - junk_a] = w_idx\n size_a -= junk_a\n iter_ext += 1\n\n for j in range(size_a):\n w_idx = idx_a[j]\n x[cnz] = w1[w_idx]\n row_idx[cnz] = i * p + w_idx\n cnz += 1\n col_cnz[m + 1] = cnz\n\n return col_cnz, row_idx, x", "def terrain_pathfinding(vfile, efile, crossing_time, transform_cost, start, end):\n # no path or time needs to be taken if start == end\n if start == end:\n return (0, [])\n # Calls the Graph class to create the graph from vfile and efile\n graph = Graph(vfile, efile)\n\n # Creates a priority queue\n pqueue = queue.PriorityQueue()\n # Each vertex is split into 3 subvertices. 
distance_to_vertex[i][0] is the distance to the wheel subvertex on the\n # ith vertex, [i][1] is tank and [i][2] is hover\n # O(V)\n distance_to_vertex = [[inf, inf, inf] for _ in range(len(graph.vertex_properties))]\n # Starts in wheel form so to be at the start in wheel form is 0 while being at the start in hover or tank form\n # will be the transform cost\n distance_to_vertex[start] = [0, transform_cost, transform_cost]\n # O(V)\n # This will store the time taken to traverse through the vertex in each of the 3 forms\n distance_through_vertex = [[inf, inf, inf] for _ in range(len(graph.vertex_properties))]\n\n # O(V)\n visited = [[0,0,0] for _ in range(len(graph.vertex_properties))]\n # O(V)\n # This keeps track of what vertex and vehicle type (subvertex) is used to reach any vertex in each of its vehicle\n # types. This stores specifically the vertex and vehicle type just before in the shortest path.\n pred = [[(None, None),(None, None),(None, None)] for _ in range(len(graph.vertex_properties))]\n pred[start] = [(0, 0), (0,0), (0,0)]\n\n # Saves the time required to travel through each vertex in each vehicle type (subvertex)\n # O(V)\n for i in range(len(graph.vertex_properties)):\n if graph.vertex_properties[i] == 0:\n distance_through_vertex[i] = [crossing_time[\"wheel\"][\"plain\"], crossing_time[\"tank\"][\"plain\"],\n crossing_time[\"hover\"][\"plain\"]]\n elif graph.vertex_properties[i] == 1:\n distance_through_vertex[i] = [crossing_time[\"wheel\"][\"hill\"], crossing_time[\"tank\"][\"hill\"],\n crossing_time[\"hover\"][\"hill\"]]\n else:\n distance_through_vertex[i] = [crossing_time[\"wheel\"][\"swamp\"], crossing_time[\"tank\"][\"swamp\"],\n crossing_time[\"hover\"][\"swamp\"]]\n\n # Pushes every subvertex onto priority queue\n # O(V)\n for i in range(len(distance_to_vertex)):\n for j in range(len(distance_through_vertex[0])):\n pqueue.put((distance_to_vertex[i][j], i, j))\n\n # It runs through the priority queue until there is a path to reach the end vertex. The first path to the end vertex\n # will be the shortest path\n while not pqueue.empty() and visited[end] == [0,0,0]:\n # (distance to get to vertex in a certain vehicle type, the vertex ID, the vehicle type at the beginning of\n # iteration at the vertex)\n curr_distance_to, curr_vertex_ID, subvertex = pqueue.get()\n # If vertex has already been visited, ignore it. After it has been visited, the distance is locked in\n if visited[curr_vertex_ID][subvertex] == 1:\n continue\n # if vertex has been updated after this version was pushed onto the priority queue then ignore it\n if curr_distance_to != distance_to_vertex[curr_vertex_ID][subvertex]:\n continue\n visited[curr_vertex_ID][subvertex] = 1\n\n # Keeps track of what vertices were changed and therefore have to be added into the priority queue again\n relaxed_vertices = []\n edge_index_in_relax_vertices = -1\n # Runs through all the neighbours of the current vertex that has been popped off the priority queue\n for edge_destination in graph.edge_list[curr_vertex_ID]:\n edge_index_in_relax_vertices += 1\n\n # bitlist to check if any of the subvertices have been relaxed\n relaxed_vertices.append([edge_destination, 0, 0, 0])\n\n # From the vehicle type which is represented through subvertex, the following code determines the distance\n # to traverse through the current vertex in the vehicle type and reach the neighbouring vertex in a specific\n # vehicle type.\n # e.g. Current vertex ID = 3, subvertex = 0, neighbouring vertex ID = 4. 
The following code will determine\n # how long it takes to get to vertex 4 in wheel form and finish in wheel form, how long it takes to reach\n # vertex 4 in wheel form and finish in tank form (so has to transform once it reaches 4) and the same for\n # hover form.\n\n # To compute the distance, it looks at the distance to reach the current vertex in the vehicle form\n # (subvertex), how long it takes to traverse the current vertex in the form and then adds the transform cost\n # if the destination subvertex vehicle form is different to current vehicle form\n\n if subvertex == 0:\n\n # if the subvertex has already been finalised by being visited, it cannot be relaxed\n if visited[edge_destination][0] == 0:\n travel_through_current_vertex_to_next = distance_through_vertex[curr_vertex_ID][0] + \\\n distance_to_vertex[curr_vertex_ID][0]\n # If travelling through current vertex is a shorter path than previous path to next vertex then\n # relax by updating distance, pred list and relaxed_vertices list\n if travel_through_current_vertex_to_next < distance_to_vertex[edge_destination][0]:\n distance_to_vertex[edge_destination][0] = travel_through_current_vertex_to_next\n pred[edge_destination][0] = (curr_vertex_ID, 0)\n relaxed_vertices[edge_index_in_relax_vertices][1] = 1\n\n if visited[edge_destination][1] == 0:\n # If the destination is the end then it does not need to transform. Otherwise it should transform\n # once reaching destination vertex\n if edge_destination != end:\n travel_through_current_vertex_to_next = distance_through_vertex[curr_vertex_ID][0] + distance_to_vertex[curr_vertex_ID][0] + transform_cost\n else:\n travel_through_current_vertex_to_next = distance_through_vertex[curr_vertex_ID][0] + \\\n distance_to_vertex[curr_vertex_ID][0]\n if travel_through_current_vertex_to_next < distance_to_vertex[edge_destination][1]:\n distance_to_vertex[edge_destination][1] = travel_through_current_vertex_to_next\n pred[edge_destination][1] = (curr_vertex_ID, 0)\n relaxed_vertices[edge_index_in_relax_vertices][2] = 1\n\n if visited[edge_destination][2] == 0:\n if edge_destination != end:\n travel_through_current_vertex_to_next = distance_through_vertex[curr_vertex_ID][0] + \\\n distance_to_vertex[curr_vertex_ID][\n 0] + transform_cost\n else:\n travel_through_current_vertex_to_next = distance_through_vertex[curr_vertex_ID][0] + \\\n distance_to_vertex[curr_vertex_ID][0]\n if travel_through_current_vertex_to_next < distance_to_vertex[edge_destination][2]:\n distance_to_vertex[edge_destination][2] = travel_through_current_vertex_to_next\n pred[edge_destination][2] = (curr_vertex_ID, 0)\n relaxed_vertices[edge_index_in_relax_vertices][3] = 1\n\n if subvertex == 1:\n\n if visited[edge_destination][0] == 0:\n # If travelling through current vertex is a shorter path than previous path to next vertex then relax\n if edge_destination != end:\n travel_through_current_vertex_to_next = distance_through_vertex[curr_vertex_ID][1] + \\\n distance_to_vertex[curr_vertex_ID][1] + transform_cost\n else:\n travel_through_current_vertex_to_next = distance_through_vertex[curr_vertex_ID][1] + \\\n distance_to_vertex[curr_vertex_ID][1]\n if travel_through_current_vertex_to_next < distance_to_vertex[edge_destination][0]:\n distance_to_vertex[edge_destination][0] = travel_through_current_vertex_to_next\n pred[edge_destination][0] = (curr_vertex_ID, 1)\n relaxed_vertices[edge_index_in_relax_vertices][1] = 1\n\n if visited[edge_destination][1] == 0:\n travel_through_current_vertex_to_next = 
distance_through_vertex[curr_vertex_ID][1] + \\\n distance_to_vertex[curr_vertex_ID][1]\n if travel_through_current_vertex_to_next < distance_to_vertex[edge_destination][1]:\n distance_to_vertex[edge_destination][1] = travel_through_current_vertex_to_next\n pred[edge_destination][1] = (curr_vertex_ID, 1)\n relaxed_vertices[edge_index_in_relax_vertices][2] = 1\n\n if visited[edge_destination][2] == 0:\n if edge_destination != end:\n travel_through_current_vertex_to_next = distance_through_vertex[curr_vertex_ID][1] + \\\n distance_to_vertex[curr_vertex_ID][1] + transform_cost\n else:\n travel_through_current_vertex_to_next = distance_through_vertex[curr_vertex_ID][1] + \\\n distance_to_vertex[curr_vertex_ID][1]\n if travel_through_current_vertex_to_next < distance_to_vertex[edge_destination][2]:\n distance_to_vertex[edge_destination][2] = travel_through_current_vertex_to_next\n pred[edge_destination][2] = (curr_vertex_ID, 1)\n relaxed_vertices[edge_index_in_relax_vertices][3] = 1\n\n if subvertex == 2:\n # distance from current subvertex to next subvertex is the distance to travel through current\n # vertex as wheel + the distance to get to this vertex. Transforming before leaving current vertex. If at w subvertex\n # and pred is not a wheel then transform first then make transition\n if visited[edge_destination][0] == 0:\n if edge_destination != end:\n travel_through_current_vertex_to_next = distance_through_vertex[curr_vertex_ID][2] + \\\n distance_to_vertex[curr_vertex_ID][2] + transform_cost\n else:\n travel_through_current_vertex_to_next = distance_through_vertex[curr_vertex_ID][2] + \\\n distance_to_vertex[curr_vertex_ID][2]\n # If travelling through current vertex is a shorter path than previous path to next vertex then relax\n if travel_through_current_vertex_to_next < distance_to_vertex[edge_destination][0]:\n distance_to_vertex[edge_destination][0] = travel_through_current_vertex_to_next\n pred[edge_destination][0] = (curr_vertex_ID, 2)\n relaxed_vertices[edge_index_in_relax_vertices][1] = 1\n\n if visited[edge_destination][1] == 0:\n if edge_destination != end:\n travel_through_current_vertex_to_next = distance_through_vertex[curr_vertex_ID][2] + \\\n distance_to_vertex[curr_vertex_ID][2] + transform_cost\n else:\n travel_through_current_vertex_to_next = distance_through_vertex[curr_vertex_ID][2] + \\\n distance_to_vertex[curr_vertex_ID][2]\n if travel_through_current_vertex_to_next < distance_to_vertex[edge_destination][1]:\n distance_to_vertex[edge_destination][1] = travel_through_current_vertex_to_next\n pred[edge_destination][1] = (curr_vertex_ID, 2)\n relaxed_vertices[edge_index_in_relax_vertices][2] = 1\n\n if visited[edge_destination][2] == 0:\n travel_through_current_vertex_to_next = distance_through_vertex[curr_vertex_ID][2] + \\\n distance_to_vertex[curr_vertex_ID][2]\n if travel_through_current_vertex_to_next < distance_to_vertex[edge_destination][2]:\n distance_to_vertex[edge_destination][2] = travel_through_current_vertex_to_next\n pred[edge_destination][2] = (curr_vertex_ID, 2)\n relaxed_vertices[edge_index_in_relax_vertices][3] = 1\n\n # If vertexes have been relaxed, repush them into the pqueue\n for i in range(edge_index_in_relax_vertices+1):\n for j in range(1, 4):\n if relaxed_vertices[i][j] == 1:\n # O(logV)\n # Push (distance to subvertex, vertex_id, subvertex_id) onto priority queue\n pqueue.put((distance_to_vertex[relaxed_vertices[i][0]][j-1], relaxed_vertices[i][0], j-1))\n\n # Determines which vehicle type (subvertex) has the shortest path to reach the end 
vertex\n min_distance_index = 0\n for x in range(1, len(distance_to_vertex[end])):\n if distance_to_vertex[end][x] < distance_to_vertex[end][min_distance_index]:\n min_distance_index = x\n\n # res stores the shortest distance to reach the end vertex and then a list of tuples showing the path taken to reach\n # the end vertex in the shortest path\n res = [distance_to_vertex[end][min_distance_index], []]\n\n # Stores the path back to front which will be reversed later\n res_temp = []\n # Converts subvertex IDs into string representing the vehicle type\n str = index_to_vehicle_type(pred[end][min_distance_index][1])\n res_temp.append((end, str))\n\n # pred_vertex is the subvertex directly before the current subvertex in the path\n pred_vertex = pred[end][min_distance_index]\n\n # While loop goes backwards from the end vertex until it reaches the start vertex and appends the path to res_temp\n # continues until the start vertex has been appended\n while res_temp[-1][0] != start:\n curr_vertex_ID, subvertex = pred_vertex\n vehicle_type = index_to_vehicle_type(subvertex)\n res_temp.append((curr_vertex_ID, vehicle_type))\n pred_vertex = pred[curr_vertex_ID][subvertex]\n\n # res_temp is in reverse order so this appends the path in the correct order to res to be returned\n for i in range(len(res_temp)-1, -1, -1):\n res[1].append(res_temp[i])\n return res", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n from util import PriorityQueue\n\n pq = PriorityQueue()\n visited = []\n start = problem.getStartState()\n mapper = {}\n \n mapper[problem.getStartState()] = None\n pq.push(problem.getStartState(), 1)\n\n while (not pq.isEmpty()):\n point = pq.pop()\n if problem.isGoalState(point):\n current = point\n l = []\n while mapper[current] != None:\n tup = mapper[current]\n l.append(tup[1])\n current = tup[0]\n l.reverse()\n print l\n return l\n #util.raiseNotDefined()\n if not (point in visited):\n visited.append(point)\n succs = problem.getSuccessors(point)\n succs.reverse()\n for child in succs:\n if not (child[0] in mapper):\n pq.push(child[0], child[2]) #child has (xy, direction, weight)\n mapper[child[0]] = point, child[1]\n # util.raiseNotDefined()", "def objective(trial):\n # The parameters that we will calibrate the model for are shown here.\n # Optuna trial i\n BOD = trial.suggest_uniform(\"BOD\", 0, 1) #Review ranges here\n k_r = trial.suggest_uniform(\"k_r\", 0, 1) #Review Ranges here \n \n def ChLa(t):\n return 1 # Need to link to data\n\n def I(x):\n return 1 # Need to link to data\n\n K_z = 2 * 10**(-5) # p.51\n a = K_z\n k_b = 0.1 # Table 5\n th_b = 1.047 # Table 5\n k_r = 0.1 # Table 5\n YCHO2 = 0.0083 # Table 5\n th_p = 1.036 # Table 5\n th_s = 1.065 # Table 5\n th_r = 1.047 # Table 5\n\n def Temp(t):\n \"\"\"\n Function that maps time to temperature\n \"\"\"\n return 20 # Need to link to data\n\n def P_max(t):\n return 9.6 * 1.036 **(Temp(t) - 20) # Eq. 4\n\n def L_min(t):\n I = 1 # Need to link to PAR data\n K_1 = 0.687 * 1.086**(Temp(t) - 20)\n K_2 = 15\n return I * (1 + 2 * np.sqrt(K_1 / K_2)) / (I + K_1 + I**2 / K_2) # Eq. 
5\n \n # f deals with sink and source terms \n def f(x, t):\n return -1 / YCHO2 * k_r * th_r**(Temp(t) - 20) * ChLa(t) + P_max(t) * L_min(t) * ChLa(t) - k_b * th_b**(Temp(t)-20) * BOD \n\n L = 200 # Length of domain\n dt = 1 / 48 # Mesh spacing in t\n F = a * dt # a * dt / dx**2\n T = 100 # Simulation time stop\n\n # Solving the PDE\n DO, x, t, _ = solver_FE_simple(I, a, f, L, dt, F, T)\n \n # Creating some bogus targets while database errors are happening\n DO_data = DO + np.random.random(len(DO))\n\n # Using mean squared error as the measure of fit, where we want\n # to minimize this number\n return ((DO - DO_data)**2).mean()", "def calcProbStatePath(self, statesPath, transitionDict, availableStates):\n\n stateCount = 0\n for state in statesPath:\n if stateCount == 0:\n prevState = state\n stateCount += 1\n continue\n\n transition = str(prevState) + str(state)\n self.totalProbability *= float(transitionDict[transition])\n prevState = state\n beginningProb = 1/availableStates\n self.totalProbability *= beginningProb\n return self.totalProbability", "def get_matrix(self):\n matrix = np.zeros([len(self.states), len(self.states)])\n starting_states = []\n transitions = []\n\n for chords in self.training_data:\n states = []\n is_starting_state = True\n chunks = [chords[x:x+self.order] for x in range(0,\n len(chords), self.order)]\n for chunk in chunks:\n chunk_string = \" \".join(chunk)\n if is_starting_state:\n starting_states.append(chunk_string)\n is_starting_state = False\n states.append(chunk_string)\n\n for i in range(0, len(states)):\n if i < (len(states)) - 1:\n transitions.append([states[i], states[i + 1]])\n else:\n transitions.append([states[i]])\n\n self.starting_probabilities = np.zeros([len(self.states)])\n\n for transition in transitions:\n for row, row_contents in enumerate(self.transitions):\n for col, _ in enumerate(row_contents):\n if transition == self.transitions[row][col]:\n matrix[row][col] += 1\n\n for i, state in enumerate(self.states):\n for j, possible_state in enumerate(starting_states):\n if state == possible_state:\n self.starting_probabilities[j] += 1\n\n num = sum(self.starting_probabilities)\n for i, prob in enumerate(self.starting_probabilities):\n self.starting_probabilities[i] = prob / num\n\n for m in range(len(matrix)):\n num = sum(matrix[m])\n if int(num) is not 0:\n for i in range(len(matrix[m])):\n matrix[m][i] = (matrix[m][i] / num)\n else:\n matrix[m] = self.starting_probabilities\n return matrix", "def CalculateChebyPaths(self):\n Kmin, Kmax = self.Kmin, self.Kmax\n self.apath = array([0 for y in range(self.T)], dtype=float)\n self.cpath = array([0 for y in range(self.T)], dtype=float)\n self.npath = array([0 for y in range(self.T)], dtype=float)\n # generate each generation's asset, consumption and labor supply forward\n for y in range(self.T-1): # y = 0, 1,..., 58\n self.cpath[y] = self.chebeval(array([self.apath[y]]),self.ac[y],Kmin,Kmax)\n # if self.cpath[y] < 0:\n # self.cpath[y] = 0\n if y >= self.W:\n income = self.b\n else:\n self.npath[y] = self.chebeval(array([self.apath[y]]),self.an[y],Kmin,Kmax)\n income = (1-self.tau)*self.w*self.npath[y]\n self.apath[y+1] = (1+self.r)*self.apath[y] + income - self.cpath[y]\n self.upath[y] = self.util(self.cpath[y], self.npath[y])\n # the oldest generation's consumption and labor supply\n self.cpath[self.T-1] = (1+self.r)*self.apath[self.T-1] + self.b\n # self.cpath[self.T-1] = self.chebeval(array([self.apath[self.T-1]]),self.ac[self.T-1],Kmin,Kmax)\n self.upath[self.T-1] = 
self.util(self.cpath[self.T-1], self.npath[self.T-1])\n # print self.cpath, self.apath, self.npath", "def createTransitionMatrix(self):\n transitionMatrix = np.zeros((1, self.n*self.m))\n\n for i in self.board:\n x1, y1 = i\n line = []\n\n if self.walls[x1][y1] is True:\n line = np.zeros((1, self.n*self.m))\n tmp = np.vstack((transitionMatrix, line.reshape((1, -1))))\n transitionMatrix = tmp\n continue\n\n count = 0\n left = self.walls[x1 - 1][y1]\n right = self.walls[x1 + 1][y1]\n down = self.walls[x1][y1 - 1]\n up = self.walls[x1][y1 + 1]\n list = [left, right, down, up]\n for near in list:\n if near is False:\n count = count + 1\n\n p = self.p\n\n if count == 0:\n line = np.zeros((1, self.n*self.m))\n tmp = np.vstack((transitionMatrix, line.reshape((1, -1))))\n transitionMatrix = tmp\n continue\n\n cmp = (1 - p)/count\n\n for j in self.board:\n x2, y2 = j\n walls = self.walls[x2][y2] is False\n\n if self.walls[x1 + 1][y1] is False:\n \"\"\"\n If East is a legal action\n \"\"\"\n if x2 == x1 + 1 and y2 == y1:\n line.append(p + cmp)\n\n elif x2 == x1 - 1 and y2 == y1 and walls:\n line.append(cmp)\n\n elif x2 == x1 and y2 == y1 + 1 and walls:\n line.append(cmp)\n\n elif x2 == x1 and y2 == y1 - 1 and walls:\n line.append(cmp)\n\n else:\n line.append(0)\n\n else:\n \"\"\"\n If East is not a legal action\n \"\"\"\n if x2 == x1 - 1 and y2 == y1 and walls:\n line.append(1/count)\n\n elif x2 == x1 and y2 == y1 + 1 and walls:\n line.append(1/count)\n\n elif x2 == x1 and y2 == y1 - 1 and walls:\n line.append(1/count)\n\n else:\n line.append(0)\n\n line = np.array(line)\n tmp = np.vstack((transitionMatrix, line.reshape((1, -1))))\n transitionMatrix = tmp\n\n return transitionMatrix[1:, :]", "def oim_node2vec_simple(\n df,\n df_feats,\n num_inf=10,\n sigma=4,\n c=0.1,\n epsilon=0.4,\n num_repeats=15,\n num_nodes_tim=-1,\n oracle=tim,\n):\n logger_tlu.debug(\"Started Online Influence Maximization...\")\n logger_tlu.debug(\"Setting parameters\")\n num_feats = df_feats.shape[1]\n num_edges_t = df.shape[0]\n\n # \"True\" probabilities - effectively our test set\n true_weights = df[\"probab\"].copy()\n\n # b, M_inv - used by IMLinUCB\n b = np.zeros((num_feats, 1))\n m_inv = np.eye(num_feats, num_feats)\n\n # Returning these\n s_best = []\n reward_best = 0\n u_e_best = []\n rewards = []\n rewards_edges = []\n\n for iter_oim in tqdm(\n range(num_repeats),\n desc=f\"OIM iters {num_edges_t} edges\",\n leave=False,\n file=sys.stderr,\n ):\n # ---- Step 1 - Calculating the u_e ----\n theta = (m_inv @ b) / (sigma * sigma)\n # xMx = (df_feats.values @ m_inv @ df_feats.T.values).clip(min=0)\n\n u_e = []\n for i in range(num_edges_t):\n x_e = df_feats.loc[i].values\n xMx = x_e @ m_inv @ x_e.T # .clip(min=0)\n u_e.append(np.clip(x_e @ theta + c * np.sqrt(xMx), 0, 1))\n # u_e.append(expit(x_e @ theta + c * np.sqrt(xMx)))\n\n u_e = np.array(u_e)\n\n # ---- Step 2 - Evaluating the performance ----\n # Loss function\n df[\"probab\"] = u_e\n s_oracle = sorted(\n oracle(\n df[[\"source\", \"target\", \"probab\"]],\n num_nodes_tim,\n num_edges_t,\n num_inf,\n epsilon,\n )\n )\n\n # Observing edge-level feedback\n df[\"probab\"] = true_weights\n\n algo_act_nodes, algo_act_edges, algo_obs_edges = run_ic_eff(df, s_oracle)\n\n algo_num_nodes = len(algo_act_nodes)\n algo_num_edges = len(algo_act_edges)\n\n rewards.append(algo_num_nodes)\n rewards_edges.append(algo_num_edges)\n\n logger_tlu.debug(f\"Algo seeds: {s_oracle}\")\n logger_tlu.debug(f\"Algo reward: {algo_num_nodes}\")\n logger_tlu.debug(f\"Best algo reward: 
{reward_best}\")\n logger_tlu.debug(f\"Rewards: {rewards}\")\n logger_tlu.debug(f\"Edge rewards: {rewards_edges}\")\n logger_tlu.debug(f\"Algo weights {u_e[80:90]}\".replace(\"\\n\", \"\"))\n logger_tlu.debug(f\"Real weights {true_weights[80:90]}\".replace(\"\\n\", \"\"))\n\n if algo_num_nodes > reward_best:\n reward_best = algo_num_nodes\n s_best = s_oracle\n u_e_best = u_e\n\n # ---- Step 3 - Calculating updates ----\n for i in algo_obs_edges:\n x_e = np.array([df_feats.loc[i].values])\n m_inv -= (m_inv @ x_e.T @ x_e @ m_inv) / (\n x_e @ m_inv @ x_e.T + sigma * sigma\n )\n b += x_e.T * int(i in algo_act_edges)\n\n return_dict = {\n \"rewards\": rewards,\n \"rewards_edges\": rewards_edges,\n \"s_best\": s_best,\n \"u_e_best\": u_e_best,\n \"reward_best\": reward_best,\n }\n logger_tlu.debug(\"The algorithm has finished running.\")\n logger_tlu.debug(f\"Returning: {return_dict}\")\n return return_dict", "def solve(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n\n\n\n\n path = [starting_car_location]\n dict = {}\n index = 0\n for i in range(len(list_of_locations)):\n if list_of_locations[i] == starting_car_location:\n index = i\n\n path = [index]\n\n G, m = adjacency_matrix_to_graph(adjacency_matrix)\n\n home_indexes = []\n\n for home in list_of_homes:\n for i in range(len(list_of_locations)):\n if list_of_locations[i] == home:\n home_indexes.append(i)\n break\n\n new_adjacency = [[\"x\" for i in range(len(list_of_locations))] for j in range(len(list_of_locations))]\n\n # for sake of figuring out where to walk\n for home in home_indexes:\n di_path = nx.dijkstra_path(G, index, home)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n\n for home1 in home_indexes:\n for home2 in home_indexes:\n if not home1 == home2:\n di_path = nx.dijkstra_path(G, home1, home2)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n\n\n\n G2, m = adjacency_matrix_to_graph(new_adjacency)\n\n all_driving_path = list(nx.dfs_edges(G2))\n\n\n\n\n walking_to = []\n walking_from = {}\n\n for i in range(len(new_adjacency)):\n if i in home_indexes:\n count = 0\n edge_to = 0\n for j in range(len(new_adjacency)):\n if new_adjacency[i][j] != \"x\":\n count += 1\n edge_to = j\n\n #must ensure that this is not a home that we are already dropping someone off at, otherwise it will cut off a line of two homes\n if count == 1 and i != index and i not in walking_from.keys():\n new_adjacency[i][edge_to] = \"x\"\n new_adjacency[edge_to][i] = \"x\"\n walking_to.append(i)\n if edge_to in walking_from:\n walking_from[edge_to] = walking_from[edge_to] + [i]\n else:\n walking_from[edge_to] = [i]\n\n #\n # for i in range(len(all_driving_path) - 1):\n # #if first vertex in edge is the same, we should walk\n # if all_driving_path[i][0] == all_driving_path[i + 1][0]:\n # print(all_driving_path[i][0])\n # print(all_driving_path[i][1])\n # #get rid of only edge connected to this home\n # new_adjacency[all_driving_path[i][0]][all_driving_path[i][1]] = \"x\"\n # new_adjacency[all_driving_path[i][1]][all_driving_path[i][0]] = \"x\"\n # walking_to.append(all_driving_path[i][1])\n # if all_driving_path[i][0] in walking_from:\n # walking_from[all_driving_path[i][0]] = 
walking_from[all_driving_path[i][0]] + [all_driving_path[i][1]]\n # else:\n # walking_from[all_driving_path[i][0]] = [all_driving_path[i][1]]\n\n\n\n dropoff_locations = list(walking_from.keys())\n for loc in dropoff_locations:\n if loc in home_indexes:\n dropoff_locations.remove(loc)\n\n\n for loc in dropoff_locations:\n di_path = nx.dijkstra_path(G, loc, home)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n for loc in dropoff_locations:\n for home in home_indexes:\n di_path = nx.dijkstra_path(G, loc, home)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n\n G2, m = adjacency_matrix_to_graph(new_adjacency)\n # G = G2\n # pos=nx.spring_layout(G2)\n # nx.draw_networkx_nodes(G2,pos)\n # nx.draw_networkx_labels(G2, pos)\n # nx.draw_networkx_edges(G2,pos,width=1.0,alpha=0.5)\n #\n # plt.draw()\n # plt.show()\n\n # condensed shortest paths to edges - use G3 for real\n\n new_adjacency2 = [[\"x\" for i in range(len(list_of_locations))] for j in range(len(list_of_locations))]\n\n for home in home_indexes:\n if home not in walking_to:\n di_path = nx.dijkstra_path(G2, index, home)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n\n for home1 in home_indexes:\n for home2 in home_indexes:\n if not home1 == home2 and home1 not in walking_to and home2 not in walking_to:\n di_path = nx.dijkstra_path(G2, home1, home2)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n for loc in dropoff_locations:\n di_path = nx.dijkstra_path(G2, index, loc)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n for loc in dropoff_locations:\n for home in home_indexes:\n di_path = nx.dijkstra_path(G2, loc, home)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n\n\n\n final_G, m = adjacency_matrix_to_graph(new_adjacency2)\n drive_path = list(nx.dfs_edges(final_G, source=index))\n drive_path.append(index)\n\n mst = nx.minimum_spanning_tree(final_G)\n\n\n\n new_mst = nx.MultiGraph(mst)\n for edge in mst.edges():\n new_mst.add_edge(edge[0], edge[1])\n\n\n if new_mst.degree[index] != 0:\n to_remove = []\n for node in new_mst:\n if (new_mst.degree[node] == 0):\n to_remove.append(node)\n new_mst.remove_nodes_from(to_remove)\n\n eulerian = list(nx.eulerian_circuit(new_mst, 
index))\n\n path = []\n for edge in eulerian:\n path.append(edge[0])\n\n path.append(eulerian[len(eulerian) - 1][1])\n\n already_seen = []\n to_remove = []\n for i in range(len(path) - 1):\n if path[i] in already_seen:\n to_remove.append(i)\n else:\n already_seen.append(path[i])\n\n new_path = []\n for i in range(len(path) - 1):\n if i not in to_remove:\n new_path.append(path[i])\n path = new_path\n print(eulerian)\n else:\n path = [index]\n print(path)\n\n\n\n\n\n\n\n # print(path)\n final_path = []\n for node in path:\n if node == index:\n final_path.append(node)\n # print(\"Index: \", node)\n elif node in home_indexes and node not in walking_to:\n final_path.append(node)\n # print(\"Home but not walking: \", node)\n elif node in dropoff_locations:\n final_path.append(node)\n # print(\"Dropoff loc: \", node)\n final_path.append(index)\n #print(walking_from)\n # print(final_path)\n # nx.draw(mst)\n # plt.draw()\n # plt.show()\n for node in final_path:\n if node in walking_from and node in home_indexes:\n dict[node] = [node] + walking_from[node]\n elif node in home_indexes:\n dict[node] = [node]\n elif node in walking_from:\n dict[node] = walking_from[node]\n\n very_final_path = []\n for i in range(len(final_path) - 1):\n condensed_path = nx.dijkstra_path(G2, final_path[i], final_path[i+1])\n for j in range(len(condensed_path) - 1):\n if condensed_path[j] != condensed_path[j + 1]:\n very_final_path.append(condensed_path[j])\n\n if len(very_final_path) >= 1 and [len(very_final_path) - 1] != index:\n very_final_path.append(index)\n\n if len(very_final_path) == 0:\n very_final_path = [index]\n\n print(very_final_path)\n print(dict)\n\n\n path2 = list(nx.dfs_preorder_nodes(mst, index))\n\n final_path2 = []\n for node in path2:\n if node == index:\n final_path2.append(node)\n # print(\"Index: \", node)\n elif node in home_indexes and node not in walking_to:\n final_path2.append(node)\n # print(\"Home but not walking: \", node)\n elif node in dropoff_locations:\n final_path2.append(node)\n # print(\"Dropoff loc: \", node)\n final_path2.append(index)\n\n\n for node in final_path2:\n if node in walking_from and node in home_indexes:\n dict[node] = [node] + walking_from[node]\n elif node in home_indexes:\n dict[node] = [node]\n elif node in walking_from:\n dict[node] = walking_from[node]\n\n very_final_path2 = []\n for i in range(len(final_path2) - 1):\n condensed_path = nx.dijkstra_path(G2, final_path2[i], final_path2[i+1])\n for j in range(len(condensed_path) - 1):\n if condensed_path[j] != condensed_path[j + 1]:\n very_final_path2.append(condensed_path[j])\n\n if len(very_final_path2) >= 1 and [len(very_final_path2) - 1] != index:\n very_final_path2.append(index)\n\n if len(very_final_path2) == 0:\n very_final_path2 = [index]\n\n opt1 = cost_of_solution(G, very_final_path, dict)\n opt2 = cost_of_solution(G, very_final_path2, dict)\n\n ultra_final_path = []\n if (opt1 <= opt2):\n ultra_final_path = very_final_path\n else:\n ultra_final_path = very_final_path2\n\n return ultra_final_path, dict\n\n pass", "def forwardVariableGeneration(self):\n self.alpha = zeros((self.noOfEmmittingStates+2, self.T + 1))\n\n # initialistation\n self.alpha[0,0] = 1.0\n self.alpha[1:,0] = 0.0\n self.alpha[0,1:] = 0.0\n\n # main recursion\n for t in range(1, self.T+1):\n for j in range(1, self.noOfEmmittingStates+1):\n partialSum = 0\n for k in range(self.noOfEmmittingStates+1):\n partialSum += (self.alpha[k, t-1] * self.transitionMatrix[k, j-1])\n self.alpha[j, t] = self.b[j-1, t-1] * partialSum\n # since must end in 
final state, last alpha for states with zero transition\n # prob to last state must be zero?\n for row in range(self.transitionMatrix.shape[0]):\n if self.transitionMatrix[row,-1] == 0.0:\n self.alpha[row,-1] = 0.0\n # fwd prob variable for final state at 'last' timestep gets bumped into the\n # final column to save having a needless column\n partialSum = 0\n for k in range(self.noOfEmmittingStates+1):\n partialSum += (self.alpha[k,-1] * self.transitionMatrix[k,-1])\n self.alpha[-1,-1] = partialSum\n\n # likelihood of observed sequence, p(O|lambda)\n self.observationLikelihood = self.alpha[-1,-1]", "def prob3():\n #set up the matrices\n solvers.options['show_progress'] = False\n c = np.array([4., 7., 6., 8., 8., 9.])\n\n G = np.array([[1.,1.,0.,0.,0.,0.],\n [-1.,-1.,0.,0.,0.,0.],\n [0.,0.,1.,1.,0.,0.],\n [0.,0.,-1.,-1.,0.,0.],\n [0.,0.,0.,0.,1.,1.],\n [0.,0.,0.,0.,-1.,-1.],\n [1.,0.,1.,0.,1.,0.],\n [-1.,0.,-1.,0.,-1.,0.],\n [0.,1.,0.,1.,0.,1.],\n [0.,-1.,0.,-1.,0.,-1.]])\n G = np.vstack((G, -1*np.eye(6)))\n h = np.array([7,-7,2,-2,4,-4,5,-5,8,-8,0,0,0,0,0,0],dtype=\"float\")\n\n #convert the matrices\n c = matrix(c)\n G = matrix(G)\n h = matrix(h)\n #solve the matrices\n sol = solvers.lp(c, G, h)\n return np.ravel(sol['x']), sol['primal objective']", "def _get_maze_transition_probabilities(locs, movedir, exits):\n num_states = len(locs) + 1 # including final \"game over\" state\n num_actions = len(movedir)\n\n p = np.zeros((num_states, num_states, num_actions))\n for s, c in enumerate(locs):\n if c in exits:\n # transition to \"game over\" state (assumed to be last state)\n p[-1, s, :] = 1\n else:\n for a in range(num_actions):\n for j in [-1, 0, 1]:\n # next location\n cnext = [c[0] + movedir[(a + j) % num_actions][0],\n c[1] + movedir[(a + j) % num_actions][1]]\n if cnext in locs:\n snext = locs.index(cnext)\n else:\n # remain at current field\n snext = s\n # update probability table\n p[snext, s, a] += (0.8 if j == 0 else 0.1)\n # \"game over\" state transitions to itself\n p[-1, -1, :] = 1\n\n return p", "def probX(Xs, a, td, irts=Irts({}), prior=None, origmat=None, changed=[]):\n\n try:\n numnodes=len(a)\n except TypeError:\n raise Exception(a)\n reg=(1+1e-10) # nuisance parameter to prevent errors; can also use pinv instead of inv, but that's much slower\n identmat=np.identity(numnodes) * reg # pre-compute for tiny speed-up (only for non-IRT)\n\n probs=[]\n\n # generate transition matrix (from: column, to: row) from link matrix\n t=a/sum(a.astype(float))\n t=np.nan_to_num(t) # jumping/priming models can have nan in matrix, need to change to 0\n \n if (td.jumptype==\"stationary\") or (td.start_node==\"stationary\"):\n statdist=stationary(t)\n\n # U-INVITE probability excluding jumps, prior, and priming adjustments -- those come later\n for xnum, x in enumerate(Xs):\n x2=np.array(x)\n t2=t[x2[:,None],x2] # re-arrange transition matrix to be in list order\n prob=[]\n if td.start_node==\"stationary\":\n prob.append(statdist[x[0]]) # probability of X_1\n elif td.start_node==\"uniform\":\n prob.append(1.0/numnodes)\n\n # if impossible starting point, return immediately\n if (prob[-1]==0.0):\n try:\n return -np.inf, (x[0], x[1])\n except:\n return -np.inf, x[0]\n\n if (len(changed) > 0) and isinstance(origmat,list): # if updating prob. 
matrix based on specific link changes\n update=0 # reset for each list\n\n # flag if list contains perseverations\n if len(x) == len(set(x)):\n list_has_perseverations = False\n else:\n list_has_perseverations = True\n\n for curpos in range(1,len(x)):\n if (len(changed) > 0) and isinstance(origmat,list):\n if update==0: # first check if probability needs to be updated\n if (Xs[xnum][curpos-1] in changed): # (only AFTER first changed node has been reached)\n update=1\n else: # if not, take probability from old matrix\n prob.append(origmat[xnum][curpos])\n continue\n \n if list_has_perseverations: # a bit slower because matrix is being copied\n x2=np.array([i for i,j in enumerate(x) if (j not in x[:i]) and (i < curpos)]) # column ids for transient states excluding perseverations\n Q=t2[x2[:,None],x2] # excludes perseverations. could be sped if only performed when Q contains perseverations\n # as opposed to being done for every transition if a perseveration is in the list\n else: \n Q=t2[:curpos,:curpos] # old way when data does not include perseverations\n \n # td.censor_fault is necessary to model perservations in the data\n if td.censor_fault > 0.0:\n Q=np.multiply(Q, 1.0-td.censor_fault)\n \n if len(irts.data) > 0: # use this method only when passing IRTs\n numcols=len(Q)\n flist=[]\n newQ=np.zeros(numcols) # init to Q^0, for when r=1\n newQ[curpos-1]=1.0 # (using only one: row for efficiency)\n\n irt=irts.data[xnum][curpos-1]\n\n # precompute for small speedup\n if irts.irttype==\"gamma\":\n logbeta=np.log(irts.gamma_beta)\n logirt=np.log(irt)\n\n # normalize irt probabilities to avoid irt weighting\n if irts.irttype==\"gamma\":\n # r=alpha. probability of observing irt at r steps\n irtdist=[r*logbeta-math.lgamma(r)+(r-1)*logirt-irts.gamma_beta*irt for r in range(1,irts.rcutoff)]\n if irts.irttype==\"exgauss\":\n \n irtdist=[np.log(irts.exgauss_lambda/2.0)+(irts.exgauss_lambda/2.0)*(2.0*r+irts.exgauss_lambda*(irts.exgauss_sigma**2)-2*irt)+np.log(math.erfc((r+irts.exgauss_lambda*(irts.exgauss_sigma**2)-irt)/(np.sqrt(2)*irts.exgauss_sigma))) for r in range(1,irts.rcutoff)]\n\n for r in range(1,irts.rcutoff):\n innersum=0\n for k in range(numcols):\n num1=newQ[k] # probability of being at node k in r-1 steps\n num2=t2[curpos,k] # probability transitioning from k to absorbing node \n innersum=innersum+(num1*num2)\n\n # compute irt probability given r steps\n log_dist = irtdist[r-1] / sum(irtdist)\n\n if innersum > 0: # sometimes it's not possible to get to the target node in r steps\n flist.append(log_dist + np.log(innersum))\n\n newQ=np.inner(newQ,Q) # raise power by one\n\n f=sum([np.e**i for i in flist])\n prob.append(f) # probability of x_(t-1) to X_t\n else: # if no IRTs, use standard U-INVITE\n I=identmat[:len(Q),:len(Q)]\n \n # novel items are emitted with probability 1 when encountered. perseverations are emitted with probability td.censor_fault when encountered.\n if list_has_perseverations: # if list has perseverations. 
could speed up by only doing this step when a perseveration has been encountered\n x1=np.array([curpos]) # absorbing node\n #x2=np.array([i for i,j in enumerate(x) if (j not in x[:i]) and (i < curpos)]) # column ids for transient states excluding perseverations\n x2=np.array([i for i,j in enumerate(x) if (j not in x[i+1:curpos]) and (i < curpos)]) # column ids for transient states excluding perseverations\n R=t2[x1[:,None],x2][0] # why is [0] necessary here but not in the else case?\n \n if Xs[xnum][curpos] in Xs[xnum][:curpos]: # if absorbing state has appeared in list before...\n R=np.multiply(R,td.censor_fault)\n else: # if not a perseveration\n R=t2[curpos,:curpos] # old way\n \n ### test (when censor_fault=0) to see if absorbing distribution sums to 1... something is broken\n #total = []\n #x2=np.array([j for i,j in enumerate(x) if (i < curpos)]) # column ids for transient states excluding perseverations\n #N=np.linalg.solve(I-Q,I[-1])\n #for i in range(len(t)):\n # R=t[np.array([i])[:,None],x2]\n # B=np.dot(R,N)\n # total.append(B[0])\n # if B[0] > 1.0:\n # print(\"NONONO\")\n #print(\"total \", total)\n #R=t2[curpos,:curpos] # old way to reset\n ###\n \n N=np.linalg.solve(I-Q,I[-1])\n B=np.dot(R,N)\n if np.isnan(B):\n B=0.0\n prob.append(B)\n \n # alternative/original using matrix inverse\n #R=t2[curpos:,:curpos]\n #N=inv(I-Q)\n #B=np.dot(R,N) \n #prob.append(B[0,curpos-1])\n\n # if there's an impossible transition and no jumping/priming, return immediately\n if (prob[-1]==0.0) and (td.jump == 0.0) and (td.priming == 0.0):\n return -np.inf, (x[curpos-1], x[curpos])\n\n probs.append(prob)\n\n uinvite_probs = copy.deepcopy(probs) # store only u-invite transition probabilities (the computationally hard stuff) to avoid recomputing\n \n # adjust for jumping probability\n if td.jump > 0.0:\n if td.jumptype==\"uniform\":\n probs=addJumps(probs, td, numnodes=numnodes)\n elif td.jumptype==\"stationary\":\n probs=addJumps(probs, td, statdist=statdist, Xs=Xs)\n\n if (td.priming > 0.0):\n probs=adjustPriming(probs, td, Xs)\n\n # check for impossible transitions after priming and jumping\n for xnum, x in enumerate(probs):\n for inum, i in enumerate(x):\n if (i==0.0) and (inum==0):\n return -np.inf, (Xs[xnum][inum], Xs[xnum][inum+1]) # link to next item when first item is unreachable\n elif (i==0.0) and (inum > 0):\n return -np.inf, (Xs[xnum][inum-1], Xs[xnum][inum]) # link to previous item otherwise\n \n try:\n ll=sum([sum([np.log(j) for j in probs[i]]) for i in range(len(probs))])\n except:\n ll=-np.inf\n\n # include prior?\n if prior:\n priorlogprob = evalGraphPrior(a, prior)\n ll = ll + priorlogprob\n\n return ll, uinvite_probs", "def Find_Path(self):\n closed_nodes_map = [] # map of closed (tried-out) nodes\n open_nodes_map = [] # map of open (not-yet-tried) nodes\n dir_map = [] # map of directions\n row = [0] * self.n\n for i in range(self.m): # create 2d arrays\n closed_nodes_map.append(list(row))\n open_nodes_map.append(list(row))\n dir_map.append(list(row))\n \n pq = [[], []] # priority queues of open (not-yet-tried) nodes\n pqi = 0 # priority queue index\n # create the start node and push into list of open nodes\n n0 = node(self.xStart, self.yStart, 0.0, 0.0)\n n0.updatePriority(self.xFinish, self.yFinish)\n heappush(pq[pqi], n0)\n open_nodes_map[self.yStart][self.xStart] = n0.priority # mark it on the open nodes map\n \n # A* search\n while len(pq[pqi]) > 0:\n # get the current node w/ the highest priority\n # from the list of open nodes\n n1 = pq[pqi][0] # top node\n n0 = 
node(n1.xPos, n1.yPos, n1.distance, n1.priority)\n x = n0.xPos\n y = n0.yPos\n heappop(pq[pqi]) # remove the node from the open list\n open_nodes_map[y][x] = 0\n # mark it on the closed nodes map\n closed_nodes_map[y][x] = 1\n \n # quit searching when the goal state is reached\n if x == self.xFinish and y == self.yFinish:\n # Generate the path from finish to start by following the \n # directions.\n return self.Reconstruct_Path(dir_map)\n \n # generate moves (child nodes) in all possible directions\n for i in range(self.num_directions):\n new_x = x + self.dx[i]\n new_y = y + self.dy[i]\n Flag=True\n if not (new_x < 0 or new_x > self.n-1 or new_y < 0 or new_y > self.m - 1\n or self.MAP[new_y][new_x] == 1 or closed_nodes_map[new_y][new_x] == 1):\n # Check to see if the extended path runs through any obstacles\n if (abs(self.dx[i])>1 or abs(self.dy[i])>1):\n # Need to check that the path does not pass an object\n JumpCells=2*max(abs(self.dx[i]),abs(self.dy[i]))-1\n for K in range(1,JumpCells):\n YPOS=int(round(K*1.0*self.dy[i]/JumpCells))\n XPOS=int(round(K*1.0*self.dx[i]/JumpCells))\n if (self.MAP[y+YPOS][x+XPOS]==1):\n Flag=False\n if Flag: \n # generate a child node\n m0 = node(new_x, new_y, n0.distance, n0.priority)\n m0.calc_cost(self.dx[i], self.dy[i])\n m0.updatePriority(self.xFinish, self.yFinish)\n # if it is not in the open list then add into that\n if open_nodes_map[new_y][new_x] == 0:\n open_nodes_map[new_y][new_x] = m0.priority\n heappush(pq[pqi], m0)\n # mark its parent node direction\n dir_map[new_y][new_x] = (self.num_directions-i-1) % self.num_directions\n elif open_nodes_map[new_y][new_x] > m0.priority:\n # update the priority info\n open_nodes_map[new_y][new_x] = m0.priority\n # update the parent direction info\n dir_map[new_y][new_x] = (self.num_directions-i-1) % self.num_directions\n # replace the node\n # by emptying one pq to the other one\n # except the node to be replaced will be ignored\n # and the new node will be pushed in instead\n while not (pq[pqi][0].xPos == new_x and pq[pqi][0].yPos == new_y):\n heappush(pq[1 - pqi], pq[pqi][0])\n heappop(pq[pqi])\n heappop(pq[pqi]) # remove the wanted node\n # empty the larger size pq to the smaller one\n if len(pq[pqi]) > len(pq[1 - pqi]):\n pqi = 1 - pqi\n while len(pq[pqi]) > 0:\n heappush(pq[1-pqi], pq[pqi][0])\n heappop(pq[pqi]) \n pqi = 1 - pqi\n heappush(pq[pqi], m0) # add the better node instead\n return '','' # no route found", "def TSP(noOfCities, cityIndicesExcluding1, citiesDistance):\r\n\r\n # Initializing the DP Matrix\r\n # Dictionary because the subset is also to be saved alongwith end vertex as index of dictionary\r\n dpMatrixDict = {}\r\n\r\n # Base case\r\n dpMatrixDict[ ( 1, ), 1 ] = 0\r\n\r\n # Iterating to solve bigger subproblems using dynamic programming\r\n for subsetSizeWithout1 in range(1, noOfCities):\r\n\r\n # Subset is of size subsetSizeWithout1 + 1\r\n print(\"COMPUTING. 
ON ITERATION NUMBER : \" + str(subsetSizeWithout1) + \" OUT OF \" + str(noOfCities - 1) )\r\n\r\n # Reducing computation by ignoring off the smaller subproblems solutions no longer required\r\n if subsetSizeWithout1 > 3:\r\n smallerEfficientDpMatrixDict = {}\r\n for key in dpMatrixDict:\r\n if len(key[0]) == subsetSizeWithout1:\r\n smallerEfficientDpMatrixDict[key] = dpMatrixDict[key]\r\n dpMatrixDict = smallerEfficientDpMatrixDict\r\n\r\n # Getting the subsets reuired\r\n sizeSpecificSubsets = getSubsets(cityIndicesExcluding1, subsetSizeWithout1)\r\n\r\n # Base cases\r\n for subset in sizeSpecificSubsets:\r\n dpMatrixDict[subset, 1] = 99999999\r\n\r\n\r\n for subset in sizeSpecificSubsets:\r\n\r\n # Computing through each possible end vertex\r\n for j in subset[1:]:\r\n\r\n # List to store the candidates for minimums\r\n possibilities = []\r\n\r\n # Computing through each possible last hop\r\n for k in subset:\r\n\r\n # Storing possibilities alongwith the end vertex\r\n if k != j:\r\n tupleCopy = tupleCopyWithoutElement(subset, j)\r\n possibilities.append( dpMatrixDict[tupleCopy, k] + citiesDistance[k, j] )\r\n\r\n # Getting the minimum path from the possible minimum candidates\r\n try:\r\n minimumPath = min(possibilities)\r\n dpMatrixDict[subset, j] = minimumPath\r\n except:\r\n continue\r\n\r\n\r\n # List for storing all final possible path candidates containing all the vertices\r\n # and a last hop between the start and end vertex to make a cycle\r\n finalHamiltonianPathCandidates = []\r\n\r\n # Final Set(and/or Subset) including all the vertices\r\n almostCompletePath = tuple(range(1, noOfCities + 1))\r\n\r\n # Adding the last hop of the cycle of hamiltonian path between the end and start vertex\r\n for j in cityIndicesExcluding1:\r\n finalHamiltonianPathCandidates.append( dpMatrixDict[almostCompletePath, j] + citiesDistance[j, 1] )\r\n\r\n # Getting the final minimum solution\r\n hamiltonianPathSolution = min(finalHamiltonianPathCandidates)\r\n\r\n # Printing the solution\r\n print(\"The optimal(minimum) length Hamiltonian path distance is : \" + str( hamiltonianPathSolution ) )\r\n\r\n return", "def hessian_cost(self, joint_angles: dict, ee_goals) -> np.ndarray:\n kinematic_map = self.kinematic_map[\"p0\"] # get map to all nodes from root\n end_effector_nodes = ee_goals.keys()\n H = np.zeros((self.n, self.n))\n for (\n ee\n ) in end_effector_nodes: # iterate through end-effector nodes, assumes sorted\n ee_path = kinematic_map[ee][\n 1:\n ] # [:-1] # no last node, only phys. 
joint locations\n t_ee = self.get_pose(joint_angles, ee).trans\n dg_ee_x = t_ee[0] - ee_goals[ee].trans[0]\n dg_ee_y = t_ee[1] - ee_goals[ee].trans[1]\n for (pdx, joint_p) in enumerate(ee_path): # algorithm fills Hess per column\n p_idx = int(joint_p[1:]) - 1\n sin_p_term = 0.0\n cos_p_term = 0.0\n for jdx in range(pdx, len(ee_path)):\n node_jdx = ee_path[jdx]\n theta_jdx = sum([joint_angles[key] for key in ee_path[0 : jdx + 1]])\n sin_p_term += self.a[node_jdx] * np.sin(theta_jdx)\n cos_p_term += self.a[node_jdx] * np.cos(theta_jdx)\n\n for (qdx, joint_q) in enumerate(\n ee_path[pdx:]\n ): # TODO: check if starting from pdx works\n qdx = qdx + pdx\n q_idx = int(joint_q[1:]) - 1\n sin_q_term = 0.0\n cos_q_term = 0.0\n for kdx in range(qdx, len(ee_path)):\n node_kdx = ee_path[kdx]\n theta_kdx = sum(\n [joint_angles[key] for key in ee_path[0 : kdx + 1]]\n )\n sin_q_term += self.a[node_kdx] * np.sin(theta_kdx)\n cos_q_term += self.a[node_kdx] * np.cos(theta_kdx)\n\n # assert(q_idx >= p_idx)\n H[p_idx, q_idx] += (\n 2.0 * sin_q_term * sin_p_term\n - 2.0 * dg_ee_x * cos_q_term\n + 2.0 * cos_p_term * cos_q_term\n - 2.0 * dg_ee_y * sin_q_term\n )\n\n return H + H.T - np.diag(np.diag(H))", "def fastest_path_estimation(sol):\n\n class Path:\n def __init__(self, places, graph):\n self.g = 0 # current cost\n self.graph = graph\n self.visited = [places[0]] # list of already visited attractions\n self.not_visited = copy.deepcopy(places[1:]) # list of attractions not yet visited\n\n def __lt__(self, other):\n return self.g < other.g\n\n def add(self, idx):\n # add the cost\n self.g += self.graph[self.visited[-1], idx]\n # add the to the visited place and remove from the unvisited places\n self.visited.append(idx)\n self.not_visited.remove(idx)\n\n def add_to_heap_queue(path):\n # custom function to add to heap queue sorted by the solution's cost\n heappush(h_queue, path)\n\n if len(sol.not_visited) == 0:\n return 0\n elif len(sol.not_visited) == 1:\n return sol.graph[sol.visited[-1], sol.not_visited[0]]\n\n c = sol.visited[-1]\n pm = sol.not_visited[-1]\n # the heap queue of solution sorted by their cost - change all to tuples with g for dijkstra\n h_queue = []\n\n # the places to use for the graph\n sub_search_places = [c]\n sub_search_places.extend(sol.not_visited)\n\n # push the first \"node\" in the queue\n add_to_heap_queue(Path(sub_search_places, sol.graph))\n while True:\n # take the next solution with the shortest cost\n path = heappop(h_queue)\n # if it contains destination, stop and return that solution\n if pm in path.visited:\n return path.g\n # create a new solution for each neighbor of the current vertex and add it to heap queue\n for place in path.not_visited:\n new_path = copy.deepcopy(path)\n new_path.add(place)\n add_to_heap_queue(new_path)", "def solve(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n\n loc_map = {}\n drop_off_dict = {}\n num_home_visited = 0\n\n \"\"\"\n for i in range(len(list_of_locations)):\n loc_map[i] = list_of_locations[0]\n \"\"\"\n\n home_indexes = convert_locations_to_indices(list_of_homes, list_of_locations)\n start = list_of_locations.index(starting_car_location)\n graph, msg = adjacency_matrix_to_graph(adjacency_matrix)\n num_homes = len(list_of_homes)\n\n car_path = []\n all_paths = dict(nx.all_pairs_dijkstra(graph))\n visited = set()\n\n #print(start)\n car_path.append(start)\n current_node = start\n\n if start in home_indexes:\n visited.add(start)\n drop_off_dict[start] = [start]\n num_home_visited += 1\n\n while 
num_home_visited < num_homes:\n dist_dict = all_paths.get(current_node)[0]\n paths_dict = all_paths.get(current_node)[1]\n\n dist_dict = {k:v for (k,v) in dist_dict.items() if k not in visited and k in home_indexes}\n min_dist = min(dist_dict.values())\n min_list = [k for k in dist_dict.keys() if dist_dict[k] <= min_dist]\n #print(dist_dict.values())\n target = min_list[0]\n drop_off_dict[target] = [target]\n #print(target+1)\n #print(target)\n car_path.pop()\n car_path.extend(paths_dict[target])\n\n visited.add(target)\n current_node = target\n num_home_visited += 1\n\n paths_dict = all_paths.get(current_node)[1]\n car_path.pop()\n car_path.extend(paths_dict[start])\n #print((drop_off_dict.keys()))\n #car_path = [start, ...., start]\n #drop_off_dict = {drop_off_loc: [home1, home2, ...] }\n\n return car_path, drop_off_dict", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n class Node:\n def __init__(self, state, parent, action, pathCost):\n self.state = state\n self.parent = parent\n self.action = action\n self.pathCost = pathCost\n\n def solution(self):\n path = list()\n tempNode = self\n while tempNode.state != problem.getStartState():\n path.insert(0, tempNode.action)\n tempNode = tempNode.parent\n return path\n\n def __eq__(self, other):\n if isinstance(other, Node):\n return self.state == other.state\n\n\n def childNode(successor, parent, action, stepCost):\n pathCost = parent.pathCost + stepCost\n child = Node(successor, parent, action, pathCost)\n return child\n\n initialNode = Node(problem.getStartState(), None, None, 0)\n frontier = util.PriorityQueue() #ucs uses a priority queue\n frontier.push(initialNode, initialNode.pathCost)\n explored = set()\n\n while not frontier.isEmpty() :\n nextNode = frontier.pop() #extract from the start of the queue\n if problem.isGoalState(nextNode.state):\n return nextNode.solution()\n explored.add(nextNode.state)\n for successor, action, stepCost in problem.getSuccessors(nextNode.state):\n child = childNode(successor, nextNode, action, stepCost)\n if child.state not in explored:\n frontier.update(child, child.pathCost) #we only check if state is in explored because update does the other\n return []\n util.raiseNotDefined()", "def update_flow(self):\n start_nodes = []\n end_nodes = []\n capacities = []\n # (1): add all edges (u, v) with capacity ub-lb\n B = self.get_max_lb()*(self.num_edges() - len(self) + 2)\n for arc in self.arc_info.keys():\n if self.arc_info[arc][\"upper_bound\"] == float('inf'):\n self.arc_info[arc][\"upper_bound\"] = B\n for arc in self.arc_info.keys():\n start_nodes.append(self.arc_info[arc][\"start\"])\n end_nodes.append(self.arc_info[arc][\"destin\"])\n capacities.append(int(self.arc_info[arc][\"upper_bound\"]\\\n - self.arc_info[arc][\"lower_bound\"]))\n # (2): add edge (t, s) with capacity B\n # B = max_lb * (m - n + 2)\n B = self.get_max_lb()*(self.num_edges() - len(self) + 2)\n if B == 0:\n #B = float('inf')\n B = 100000\n start_nodes.append(self.sink())\n end_nodes.append(self.source())\n capacities.append(int(B))\n # (3): for all verts, if exc > 0, add edge (s', v) with capacity exc(v),\n # and if exc < 0, add edge(s', v) with capacity -exc(v)\n s_prime = max(self.vertices) + 1\n t_prime = max(self.vertices) + 2\n print(\"s'={}, t'={}\".format(s_prime, t_prime))\n for v in self:\n #print(\"vert {} in arcs: {}\".format(v,\n # self.in_arcs_lists[v]))\n # compute exc: lower bounds of in - lower bounds of out\n sum_lb_in = 0\n for in_arc in self.in_arcs_lists[v]:\n sum_lb_in += 
self.arc_info[in_arc][\"lower_bound\"]\n sum_lb_out = 0\n #print(\"vert {} out arcs: {}\".format(v,\n # self.out_arcs_lists[v]))\n for out_arc in self.out_arcs_lists[v]:\n sum_lb_out += self.arc_info[out_arc][\"lower_bound\"]\n exc = sum_lb_in - sum_lb_out\n #print(\"exc is {}\".format(exc))\n if exc > 0:\n start_nodes.append(s_prime)\n end_nodes.append(v)\n capacities.append(int(exc))\n else:\n start_nodes.append(v)\n end_nodes.append(t_prime)\n capacities.append(int(-exc))\n # solve maxflow\n #print(\"s' is {} and t' is {}\".format(s_prime, t_prime))\n max_flow = pywrapgraph.SimpleMaxFlow()\n for u, v, cap in zip(start_nodes, end_nodes, capacities):\n #print(\"Adding edge {}, {} with cap {}\".format(u,v,cap))\n max_flow.AddArcWithCapacity(u, v, cap)\n success = True\n if max_flow.Solve(s_prime, t_prime) == max_flow.OPTIMAL:\n #print('Max flow: {}'.format( max_flow.OptimalFlow()))\n #print(' Arc Flow / Capacity')\n for i in range(max_flow.NumArcs()):\n # print('%1s -> %1s %3s / %3s' % (\n # max_flow.Tail(i),\n # max_flow.Head(i),\n # max_flow.Flow(i),\n # max_flow.Capacity(i)))\n # check that (s', v) edges are saturated (once we find a false,\n # stay false forever)\n if success:\n if max_flow.Tail(i) == s_prime:\n success = max_flow.Flow(i) == max_flow.Capacity(i)\n else:\n success = False\n print('There was an issue with the max flow input.')\n if success:\n # update the flows to be the flow found from maxflow problem\n for i in range(max_flow.NumArcs()):\n # if this is an original arc, update the flow\n if max_flow.Tail(i) != s_prime \\\n and max_flow.Head(i) != t_prime \\\n and not (max_flow.Tail(i) == self.sink() \\\n and max_flow.Head(i) == self.source()):\n # update arc\n start = max_flow.Tail(i)\n destin = max_flow.Head(i)\n arc = self.get_arc(start, destin)\n new_flow = self.arc_info[arc][\"lower_bound\"] + max_flow.Flow(i)\n old_flow = self.arc_info[arc][\"weight\"]\n self.arc_info[arc][\"weight\"] = new_flow\n #print(\"Edge {} {} adjusted from {} to {}\".format(\n # start,\n # destin,\n # old_flow,\n # new_flow\n # ))\n self.check_conservation_of_flow() # check that solution is valid\n return True\n else:\n return False", "def solve():\n # the amount of lattice paths from (0, 0) to (n, k) is (n+k) over n (according to Wikipedia)\n return binomial_coefficient(20 + 20, 20)", "def __smooth_emission_params(self):\n params_count = {}\n unique_symbols = []\n for key, value in self.emission_dict.items():\n if key[0] not in unique_symbols:\n unique_symbols.append(key[0])\n \n n = len(unique_symbols)\n # n refers to the number of observations/symbols \n\n for state in self.states:\n params_count[state] = [0,0,0]\n # print(params_count[state])\n # key is the state, value is list [total no. of symbols, total no. of non-zero probability, probability p]\n # i.e. 
[Ts, v, p]\n for key, value in self.emission_dict.items():\n if state in key:\n params_count[state][0] += 1\n if value != 0:\n params_count[state][1] += 1\n else:\n continue\n params_count[state][2] += 1/(params_count[state][0] + params_count[state][1])\n # p = 1/(Ts+v)\n \n for state in self.states:\n for key, value in self.emission_dict.items():\n if state in key:\n if value != 0:\n self.emission_dict[key] = value - params_count[state][2]\n else:\n self.emission_dict[key] = (params_count[state][2]*params_count[state][2])/n-params_count[state][2]\n # v*p/n-v", "def viterbi(self, features: Tensor, masks: Tensor):\n B, T, H = features.shape\n backpointers = torch.zeros(B, T, H, dtype=torch.long, device=self.device)\n \n max_score = torch.full((B, H), NOT_POSSIBLE_TRANSITION, device=self.device)\n # From start tag to any other tag\n max_score[:, self.start_idx] = 0\n # For every single timestep.\n for t in range(T):\n mask_t = masks[:, t].unsqueeze(1)\n emit_score_t = features[:, t]\n\n acc_score_t = max_score.unsqueeze(1) + self.transition_matrix\n acc_score_t, backpointers[:, t, :] = acc_score_t.max(dim=-1)\n acc_score_t += emit_score_t\n max_score = acc_score_t * mask_t + max_score * (1 - mask_t)\n\n max_score += self.transition_matrix[self.stop_idx]\n best_score, best_tag = max_score.max(dim=-1)\n\n best_paths = []\n backpointers = backpointers.cpu().numpy()\n for b in range(B):\n best_tag_b = best_tag[b].item()\n seq_len = int(masks[b, :].sum().item())\n \n best_path = [best_tag_b]\n for bps_t in reversed(backpointers[b, :seq_len]):\n best_tag_b = bps_t[best_tag_b]\n best_path.append(best_tag_b)\n\n best_paths.append(best_path[-2::-1])\n\n return best_score, best_paths", "def linearize_and_solve(g):\n\n # initialize the sparse H and the vector b\n H = np.zeros((len(g.x), len(g.x)), dtype='float')\n b = np.zeros(len(g.x), dtype='float')\n\n # set flag to fix gauge\n needToAddPrior = True\n Fx = 0\n\n # compute the addend term to H and b for each of our constraints\n print('linearize and build system')\n\n for edge in g.edges:\n\n # pose-pose constraint\n if edge.Type == 'P':\n\n # compute idx for nodes using lookup table\n fromIdx = g.lut[edge.fromNode]\n toIdx = g.lut[edge.toNode]\n\n # get node state for the current edge\n x_i = g.x[fromIdx:fromIdx + 3]\n x_j = g.x[toIdx:toIdx + 3]\n\n # (TODO) compute the error and the Jacobians\n e, A, B = linearize_pose_pose_constraint(\n x_i, x_j, edge.measurement)\n\n # # (TODO) compute the terms\n b_i = e.transpose() @ edge.information @ A\n b_j = e.transpose() @ edge.information @ B\n H_ii = A.transpose() @ edge.information @ A\n H_ij = A.transpose() @ edge.information @ B\n H_jj = B.transpose() @ edge.information @ B\n\n # (TODO) add the terms to H matrix and b\n H[fromIdx:fromIdx + 3, fromIdx:fromIdx + 3] += H_ii\n H[toIdx:toIdx + 3, toIdx:toIdx + 3] += H_jj\n H[fromIdx:fromIdx + 3, toIdx:toIdx + 3] += H_ij\n H[toIdx:toIdx + 3, fromIdx:fromIdx + 3, ] += H_ij.transpose()\n b[fromIdx:fromIdx + 3] += b_i[0, :]\n b[toIdx:toIdx + 3] += b_j[0, :]\n\n # Add the prior for one pose of this edge\n # This fixes one node to remain at its current location\n if needToAddPrior:\n H[fromIdx:fromIdx + 3, fromIdx:fromIdx +\n 3] = H[fromIdx:fromIdx + 3,\n fromIdx:fromIdx + 3] + 1000 * np.eye(3)\n needToAddPrior = False\n\n # pose-pose constraint\n elif edge.Type == 'L':\n print(\"you shouldn't be here...\")\n # compute idx for nodes using lookup table\n fromIdx = g.lut[edge.fromNode]\n toIdx = g.lut[edge.toNode]\n\n # get node states for the current edge\n x = 
g.x[fromIdx:fromIdx + 3]\n l = g.x[toIdx:toIdx + 2]\n\n # (TODO) compute the error and the Jacobians\n e, A, B = linearize_pose_landmark_constraint(\n x, l, edge.measurement)\n\n # (TODO) compute the terms\n b_i = e.transpose() @ edge.information @ A\n b_j = e.transpose() @ edge.information @ B\n H_ii = A.transpose() @ edge.information @ A\n H_ij = A.transpose() @ edge.information @ B\n H_jj = B.transpose() @ edge.information @ B\n\n # (TODO )add the terms to H matrix and b\n H[fromIdx:fromIdx + 3, fromIdx:fromIdx + 3] += H_ii\n H[toIdx:toIdx + 2, toIdx:toIdx + 2] += H_jj\n H[fromIdx:fromIdx + 3, toIdx:toIdx + 2] += H_ij\n H[toIdx:toIdx + 2, fromIdx:fromIdx + 3, ] += H_ij.transpose()\n b[fromIdx:fromIdx + 3] = b_i\n b[toIdx:toIdx + 2] = b_j\n # solve system\n dx = np.linalg.solve(H, b)\n\n return dx", "def compute_limit_matrix(gamma, adjacency, n_states):\n num_states = n_states\n identity = np.eye(num_states)\n return np.linalg.inv(identity - gamma * adjacency / 6)", "def TSP_ILP(G):\n V1 = range(len(G))\n n, V = len(G), set(V1)\n model = Model() # binary variables indicating if arc (i,j) is used\n # on the route or not\n x = [[model.add_var(var_type=BINARY) for j in V] for i in V]\n # continuous variable to prevent subtours: each city will have a\n # different sequential id in the planned route except the 1st one\n y = [model.add_var() for i in V]\n # objective function: minimize the distance\n model.objective = minimize(xsum(G[i][j]*x[i][j] for i in V for j in V))\n\n # constraint : leave each city only once\n for i in V:\n model += xsum(x[i][j] for j in V - {i}) == 1\n # constraint : enter each city only once\n for i in V:\n model += xsum(x[j][i] for j in V - {i}) == 1 # subtour elimination\n for (i, j) in product(V - {0}, V - {0}):\n if i != j:\n model += y[i] - (n+1)*x[i][j] >= y[j]-n # optimizing\n\n model.verbose = 0\n model.optimize() # checking if a solution was found\n\n if model.num_solutions:\n nc = 0 # cycle starts from vertex 0\n cycle = [nc]\n while True:\n nc = [i for i in V if x[nc][i].x >= 0.99][0]\n cycle.append(nc)\n if nc == 0:\n break\n\n return (model.objective_value, cycle)", "def _matrix_store_smooth_downhill(self):\n \n import time\n from scipy import sparse as sparse\n from scipy.sparse import linalg as linalgs \n \n\n t = time.clock()\n\n\n size = 0\n for nl in self.neighbour_array_lo_hi:\n size += 3 # len(nl)\n\n row_array = np.empty(size, dtype = int)\n col_array = np.empty(size, dtype = int)\n slope_array = np.zeros(size)\n local_slope_array = np.zeros(64)\n\n\n idx=0 \n for row in range(0, len(self.neighbour_array_lo_hi)): \n neighbours = self.neighbour_array_lo_hi[row] \n npoints = self.tri.points[neighbours]\n\n ## work out (downhill) gradient to (max of three) nearby neighbours\n \n\n for col, column in enumerate(neighbours[0:3]): \n \n delta_h = self.height[column] - self.height[row] \n\n\n if delta_h < 0.0:\n delta_s2 = (self.x[column] - self.x[row])**2 + (self.y[column] - self.y[row])**2\n local_slope_array[col] = ( delta_h**2 / delta_s2 )**5\n\n elif delta_h == 0.0 and self.bmask[row] == False:\n local_slope_array[col] = 1.0e-20\n\n else:\n local_slope_array[col] = 1.0e-20 \n \n # Normalise this so that it conserves mass (note - low points will have no contributions here !) 
\n \n norm = local_slope_array[0:len(neighbours)].sum()\n if norm != 0.0:\n norm = 1.0 / norm\n\n for col, column in enumerate(neighbours[0:3]): \n row_array[idx] = row\n col_array[idx] = column \n slope_array[idx] = local_slope_array[col] * norm\n\n idx += 1\n\n # We can re-pack this array into a sparse matrix for v. fast computation of downhill operator \n\n slopeCOO = sparse.coo_matrix( (slope_array, (row_array, col_array)) ).T\n slopeMat = slopeCOO.tocsr() \n \n print \"SlopeMat.shape \", slopeMat.shape, size\n\n # slopeNormVec = np.array(slopeMat.sum(axis=1)).T[0]\n # slopeNormVec[slopeNormVec != 0.0] = 1.0 / slopeNormVec[slopeNormVec != 0.0]\n # slopeNormMat = sparse.eye(self.tri.npoints)\n # slopeNormMat.setdiag(slopeNormVec)\n # slopeMat = slopeNormMat.dot(slopeMat)\n\n slopeMat.eliminate_zeros()\n self.smoothDownhillMat = slopeMat\n\n return", "def run(self,step=2,\n sizePop=100,\n infoFields=['migrate_to','fitness'],\n recombination_rate = 0.00375,\n migration_rate = 0.01,\n mutation_rate = [0.00000001],\n subPopNames = ['x','y','z','w'],\n alleleNames = ['A','B'],\n s1 = 0.1,\n burnin=50,\n **kargs):\n\n self.reset()\n pop=sim.Population(size=[sizePop]*self.numPop, loci=self.loci, lociPos=list(range(self.dist, (self.dist*self.loci)+1,self.dist)), subPopNames=subPopNames, infoFields=infoFields)\n \n simu = sim.Simulator(pop)\n print(\"The simulation has started\")\n t1 = time.time()\n\n\n mutate_snps=range(0,50)+range(51,101)\n\n # define the initialization of each loci based the beta distribution where a and b parameters are allele frequencies from noncoding human regions\n snps=[0.14, 0.11, 0.17, 0.11, 0.32, 0.33, 0.21, 0.11, 0.11, 0.28, 0.11, 0.12, 0.8, 0.66, 0.74, 0.68, 0.66, 0.77, 0.77, 0.76, 0.77, 0.74, 0.72, 0.11, 0.73, 0.72, 0.72, 0.72, 0.54, 0.17, 0.78, 0.64, 0.78, 0.2, 0.24, 0.25, 0.78, 0.66, 0.2, 0.14, 0.75, 0.16, 0.72, 0.18, 0.77, 0.42, 0.34, 0.7, 0.17, 0.14, 0.2, 0.46, 0.13, 0.26, 0.16, 0.13, 0.14, 0.24, 0.18, 0.36, 0.71, 0.27, 0.28, 0.25, 0.25, 0.3, 0.19, 0.14, 0.16, 0.3, 0.39, 0.16, 0.24, 0.32, 0.11, 0.18, 0.48, 0.31, 0.21, 0.15, 0.34, 0.71, 0.33, 0.18, 0.71, 0.13, 0.23, 0.2, 0.22, 0.23, 0.16, 0.23, 0.23, 0.22, 0.24, 0.82, 0.36, 0.37, 0.72, 0.16, 0.14]\n self.initFreq=[]\n\n \n for i in range(len(snps)):\n alpha=float(4*sizePop*migration_rate*snps[i])\n bhta=float(4*sizePop*migration_rate*(1-snps[i])) \n p=numpy.random.beta(alpha,bhta)\n while (p>=0.9 or p<=0.1):\n p=numpy.random.beta(alpha,bhta)\n \n print \" SNP {snp} with alpha {alpha}, bhta {bhta} and frequency {p}\".format(snp=i, alpha=alpha, bhta=bhta, p=p)\n self.initFreq.append(p)\n\n simu.evolve(\n \n initOps=[sim.InitGenotype(freq=[self.initFreq[i], 1-self.initFreq[i]], loci=i) for i in range(len(snps))],\n \n\n # initialize the sex and select the 50 loci (parents)\n preOps = [sim.InitSex(maleProp=0.5,at=[0]),\n\n # initialize the genotype of locus 50 at generation 0 (in the beginning of the simulation)\n sim.PyOperator(self.genotypeBegin,at=[0]),\n \n # Wait 50 generations for the system to reach equilibrium\n # Then, change the the genotype of locus 50 at generation 50 by inserting a single copy of allele 0 in one individual \n sim.PyOperator(self.genotypeAfter,at=[50]),\n\n # function that carries out the selection proccess\n sim.MaSelector(loci=50,wildtype=0,fitness=[1+s1, 1+s1/2, 1],begin=50, end=-1,subPops=1)],\n\n # recombination\n matingScheme=sim.RandomMating(ops=[\n sim.Recombinator(rates=recombination_rate)]),\n \n # mutation and migration of offsprings\n postOps = [\n\n \n 
sim.SNPMutator(u=mutation_rate,loci=mutate_snps),\n \n # call function to calculate Fst and check for equilibrium state\n sim.PyOperator(self.calcFst,step=step),\n\n #migration\n # Here we define an island model, but this can easily be changed.\n # For more information about the migration models, please look in the documentation of SimuPOP here http://simupop.sourceforge.net/manual_svn/build/userGuide_ch7_sec3.html\n sim.Migrator(sim.utils.migrIslandRates(migration_rate,self.numPop)),\n \n # call function to save the allele frequencies\n sim.PyOperator(self.checkAlleles, step=step, param = subPopNames),\n \n \n # check if locus 50 is lost due to genetic drift. If yes, we terminate the simulation\n sim.Stat(alleleFreq=50,step=step,subPops=1,begin=50,end=-1),\n sim.TerminateIf('alleleFreq[50][0] == 0',step=step,begin=50,end=-1),\n \n # check the progress of the simulation\n sim.PyEval('\"Gen: %d\" % gen',step=step),\n sim.PyOutput('\\n',step=step),\n \n ],\n gen=self.Gen\n \n )\n \n \n t2 = time.time()\n print \"simulation took\", t2-t1, \"seconds.\"", "def CalculatePaths(self):\n agrid = self.agrid \n self.apath = array([0 for y in range(self.T)], dtype=float)\n self.cpath = array([0 for y in range(self.T)], dtype=float)\n self.npath = array([0 for y in range(self.T)], dtype=float)\n # generate each generation's asset, consumption and labor supply forward\n for y in range(self.T-1): # y = 0, 1,..., 58\n self.apath[y+1] = max(0,interp1d(agrid, self.a[y], kind='cubic')(self.apath[y]))\n if y >= self.W:\n self.cpath[y], self.npath[y] = (1+self.r)*self.apath[y] + self.b - self.apath[y+1], 0\n else:\n self.cpath[y], self.npath[y] = self.solve(self.apath[y], self.apath[y+1])\n self.upath[y] = self.util(self.cpath[y], self.npath[y])\n # the oldest generation's consumption and labor supply\n self.cpath[self.T-1], self.npath[self.T-1] = (1+self.r)*self.apath[self.T-1]+self.b, 0\n self.upath[self.T-1] = self.util(self.cpath[self.T-1], self.npath[self.T-1])", "def iteration(self):\n T = self.generate_T()\n R = self.reproduce(T)\n self.P = self.choose_mi_best(R)\n #print(self.P)", "def shannon_parry_markov_chain(self):\n from sage.modules.free_module_element import vector\n if not self.is_deterministic():\n raise NotImplementedError(\"Automaton must be deterministic.\")\n if not self.digraph().is_aperiodic():\n raise NotImplementedError(\"Automaton must be aperiodic.\")\n if not self.digraph().is_strongly_connected():\n raise NotImplementedError(\"Automaton must be strongly connected.\")\n if not all(s.is_final for s in self.iter_states()):\n raise NotImplementedError(\"All states must be final.\")\n from sage.rings.integer_ring import ZZ\n M = self.adjacency_matrix().change_ring(ZZ)\n states = {state: i for i, state in enumerate(self.iter_states())}\n w_all = sorted(M.eigenvectors_right(),\n key=lambda x: abs(x[0]),\n reverse=True)\n w = w_all[0][1][0]\n mu = w_all[0][0]\n u_all = sorted(M.eigenvectors_left(),\n key=lambda x: abs(x[0]),\n reverse=True)\n u = u_all[0][1][0]\n u = 1/(u*w) * u\n final = vector(int(s.is_final) for s in self.iter_states())\n ff = u*final\n\n assert u*w == 1\n P = Transducer(initial_states=[s.label() for s in self.iter_initial_states()],\n final_states=[s.label() for s in self.iter_final_states()],\n on_duplicate_transition=duplicate_transition_add_input)\n for t in self.iter_transitions():\n P.add_transition(t.from_state.label(),\n t.to_state.label(),\n w[states[t.to_state]]/w[states[t.from_state]]/mu,\n t.word_in)\n for s in self.iter_states():\n P.state(s.label()).color 
= 1/(w[states[s]] * ff)\n P.state(s.label()).initial_probability = w[states[s]] * u[states[s]]\n return P", "def backtrace(sequence, M, P, i, j):\n if j <= i:\n return\n\n if M[i][j] == M[i][j-1]:\n backtrace(sequence, M, P, i, j-1)\n\n else:\n for k in range(i, j):\n if costFunction(sequence[k], sequence[j]):\n if k-1 < 0:\n if M[i][j] == M[k+1][j-1]+1:\n if (k, j) not in P:\n P.append((k, j))\n backtrace(sequence, M, P, k+1, j-1)\n if M[i][j] == M[i, k-1] + M[k+1][j-1] + 1:\n if (k, j) not in P:\n P.append((k, j))\n backtrace(sequence, M, P, i, k-1)\n backtrace(sequence, M, P, k+1, j-1)\n break", "def __prepareTransitions(self):\n self.transition_matrix=self.markovModel._get_transmat()[:]\n for i in range(len(self.transition_matrix)):\n index=np.argmax(self.transition_matrix[i,:])\n if index==i:\n self.transition_matrix[i,index]=0", "def update_trip_path(trip_mpois, paths, graph):\n n_nodes = len(trip_mpois)\n # adjacency matrix\n new_paths = np.zeros(shape=(n_nodes, n_nodes))\n\n # iterate through all the nodes and create a list of nodes with sequential id\n for i, node1 in enumerate(trip_mpois):\n for j, node2 in enumerate(trip_mpois):\n new_paths[i, j] = paths[node1, node2]\n\n # new_paths = new_paths/np.max(new_paths[new_paths < _INF])\n # new_paths[np.isinf(new_paths)] = _INF\n\n # create a dummy edge between end and start node with weight 0\n new_paths[1,0] = -_INF\n # new_paths[0,1] = _INF\n\n shortest_path = None\n if n_nodes > 5:\n shortest_path, dist = tsp.solve(n_nodes, new_paths)\n # shortest_path = range(n_nodes)\n else:\n shortest_path = range(n_nodes)\n\n trip_path = np.array(trip_mpois)[shortest_path]\n\n if ___DEBUG:\n fname = 'dump/' + str(n_nodes) + '.dist'\n np.savetxt(fname, new_paths, fmt='%.6f')\n \n mpoi_pos = np.zeros(shape=(n_nodes,2))\n \n for i, node in enumerate(trip_mpois):\n pos_3d = graph.vs[node]['position']\n assert node == graph.vs[node].index\n mpoi_pos[i,:] = pos_3d[:2]\n\n fname = 'dump/' + str(n_nodes) + '.pos'\n np.savetxt(fname, mpoi_pos)\n \n # print trip_mpois, trip_path\n\n return trip_path" ]
[ "0.705838", "0.6940395", "0.67826474", "0.67684615", "0.67196417", "0.65749055", "0.65712386", "0.6491848", "0.64757305", "0.6461549", "0.64065045", "0.6359924", "0.62643", "0.6219971", "0.6213666", "0.6170746", "0.6034146", "0.602727", "0.58137655", "0.57686436", "0.57664907", "0.57605934", "0.5743911", "0.57386005", "0.57244265", "0.5720753", "0.56915116", "0.56718415", "0.5642274", "0.56376326", "0.56376326", "0.5636088", "0.5631005", "0.5621502", "0.56152743", "0.5607391", "0.5586736", "0.5578226", "0.55647403", "0.5563835", "0.55608684", "0.554178", "0.5539804", "0.5533945", "0.5529704", "0.5528137", "0.5509813", "0.5498589", "0.5486743", "0.54770494", "0.5476871", "0.547047", "0.5469412", "0.5465959", "0.54546505", "0.54507095", "0.5447508", "0.5436431", "0.5431114", "0.54209405", "0.5414575", "0.54124576", "0.54075867", "0.5405395", "0.54016316", "0.5389142", "0.53889304", "0.53828764", "0.53795415", "0.53703403", "0.5369978", "0.5363323", "0.5356296", "0.5353073", "0.5351991", "0.53504205", "0.53503495", "0.53435963", "0.53396153", "0.53328305", "0.5332089", "0.53286284", "0.5317849", "0.5312761", "0.5312235", "0.53111064", "0.5301952", "0.5300085", "0.52854985", "0.52854806", "0.52844167", "0.5282052", "0.5274572", "0.5271752", "0.52688485", "0.52656376", "0.5265393", "0.5262417", "0.5259362", "0.5259338" ]
0.5941896
18
np.broadcast_shapes requires `numpy==1.20.0`, which is not available for `python < 3.7`.
def broadcast_shapes(*shapes: Tuple[int, ...]) -> Tuple[int, ...]:
    arrays = [np.empty(shape) for shape in shapes]
    return np.broadcast(*arrays).shape
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_broadcast_dims():\r\n test((1, 2, 3))\r\n test((2, 1, 3))\r\n test((2, 3, 1))\r\n test2((1, 2, 3))\r\n test2((2, 1, 3))\r\n test2((2, 3, 1))", "def broadcast_shape(*shapes, **kwargs):\n strict = kwargs.pop(\"strict\", False)\n reversed_shape = []\n for shape in shapes:\n for i, size in enumerate(reversed(shape)):\n if i >= len(reversed_shape):\n reversed_shape.append(size)\n elif reversed_shape[i] == 1 and not strict:\n reversed_shape[i] = size\n elif reversed_shape[i] != size and (size != 1 or strict):\n raise ValueError(\n \"shape mismatch: objects cannot be broadcast to a single shape: {}\".format(\n \" vs \".join(map(str, shapes))\n )\n )\n return tuple(reversed(reversed_shape))", "def broadcast_shapes(*args):\n x = list(np.atleast_1d(args[0])) if args else ()\n for arg in args[1:]:\n y = list(np.atleast_1d(arg))\n if len(x) < len(y):\n x, y = y, x\n x[-len(y):] = [j if i == 1 else i if j == 1 else i if i == j else 0\n for i, j in zip(x[-len(y):], y)]\n if not all(x):\n return None\n return tuple(x)", "def _infer_ndim_bcast(ndim, shape, *args):\r\n\r\n # Find the minimum value of ndim required by the *args\r\n if args:\r\n args_ndim = max(arg.ndim for arg in args)\r\n else:\r\n args_ndim = 0\r\n\r\n if isinstance(shape, (tuple, list)):\r\n # there is a convention that -1 means the corresponding shape of a\r\n # potentially-broadcasted symbolic arg\r\n #\r\n # This case combines together symbolic and non-symbolic shape\r\n # information\r\n shape_ndim = len(shape)\r\n if ndim is None:\r\n ndim = shape_ndim\r\n else:\r\n if shape_ndim != ndim:\r\n raise ValueError('ndim should be equal to len(shape), but\\n',\r\n 'ndim = %s, len(shape) = %s, shape = %s'\r\n % (ndim, shape_ndim, shape))\r\n\r\n bcast = []\r\n pre_v_shape = []\r\n for i, s in enumerate(shape):\r\n if hasattr(s, 'type'): # s is symbolic\r\n bcast.append(False) # todo - introspect further\r\n pre_v_shape.append(s)\r\n else:\r\n if s >= 0:\r\n pre_v_shape.append(tensor.as_tensor_variable(s))\r\n bcast.append((s == 1))\r\n elif s == -1:\r\n n_a_i = 0\r\n for a in args:\r\n # ndim: _ _ _ _ _ _\r\n # ashp: s0 s1 s2 s3\r\n # i\r\n if i >= ndim - a.ndim:\r\n n_a_i += 1\r\n a_i = i + a.ndim - ndim\r\n if not a.broadcastable[a_i]:\r\n pre_v_shape.append(a.shape[a_i])\r\n bcast.append(False)\r\n break\r\n else:\r\n if n_a_i == 0:\r\n raise ValueError(('Auto-shape of -1 must overlap'\r\n 'with the shape of one of the broadcastable'\r\n 'inputs'))\r\n else:\r\n pre_v_shape.append(tensor.as_tensor_variable(1))\r\n bcast.append(True)\r\n else:\r\n ValueError('negative shape', s)\r\n # post-condition: shape may still contain both symbolic and\r\n # non-symbolic things\r\n if len(pre_v_shape) == 0:\r\n v_shape = tensor.constant([], dtype='int32')\r\n else:\r\n v_shape = tensor.stack(*pre_v_shape)\r\n\r\n elif shape is None:\r\n # The number of drawn samples will be determined automatically,\r\n # but we need to know ndim\r\n if not args:\r\n raise TypeError(('_infer_ndim_bcast cannot infer shape without'\r\n ' either shape or args'))\r\n template = reduce(lambda a, b: a + b, args)\r\n v_shape = template.shape\r\n bcast = template.broadcastable\r\n ndim = template.ndim\r\n else:\r\n v_shape = tensor.as_tensor_variable(shape)\r\n if ndim is None:\r\n ndim = tensor.get_vector_length(v_shape)\r\n bcast = [False] * ndim\r\n\r\n if (not (v_shape.dtype.startswith('int') or\r\n v_shape.dtype.startswith('uint'))):\r\n raise TypeError('shape must be an integer vector or list',\r\n v_shape.dtype)\r\n\r\n if args_ndim > ndim:\r\n raise 
ValueError(\r\n 'ndim should be at least as big as required by args value',\r\n (ndim, args_ndim), args)\r\n\r\n assert ndim == len(bcast)\r\n return ndim, tensor.cast(v_shape, 'int32'), tuple(bcast)", "def broadcast_arrays(*args):\n args = [np.asarray(_m) for _m in args]\n shapes = [x.shape for x in args]\n if len(set(shapes)) == 1:\n # Common case where nothing needs to be broadcasted.\n return args\n shapes = [list(s) for s in shapes]\n strides = [list(x.strides) for x in args]\n nds = [len(s) for s in shapes]\n biggest = max(nds)\n # Go through each array and prepend dimensions of length 1 to each of\n # the shapes in order to make the number of dimensions equal.\n for i in range(len(args)):\n diff = biggest - nds[i]\n if diff > 0:\n shapes[i] = [1] * diff + shapes[i]\n strides[i] = [0] * diff + strides[i]\n # Chech each dimension for compatibility. A dimension length of 1 is\n # accepted as compatible with any other length.\n common_shape = []\n for axis in range(biggest):\n lengths = [s[axis] for s in shapes]\n unique = set(lengths + [1])\n if len(unique) > 2:\n # There must be at least two non-1 lengths for this axis.\n raise ValueError(\"shape mismatch: two or more arrays have \"\n \"incompatible dimensions on axis %r.\" % (axis,))\n elif len(unique) == 2:\n # There is exactly one non-1 length. The common shape will take\n # this value.\n unique.remove(1)\n new_length = unique.pop()\n common_shape.append(new_length)\n # For each array, if this axis is being broadcasted from a\n # length of 1, then set its stride to 0 so that it repeats its\n # data.\n for i in range(len(args)):\n if shapes[i][axis] == 1:\n shapes[i][axis] = new_length\n strides[i][axis] = 0\n else:\n # Every array has a length of 1 on this axis. Strides can be\n # left alone as nothing is broadcasted.\n common_shape.append(1)\n\n # Construct the new arrays.\n broadcasted = [as_strided(x, shape=sh, strides=st) for (x, sh, st) in\n zip(args, shapes, strides)]\n return broadcasted", "def shape_to_broadcast(shape):\n return tuple(n==1 for n in shape)", "def generalized_broadcast(arrays):\n arrays1 = np.broadcast_arrays(*[A[..., 0] for A in arrays])\n shapes_b = [A1.shape + (A.shape[-1],) for A1, A in zip(arrays1, arrays)]\n strides_b = [A1.strides + (A.strides[-1],) for A1, A in zip(arrays1, arrays)]\n arrays_b = [as_strided(A, shape=shape_Ab, strides=strides_Ab)\n for A, shape_Ab, strides_Ab in zip(arrays, shapes_b, strides_b)]\n return arrays_b", "def testBroadcastDimension(self, axis, row_length, original_dim_sizes,\n broadcast_dim_sizes):\n original_shape = RaggedTensorDynamicShape.from_dim_sizes(original_dim_sizes)\n bcast_shape = RaggedTensorDynamicShape.from_dim_sizes(broadcast_dim_sizes)\n self.assertEqual(original_shape.rank, bcast_shape.rank)\n # shape[axis].value == 1 and row_length > 1:\n bcast1 = original_shape.broadcast_dimension(axis, row_length)\n # shape[axis].value > 1 and row_length == shape[axis].value:\n bcast2 = bcast_shape.broadcast_dimension(axis, row_length)\n # shape[axis].value > 1 and row_length == 1:\n bcast3 = bcast_shape.broadcast_dimension(axis, 1)\n\n self.assertShapeEq(bcast1, bcast_shape)\n self.assertShapeEq(bcast2, bcast_shape)\n self.assertShapeEq(bcast3, bcast_shape)", "def promote_shapes(*args):\n if len(args) < 2:\n return args\n else:\n shapes = [jnp.shape(arg) for arg in args]\n batch_shape = lax.broadcast_shapes(*shapes)\n num_dims = len(batch_shape)\n return [\n jnp.reshape(arg, (1,) * (num_dims - len(s)) + s)\n if len(s) < num_dims\n else arg\n for arg, s in zip(args, 
shapes)\n ]", "def broadcast_to(x, shape):\n if x.shape == shape:\n return chainer.as_variable(x)\n y, = BroadcastTo(shape).apply((x,))\n return y", "def can_broadcast(shape1, shape2) -> bool:\n return(\n reduce(\n lambda a, b: a and b,\n starmap(\n lambda a, b: (a == b or (a == 1 or b == 1)),\n zip_longest(shape1, shape2, fillvalue=1)\n )\n )\n )", "def test_broadcast(self):\n a = np.ones((3, 4, 1))\n ai = np.ones((1, 2, 5), dtype=np.intp)\n actual = take_along_axis(a, ai, axis=1)\n assert_equal(actual.shape, (3, 2, 5))", "def test_cross_multiply_shape():\n array_1 = np.ones((1, 3))\n array_out = utils.cross_multiply_array(array_1, axis=1)\n assert (1, 3, 3) == array_out.shape", "def broadcast_rule(shape_a, shape_b):\n assert(isinstance(shape_a, tuple))\n assert(isinstance(shape_b, tuple))\n if len(shape_a) > len(shape_b):\n longer_shape, shorter_shape = shape_a, shape_b\n else:\n longer_shape, shorter_shape = shape_b, shape_a\n len_diff = len(longer_shape) - len(shorter_shape)\n for i in range(len_diff):\n # pad with leading 1s\n shorter_shape = (1,) + shorter_shape\n assert len(shorter_shape) == len(longer_shape)\n output_shape = list(longer_shape)\n for i in range(len(output_shape)):\n assert (shorter_shape[i] == longer_shape[i]) \\\n or (shorter_shape[i] == 1) \\\n or (longer_shape[i] == 1)\n output_shape[i] = max(shorter_shape[i], longer_shape[i])\n return tuple(output_shape)", "def _broadcast_shape(\n data, rank, world_size, num_parts, is_feat_data, feat_name\n):\n assert len(data.shape) in [\n 1,\n 2,\n ], f\"Data is expected to be 1-D or 2-D but got {data.shape}.\"\n data_shape = list(data.shape)\n\n if len(data_shape) == 1:\n data_shape.append(1)\n\n if is_feat_data:\n data_shape.append(DATA_TYPE_ID[data.dtype])\n\n data_shape = torch.tensor(data_shape, dtype=torch.int64)\n data_shape_output = [\n torch.zeros_like(data_shape) for _ in range(world_size)\n ]\n dist.all_gather(data_shape_output, data_shape)\n logging.debug(\n f\"[Rank: {rank} Received shapes from all ranks: {data_shape_output}\"\n )\n shapes = [x.numpy() for x in data_shape_output if x[0] != 0]\n shapes = np.vstack(shapes)\n\n if is_feat_data:\n logging.debug(\n f\"shapes: {shapes}, condition: {all(shapes[0,2] == s for s in shapes[:,2])}\"\n )\n assert all(\n shapes[0, 2] == s for s in shapes[:, 2]\n ), f\"dtypes for {feat_name} does not match on all ranks\"\n\n # compute tids here.\n type_counts = list(shapes[:, 0])\n tid_start = np.cumsum([0] + type_counts[:-1])\n tid_end = np.cumsum(type_counts)\n tid_ranges = list(zip(tid_start, tid_end))\n logging.debug(f\"starts -> {tid_start} ... end -> {tid_end}\")\n\n return tid_ranges", "def test_bootstrap_array_shape():\n test_array = np.zeros((3, 4))\n test_axis = 1\n nboot = 5\n new_array = utils.bootstrap_array(test_array, nboot=nboot, axis=test_axis)\n shape = (3, 4, 5)\n assert shape == new_array.shape", "def test_preserve_broadcastable(self):\r\n x = tensor.matrix().dimshuffle('x', 0, 'x', 1, 'x')\r\n y = x.max(axis=1)\r\n assert y.type.broadcastable == (True, True, False, True)", "def test_convolve_broadcast(self, fn, x_shape, y_shape):\n # 1. Test broadcast case\n x = torch.rand(x_shape, dtype=self.dtype, device=self.device)\n y = torch.rand(y_shape, dtype=self.dtype, device=self.device)\n out1 = getattr(F, fn)(x, y)\n # 2. 
Test without broadcast\n y_clone = y.expand(x_shape).clone()\n assert y is not y_clone\n assert y_clone.shape == x.shape\n out2 = getattr(F, fn)(x, y_clone)\n # check that they are same\n self.assertEqual(out1, out2)", "def relay_distribute(c, array, shape):\n assert shape.is_constant(tuple)\n # Make sure shape is a tuple of builtin Python integers.\n relay_shape = tuple(int(dim) for dim in shape.value)\n return relay.op.broadcast_to(c.ref(array), relay_shape)", "def test_broadcast(self):\n a = np.ones((3, 4, 1))\n ai = np.arange(10, dtype=np.intp).reshape((1, 2, 5)) % 4\n put_along_axis(a, ai, 20, axis=1)\n assert_equal(take_along_axis(a, ai, axis=1), 20)", "def match_shapes(arrs):\n #temp = [(name, np.asarray(a), deg) for name, a, deg in arrs]\n #ndim = max([a.ndim - deg for _, a, deg in arrs])\n\n temp = [a for name, a, deg in arrs]\n for i in range(len(temp)):\n if np.isscalar(temp[i]):\n temp[i] = np.array(temp[i])\n ndim = max([a.ndim - deg for a, (_, _, deg) in zip(temp, arrs)])\n\n prep_arrs = []\n for name, a, deg in arrs:\n if np.isscalar(a):\n a = np.asarray(a)\n if a.ndim < deg:\n raise RuntimeError('%s.ndim must be at least %d' % (name, deg))\n if a.ndim < ndim + deg:\n #a = a.reshape((1,) * (ndim + deg - a.ndim) + a.shape)\n slc = (nax,) * (ndim + deg - a.ndim) + (Ellipsis,)\n a = a[slc]\n prep_arrs.append(a)\n\n return prep_arrs", "def _fix_bias_shape(self, op_name, inputs, attrs):\n if (op_name == 'Add' or op_name == 'Mul') and \\\n ('broadcast' in attrs and attrs['broadcast'] == 1):\n assert len(list(inputs)) == 2\n bias_name = self._renames.get(inputs[1], inputs[1])\n bias = self._params[bias_name]\n assert len(bias.shape) == 1\n # reshape to (1, n)\n bias = mx.nd.array(bias.asnumpy().reshape((1, -1, 1, 1)))\n # broadcast_add expects shape with sym.variable\n self._nodes[bias_name] = mx.sym.Variable(name=bias_name, shape=bias.shape)\n self._params[bias_name] = bias", "async def infer_shape_broadcast_shape(track, shpx, shpy):\n tx = await shpx['type']\n ty = await shpy['type']\n n = max(len(tx.elements), len(ty.elements))\n return TupleShape([NOSHAPE] * n)", "def test_unbroadcast_addbroadcast(self):\r\n\r\n x = matrix()\r\n assert unbroadcast(x, 0) is x\r\n assert unbroadcast(x, 1) is x\r\n assert unbroadcast(x, 1, 0) is x\r\n assert unbroadcast(x, 0, 1) is x\r\n\r\n assert addbroadcast(x, 0) is not x\r\n assert addbroadcast(x, 1) is not x\r\n assert addbroadcast(x, 1, 0).owner.inputs[0] is x\r\n\r\n assert unbroadcast(addbroadcast(x, 0), 0) is x\r\n assert addbroadcast(unbroadcast(x, 0), 0) is not x\r\n x = row()\r\n assert unbroadcast(x, 0) is not x\r\n assert unbroadcast(x, 1) is x\r\n assert unbroadcast(x, 1, 0) is not x\r\n assert unbroadcast(x, 0, 1) is not x\r\n\r\n assert addbroadcast(x, 0) is x\r\n assert addbroadcast(x, 1).owner.inputs[0] is x\r\n assert addbroadcast(x, 1, 0).owner.inputs[0] is x\r\n assert addbroadcast(x, 0, 1).owner.inputs[0] is x\r\n\r\n assert unbroadcast(addbroadcast(x, 1), 1) is x\r\n assert addbroadcast(unbroadcast(x, 1), 1) is not x\r\n\r\n # The first broadcast is remove the broadcast, so the second\r\n # should not make one\r\n assert unbroadcast(unbroadcast(x, 0), 0).owner.inputs[0] is x\r\n\r\n # Test that consecutive Rebroadcast op are fused\r\n x = TensorType(dtype='float64', broadcastable=(True, True))()\r\n assert unbroadcast(unbroadcast(x, 1), 0).owner.inputs[0] is x\r\n assert addbroadcast(unbroadcast(x, 1), 0).owner.inputs[0] is x\r\n assert addbroadcast(unbroadcast(x, 0), 0) is x", "def conv_broadcast(x, kernel_shape, padding, 
strides):\n if len(kernel_shape) == 2:\n return conv2d_broadcast(x, kernel_shape[0], kernel_shape[1],\n padding, strides)\n elif len(kernel_shape) == 1:\n return conv1d_broadcast(x, kernel_shape[0], padding, strides[0])\n else:\n raise ValueError()", "def test_shape_error(self):\n raise unittest.SkipTest(\"Failing after fixing Poly unsoundness #4878\")\n with self.assertRaisesRegex(TypeError,\n re.escape(\"add got incompatible shapes for broadcasting: (v,), (4,)\")):\n self.CheckShapePolymorphism(\n lambda x, y: x + y,\n input_signature=[tf.TensorSpec([None]), tf.TensorSpec([4])],\n in_shapes=[\"(v,)\", \"(4,)\"],\n expected_output_signature=tf.TensorSpec([None]))\n\n four_ones = np.ones((4,))\n # We get the error even if we use correct actual arguments\n with self.assertRaisesRegex(TypeError,\n re.escape(\"add got incompatible shapes for broadcasting: (v,), (4,)\")):\n jax2tf.convert(lambda x, y: x + y,\n in_shapes=[\"(v,)\", \"(4,)\"])(four_ones, four_ones)\n\n with self.assertRaisesRegex(TypeError,\n re.escape(\"dot_general requires contracting dimensions to have the same shape, got [4] and [v].\")):\n jax2tf.convert(lambda x: jnp.matmul(x, x),\n in_shapes=[\"(v, 4)\"])(np.ones((4, 4)))\n\n # TODO: this is an opportunity to improve the translation, should not error\n with self.assertRaisesRegex(TypeError,\n \"Only integers, .* tensors are valid indices, got 0\"):\n jax2tf.convert(lambda x: jnp.split(x, 2),\n in_shapes=[\"(2*v,)\"])(four_ones)", "def broadcast_array(array, axis_index, shape):\n\n if type(axis_index) in [float, int]:\n start_axis_index = end_axis_index = axis_index\n else:\n assert len(axis_index) == 2\n start_axis_index, end_axis_index = axis_index\n \n dim = start_axis_index - 1\n while dim >= 0:\n array = array[numpy.newaxis, ...]\n array = numpy.repeat(array, shape[dim], axis=0)\n dim = dim - 1\n \n dim = end_axis_index + 1\n while dim < len(shape): \n array = array[..., numpy.newaxis]\n array = numpy.repeat(array, shape[dim], axis=-1)\n dim = dim + 1\n\n return array", "def _fix_shape(self, value):\n for k, v in self.variables.items():\n if len(v.shape) < len(value.shape):\n a, b = self._broadcast(value, v)\n self.variables[k] = np.zeros(a.shape, dtype=b.dtype) + b", "def test_broadcastable_flag_assignment_mixed_thisaxes(self):\r\n rng = numpy.random.RandomState(seed=utt.fetch_seed())\r\n a_val = rng.rand(2, 4, 1).astype(self.floatX)\r\n b_val = rng.rand(1, 4, 1).astype(self.floatX)\r\n\r\n a = self.shared(a_val, broadcastable=(False, False, True))\r\n b = self.shared(b_val, broadcastable=(True, False, True))\r\n c = self.join_op()(0, a, b)\r\n assert not c.type.broadcastable[0]\r\n\r\n f = function([], c, mode=self.mode)\r\n topo = f.maker.fgraph.toposort()\r\n assert [True for node in topo if isinstance(node.op, self.join_op)]\r\n\r\n f()\r\n utt.verify_grad((lambda a, b: join(0, a, b)), [a_val, b_val], rng=rng)\r\n # Should raise an error if b_val.shape[0] is not 1\r\n # We can't set the value|\r\n self.assertRaises(TypeError, b.set_value,\r\n rng.rand(3, 4, 1).astype(self.floatX))\r\n a = TensorType(dtype=self.floatX, broadcastable=[0, 0, 1])()\r\n b = TensorType(dtype=self.floatX, broadcastable=[1, 0, 1])()\r\n c = join(0, a, b)\r\n f = function([a, b], c, mode=self.mode)\r\n bad_b_val = rng.rand(3, 4, 1).astype(self.floatX)\r\n self.assertRaises(TypeError, f, a_val, bad_b_val)", "def testMaskErrorIncompatibleRank3(self):\n\n np_mask = np.ones((2, 4, 4))\n x = tf.constant(0.0, shape=(2, 8, 6))\n\n # Test with both numpy arrays and Tensors.\n for mask in 
(np_mask, tf.convert_to_tensor(np_mask)):\n with self.assertRaises(snt.Error) as cm:\n snt.Conv1D(output_channels=4, kernel_shape=5, mask=mask)(x)\n self.assertTrue(str(cm.exception).startswith(\n \"Invalid mask shape: {}\".format(np_mask.shape)))", "def _assert_is_batched(self, *arrays):\n shape_list = []\n for array in arrays:\n if isinstance(array, tf.Tensor):\n shape_list.append(array.shape.as_list())\n else:\n shape_list.append(np.shape(array))\n # All arrays should have at least two dimensions.\n assert all([len(shape) >= 2 for shape in shape_list])\n # All arrays should have the same batch size.\n assert len(set([shape[0] for shape in shape_list])) == 1", "def test_broadcastable_flag_assignment_mixed_otheraxes(self):\r\n rng = numpy.random.RandomState(seed=utt.fetch_seed())\r\n a_val = rng.rand(1, 4, 1).astype(self.floatX)\r\n b_val = rng.rand(1, 3, 1).astype(self.floatX)\r\n\r\n a = self.shared(a_val, broadcastable=(False, False, True))\r\n b = self.shared(b_val, broadcastable=(True, False, True))\r\n c = self.join_op()(1, a, b)\r\n assert c.type.broadcastable[0] and c.type.broadcastable[2]\r\n assert not c.type.broadcastable[1]\r\n\r\n # Opt can remplace the int by a Theano constant\r\n c = self.join_op()(theano.tensor.constant(1), a, b)\r\n assert c.type.broadcastable[0] and c.type.broadcastable[2]\r\n assert not c.type.broadcastable[1]\r\n\r\n # In case futur opt insert other useless stuff\r\n c = self.join_op()(theano.tensor.cast(theano.tensor.constant(1),\r\n dtype=\"int32\"),\r\n a, b)\r\n assert c.type.broadcastable[0] and c.type.broadcastable[2]\r\n assert not c.type.broadcastable[1]\r\n\r\n f = function([], c, mode=self.mode)\r\n topo = f.maker.fgraph.toposort()\r\n assert [True for node in topo if isinstance(node.op, self.join_op)]\r\n\r\n f()\r\n utt.verify_grad((lambda a, b: join(1, a, b)), [a_val, b_val], rng=rng)\r\n\r\n # Should raise an error if dimension 0 does not match\r\n a.set_value(rng.rand(2, 4, 1).astype(self.floatX))\r\n self.assertRaises(ValueError, f)", "def blend_shapes(betas, shape_disps):\n blend_shape = torch.einsum('bl,mkl->bmk', [betas, shape_disps])\n return blend_shape", "def blend_shapes(betas, shape_disps):\n blend_shape = torch.einsum('bl,mkl->bmk', [betas, shape_disps])\n return blend_shape", "def assert_equal_shapes(numpy_arrays: list):\n\n if len(numpy_arrays) < 2:\n return\n\n shapes = np.asarray([np.shape(_arr) for _arr in numpy_arrays]).astype(float)\n mean = np.mean(shapes, axis=0)\n for i in range(len(shapes)):\n shapes[i, :] = shapes[i, :] - mean\n\n if not np.sum(np.abs(shapes)) <= 1e-5:\n raise AssertionError(\"The given volumes did not all have the same\"\n \" dimensions. Please double check the simulation\"\n f\" parameters. 
Called from {inspect.stack()[1].function}\")", "def test_cross_multiply_array_different_shapes():\n array_1 = np.zeros((1, 2, 3))\n array_2 = np.zeros((2, 3, 4))\n axis = 2\n pytest.raises(ValueError, utils.cross_multiply_array, array_1, array_2, axis)", "def broadcastable(self):\n return tuple(s==1 for s in self.shape)", "def testMaskErrorIncompatibleRank1(self):\n\n np_mask = np.ones((3,))\n x = tf.constant(0.0, shape=(2, 8, 8, 6))\n\n # Test with both numpy arrays and Tensors.\n for mask in (np_mask, tf.convert_to_tensor(np_mask)):\n with self.assertRaises(snt.Error) as cm:\n snt.Conv2D(output_channels=4, kernel_shape=5, mask=mask)(x)\n self.assertTrue(str(cm.exception).startswith(\n \"Invalid mask shape: {}\".format(np_mask.shape)))", "def testMaskErrorIncompatibleRank2(self):\n\n np_mask = np.ones((3, 3))\n x = tf.constant(0.0, shape=(2, 8, 6))\n\n # Test with both numpy arrays and Tensors.\n for mask in (np_mask, tf.convert_to_tensor(np_mask)):\n with self.assertRaises(snt.Error) as cm:\n snt.Conv1D(output_channels=4, kernel_shape=5, mask=mask)(x)\n self.assertTrue(str(cm.exception).startswith(\n \"Invalid mask shape: {}\".format(np_mask.shape)))", "def testMaskErrorIncompatibleRank3(self):\n\n np_mask = np.ones((2, 4, 4))\n x = tf.constant(0.0, shape=(2, 8, 8, 6))\n\n # Test with both numpy arrays and Tensors.\n for mask in (np_mask, tf.convert_to_tensor(np_mask)):\n with self.assertRaises(snt.Error) as cm:\n snt.Conv2D(output_channels=4, kernel_shape=5, mask=mask)(x)\n self.assertTrue(str(cm.exception).startswith(\n \"Invalid mask shape: {}\".format(np_mask.shape)))", "def broadcast_arrays(*args):\n from .dataarray import DataArray\n\n all_indexes = _get_all_indexes(args)\n for k, v in all_indexes.items():\n if not all(v[0].equals(vi) for vi in v[1:]):\n raise ValueError('cannot broadcast arrays: the %s index is not '\n 'aligned (use xray.align first)' % k)\n\n vars = broadcast_variables(*[a.variable for a in args])\n indexes = dict((k, all_indexes[k][0]) for k in vars[0].dims)\n\n arrays = []\n for a, v in zip(args, vars):\n arr = DataArray(v.values, indexes, v.dims, a.name, a.attrs, a.encoding)\n for k, v in a.coords.items():\n arr.coords[k] = v\n arrays.append(arr)\n\n return tuple(arrays)", "def test_convolve_input_dim_check(self, case, fn, x_shape, y_shape):\n x = torch.rand(*x_shape, dtype=self.dtype, device=self.device)\n y = torch.rand(*y_shape, dtype=self.dtype, device=self.device)\n\n message = [\n \"The operands must be the same dimension\",\n \"Leading dimensions of x and y are not broadcastable\",\n ][case]\n with self.assertRaisesRegex(ValueError, message):\n fn(x, y)", "def test_broadcast_add_diff_shapes(self):\n tensor1 = Tensor([[1, 2, 3], [4, 5, 6]], requires_grad = True) # (2, 3)\n tensor2 = Tensor([[7, 8, 9]], requires_grad = True) # (1, 3)\n\n tensor3 = tensor1 + tensor2\n tensor3.backward(Tensor([[1, 1, 1], [1, 1, 1]]))\n\n assert tensor1.grad.data.tolist() == [[1, 1, 1], [1, 1, 1]]\n assert tensor2.grad.data.tolist() == [[2, 2, 2]]", "def test_centroid_com_mask_shape():\n with pytest.raises(ValueError):\n mask = np.zeros((2, 2), dtype=bool)\n centroid_com(np.zeros((4, 4)), mask=mask)", "def makeSharedArrays(shapeList):\n out_arrays = []\n for shape in shapeList:\n nx,ny = shape\n arr = Array('d', nx*ny)\n out_arrays.append(np.frombuffer(arr.get_obj()).reshape(shape))\n\n return out_arrays", "def testMaskErrorIncompatibleRank4(self):\n\n np_mask = np.ones((3, 3, 4, 5))\n x = tf.constant(0.0, shape=(2, 8, 8, 6))\n\n # Test with both numpy arrays and Tensors.\n 
for mask in (np_mask, tf.convert_to_tensor(np_mask)):\n with self.assertRaises(snt.Error) as cm:\n snt.Conv2D(output_channels=4, kernel_shape=5, mask=mask)(x)\n self.assertTrue(str(cm.exception).startswith(\n \"Invalid mask shape: {}\".format(np_mask.shape)))", "def testMaskErrorIncompatibleRank1(self):\n\n np_mask = np.ones((2,), dtype=np.float32)\n x = tf.constant(0.0, shape=(2, 8, 6, 5, 5), dtype=np.float32)\n\n # Test with both numpy arrays and Tensors.\n for mask in (np_mask, tf.convert_to_tensor(np_mask)):\n with self.assertRaises(snt.Error) as cm:\n snt.Conv3D(output_channels=4, kernel_shape=5, mask=mask)(x)\n self.assertTrue(str(cm.exception).startswith(\n \"Invalid mask shape: {}\".format(np_mask.shape)))", "def testMaskErrorIncompatibleRank1(self):\n\n np_mask = np.ones((3,))\n x = tf.constant(0.0, shape=(2, 8, 6))\n\n # Test with both numpy arrays and Tensors.\n for mask in (np_mask, tf.convert_to_tensor(np_mask)):\n with self.assertRaises(snt.Error) as cm:\n snt.Conv1D(output_channels=4, kernel_shape=5, mask=mask)(x)\n self.assertTrue(str(cm.exception).startswith(\n \"Invalid mask shape: {}\".format(np_mask.shape)))", "def test_broadcastable_flags_all_broadcastable_on_joinaxis(self):\r\n rng = numpy.random.RandomState(seed=utt.fetch_seed())\r\n a_val = rng.rand(1, 4, 1).astype(self.floatX)\r\n b_val = rng.rand(1, 4, 1).astype(self.floatX)\r\n\r\n a = self.shared(a_val, broadcastable=(True, False, True))\r\n b = self.shared(b_val, broadcastable=(True, False, True))\r\n c = self.join_op()(0, a, b)\r\n assert not c.type.broadcastable[0]\r\n\r\n f = function([], c, mode=self.mode)\r\n topo = f.maker.fgraph.toposort()\r\n assert [True for node in topo if isinstance(node.op, self.join_op)]\r\n\r\n f()\r\n utt.verify_grad((lambda a, b: join(0, a, b)), [a_val, b_val], rng=rng)", "def broadcast() -> BroadcastDistribute:\n return _broadcast", "def addbroadcast(x, *axes):\n if is_theano_object(x):\n # T.addbroadcast only works with positive axes\n axes = [ ax if ax >= 0 else x.ndim + ax for ax in axes ]\n return T.addbroadcast(x, *axes)\n else:\n for ax in axes:\n if x.shape[ax] != 1:\n raise ValueError(\"Tried to make axis {} of a variable with shape {} broadcastable. \"\n \"Only dimensions with length 1 can be broadcasted.\"\n .format(ax, x.shape))\n return x", "def encompasses_broadcastable(b1, b2):\r\n if len(b1) < len(b2):\r\n return False\r\n b1 = b1[-len(b2):]\r\n return not any(v1 and not v2 for v1, v2 in zip(b1, b2))", "def shape(n_layers, n_wires, n_broadcast=None):\n if n_wires == 1:\n wire_dim = 1\n elif n_wires == 2:\n wire_dim = 3\n else:\n wire_dim = 2 * n_wires\n\n if n_broadcast:\n return n_broadcast, n_layers, wire_dim\n\n return n_layers, wire_dim", "def test_broadcast_add_diff_shapes_torch(self):\n tensor1 = Tensor([[1, 2, 3], [4, 5, 6]], requires_grad = True) # (2, 3)\n tensor2 = Tensor([[7, 8, 9]], requires_grad = True) # (1, 3)\n\n tensor3 = tensor1 + tensor2\n tensor3.backward(Tensor([[1, 1, 1], [1, 1, 1]]))\n\n assert tensor1.grad.data.tolist() == [[1, 1, 1], [1, 1, 1]]\n assert tensor2.grad.data.tolist() == [[2, 2, 2]]", "def _broadcast_to(x, shape_cur, shape_to, ndim_to):\n size = _tile_size(shape_cur, shape_to, ndim_to)\n return F.tile(x, size)", "def test_write_broadcast(self):\n dt = np.dtype('(3,)i')\n\n dset = self.f.create_dataset('x', (10,), dtype=dt)\n\n with self.assertRaises(TypeError):\n dset[...] 
= 42", "def test_elemwise_collapse():\r\n\r\n shape = (4, 5, 60)\r\n a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),\r\n dtype='float32'))\r\n a = theano._asarray(numpy.random.rand(*shape), dtype='float32')\r\n a2 = tcn.shared_constructor(a, 'a')\r\n a3 = a2.dimshuffle(0, 'x', 1, 2)\r\n b = tcn.CudaNdarrayType((False, True, False, False))()\r\n c = a3 + b\r\n f = pfunc([b], [c], mode=mode_with_gpu)\r\n\r\n v = theano._asarray(numpy.random.rand(shape[0], 1, *shape[1:]),\r\n dtype='float32')\r\n v = cuda_ndarray.CudaNdarray(v)\r\n\r\n #let debugmode catch errors\r\n out = f(v)[0]\r\n assert numpy.allclose(out, a.reshape(shape[0], 1, *shape[1:]) + v)\r\n #print \"Expected collapse of all dimensions\"\r", "def test_cross_multiply_from_list():\n array_1 = np.ones((1, 3)).tolist()\n array_out = utils.cross_multiply_array(array_1, axis=1)\n assert (1, 3, 3) == array_out.shape", "def test_newaxis(self):\r\n newaxis = numpy.newaxis\r\n\r\n n = self.shared(numpy.arange(24, dtype=self.dtype).reshape((2, 3, 4)))\r\n assert n.ndim == 3\r\n\r\n n4 = n[newaxis, :, :, :]\r\n assert n4.broadcastable == (True, False, False, False), n4\r\n\r\n n4 = n[:, newaxis, :, :]\r\n assert n4.broadcastable == (False, True, False, False), n4\r\n\r\n n4 = n[:, :, newaxis, :]\r\n assert n4.broadcastable == (False, False, True, False), n4\r\n\r\n n4 = n[:, :, :, newaxis]\r\n assert n4.broadcastable == (False, False, False, True), n4\r\n\r\n n3 = n.flatten()[newaxis, :, newaxis]\r\n assert n3.broadcastable == (True, False, True), n3\r\n\r\n s = cscalar()\r\n s1 = s[newaxis]\r\n assert s1.broadcastable == (True,), s1\r\n\r\n vs1, vn3, vn4 = theano.function([s], [s1, n3, n4])(-2.0)\r\n\r\n assert numpy.all(vs1 == [-2.0])\r\n assert numpy.all(vn3\r\n == numpy.arange(24)[newaxis, :, newaxis])\r\n assert numpy.all(vn4\r\n == numpy.arange(24).reshape((2, 3, 4))[:, :, :, newaxis])", "def flatten_args(shapes):\n def flatten_args_dec(func):\n\n @wraps(func)\n def new_func(array1d, *args, **kwargs):\n args = tuple(unflatten(array1d, shapes)) + args\n return func(*args, **kwargs)\n\n return new_func\n\n return flatten_args_dec", "def same_nd(shape, stride, kernel_size):\n\n rshape = []\n for sh, st, sz in zip(shape, stride, kernel_size):\n rshape.append(int(same_x(sh, st, sz)))\n return rshape", "def testMaskErrorIncompatibleRank4(self):\n\n np_mask = np.ones((5, 5, 5, 2), dtype=np.float32)\n x = tf.constant(0.0, shape=(2, 8, 6, 5, 5), dtype=np.float32)\n\n # Test with both numpy arrays and Tensors.\n for mask in (np_mask, tf.convert_to_tensor(np_mask)):\n with self.assertRaises(snt.Error) as cm:\n snt.Conv3D(output_channels=4, kernel_shape=5, mask=mask)(x)\n self.assertTrue(str(cm.exception).startswith(\n \"Invalid mask shape: {}\".format(np_mask.shape)))", "def test_elemwise_collapse3():\r\n\r\n shape = (4, 5)\r\n a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),\r\n dtype='float32'))\r\n a = theano._asarray(numpy.random.rand(*shape),\r\n dtype='float32')\r\n a2 = tcn.shared_constructor(a, 'a')\r\n a3 = a2.dimshuffle('x', 0, 1, 'x')\r\n b = tcn.CudaNdarrayType((False, False, False, False))()\r\n c = (a3 + b)\r\n f = pfunc([b], [c], mode=mode_with_gpu)\r\n\r\n v = theano._asarray(numpy.random.rand(5, shape[0], shape[1], 4),\r\n dtype='float32')\r\n v = cuda_ndarray.CudaNdarray(v)\r\n\r\n #let debugmode catch errors\r\n out = f(v)[0]\r\n assert numpy.allclose(out, a.reshape(1, shape[0], shape[1], 1) + v)\r\n #print \"Expected collapse to 3 dimensions\"\r", "def nd_shape_checking(x, y, 
mvaxis, traxis):\n assert x.ndim == y.ndim\n dims = np.delete(np.arange(x.ndim), -2)\n assert all([x.shape[k] == y.shape[k] for k in dims])", "def testMaskErrorIncompatibleRank2(self):\n\n np_mask = np.ones((3, 3))\n x = tf.constant(0.0, shape=(2, 8, 8, 6))\n\n # Test with both numpy arrays and Tensors.\n for mask in (np_mask, tf.convert_to_tensor(np_mask)):\n with self.assertRaises(snt.Error) as cm:\n snt.Conv2D(output_channels=4, kernel_shape=5, mask=mask)(x)\n self.assertTrue(str(cm.exception).startswith(\n \"Invalid mask shape: {}\".format(np_mask.shape)))", "def testMaskErrorIncompatibleRank2(self):\n\n np_mask = np.ones((5, 2), dtype=np.float32)\n x = tf.constant(0.0, shape=(2, 8, 6, 5, 5), dtype=np.float32)\n\n # Test with both numpy arrays and Tensors.\n for mask in (np_mask, tf.convert_to_tensor(np_mask)):\n with self.assertRaises(snt.Error) as cm:\n snt.Conv3D(output_channels=4, kernel_shape=5, mask=mask)(x)\n self.assertTrue(str(cm.exception).startswith(\n \"Invalid mask shape: {}\".format(np_mask.shape)))", "def make_pbroadcast_function(fn, in_axes, out_axes, out_dtype):\n\n if not isinstance(in_axes, tuple):\n in_axes = (in_axes,)\n\n def pbroadcast_fn(*args):\n nest.assert_shallow_structure(args, in_axes)\n nest.assert_shallow_structure(out_dtype, out_axes)\n map_in_axes = nest.map_structure_up_to(args, canonicalize_axis_name,\n in_axes)\n map_out_axes = nest.map_structure_up_to(out_dtype, canonicalize_axis_name,\n out_axes)\n\n def _pbroadcast_input(out_axes, x, in_axes):\n psum_axes = [\n axis_name for axis_name in out_axes if axis_name not in in_axes\n ]\n return pbroadcast(x, psum_axes)\n\n def _flat_fn_index(i, *args):\n out = fn(*args)\n return tf.nest.flatten(out)[i]\n\n def _flat_fn(*args):\n outputs = []\n for i, out_axis in enumerate(nest.flatten_up_to(out_dtype, map_out_axes)):\n local_args = nest.map_structure_up_to(\n args, functools.partial(_pbroadcast_input, out_axis), args,\n map_in_axes)\n outputs.append(_flat_fn_index(i, *local_args))\n return tf.nest.pack_sequence_as(out_dtype, outputs)\n\n return _flat_fn(*args)\n\n return pbroadcast_fn", "def test_zlevel_broadcast_fail():\n lons, lats = np.arange(10), np.arange(10)\n zlevel = np.arange(2)\n emsg = \"Cannot broadcast zlevel\"\n with pytest.raises(ValueError, match=emsg):\n _ = to_cartesian(lons, lats, zlevel=zlevel)", "def adv_index_broadcastable_pattern(a, idx):\r\n\r\n def replace_slice(v):\r\n if isinstance(v, gof.Apply):\r\n if len(v.outputs) != 1:\r\n raise ValueError(\r\n \"It is ambiguous which output of a multi-output Op has\"\r\n \" to be fetched.\", v)\r\n else:\r\n v = v.outputs[0]\r\n\r\n if NoneConst.equals(v):\r\n return None\r\n if isinstance(v.type, SliceType):\r\n return slice(None, None)\r\n\r\n return numpy.zeros((2,) * v.ndim, int)\r\n\r\n newidx = tuple(map(replace_slice, idx))\r\n\r\n #2 - True = 1; 2 - False = 2\r\n fakeshape = [2 - bc for bc in a.broadcastable]\r\n retshape = numpy.empty(fakeshape)[newidx].shape\r\n return tuple([dim == 1 for dim in retshape])", "def have_same_shapes(array1, array2):\n return array1.shape == array2.shape", "def check_shapes(arrs):\r\n shps = [i.shape for i in arrs]\r\n eq = np.all(np.array([shps[0] == i for i in shps[1:]]))\r\n err = \"Arrays arr not of the same shape...\"\r\n if not eq:\r\n raise ValueError(\"{}\\n{}\".format(err, shps))", "def test_jax_Reshape_concrete_shape():\n a = vector(\"a\")\n x = reshape(a, a.shape)\n x_fg = FunctionGraph([a], [x])\n compare_jax_and_py(x_fg, [np.r_[1.0, 2.0, 3.0, 4.0].astype(config.floatX)])\n\n x = 
reshape(a, (a.shape[0] // 2, a.shape[0] // 2))\n x_fg = FunctionGraph([a], [x])\n compare_jax_and_py(x_fg, [np.r_[1.0, 2.0, 3.0, 4.0].astype(config.floatX)])", "def test_reduce_dimensionality(embeddings, shape):\n model = BERTopic()\n umap_embeddings = model._reduce_dimensionality(embeddings)\n assert umap_embeddings.shape == (shape, 5)", "def test_reduce_dimensionality(base_bertopic, embeddings, shape):\n umap_embeddings = base_bertopic._reduce_dimensionality(embeddings)\n assert umap_embeddings.shape == (shape, 5)", "def test_write_broadcast(setup_teardown_file):\n f = setup_teardown_file[3]\n\n dt = np.dtype('(3,)i')\n\n dset = f.create_dataset('x', (10,), dtype=dt)\n dset[...] = 42", "def testMaskErrorIncompatibleRank3(self):\n\n np_mask = np.ones((5, 5, 2), dtype=np.float32)\n x = tf.constant(0.0, shape=(2, 8, 6, 5, 5), dtype=np.float32)\n\n # Test with both numpy arrays and Tensors.\n for mask in (np_mask, tf.convert_to_tensor(np_mask)):\n with self.assertRaises(snt.Error) as cm:\n snt.Conv3D(output_channels=4, kernel_shape=5, mask=mask)(x)\n self.assertTrue(str(cm.exception).startswith(\n \"Invalid mask shape: {}\".format(np_mask.shape)))", "def test_elemwise_collapse7(atol=1e-6):\r\n\r\n shape = (5, 4, 1)\r\n a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),\r\n dtype='float32'))\r\n a = theano._asarray(numpy.random.rand(*shape), dtype='float32')\r\n a2 = tcn.shared_constructor(a.copy(), 'a')\r\n a3 = a2.dimshuffle(0, 'x', 1, 2)\r\n f = pfunc([], [a3 + 2], mode=mode_with_gpu)\r\n\r\n #let debugmode catch errors\r\n out = f()[0]\r\n ans = (a + 2).reshape(shape[0], 1, shape[1], shape[2])\r\n assert numpy.allclose(out, ans, atol=atol)\r\n #print \"Expected collapse to c contiguous\"\r", "def test_cross_multiply_array_2_list():\n array_1 = np.ones((1, 3))\n array_2 = np.ones((1, 3)).tolist()\n array_out = utils.cross_multiply_array(array_1, array_2, axis=1)\n assert (1, 3, 3) == array_out.shape", "def test_elemwise_collapse4():\r\n\r\n shape = (4, 5)\r\n a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),\r\n dtype='float32'))\r\n a = theano._asarray(numpy.random.rand(*shape), dtype='float32')\r\n a2 = tcn.shared_constructor(a, 'a')\r\n a3 = a2.dimshuffle('x', 0, 1, 'x')\r\n b = tcn.CudaNdarrayType((False, False, False, False))()\r\n c = (a3 + b + 2)\r\n f = pfunc([b], [c], mode=mode_with_gpu)\r\n\r\n v = theano._asarray(numpy.random.rand(5, shape[0], shape[1], 4),\r\n dtype='float32')\r\n v = cuda_ndarray.CudaNdarray(v)\r\n #let debugmode catch errors\r\n out = f(v)[0]\r\n assert numpy.allclose(out, a.reshape(1, shape[0], shape[1], 1) + v + 2)\r\n #print \"Expected collapse to 3 dimensions\"\r", "def as_same_dimension(*arrays):\n ndim = arrays[0].ndim\n for a in arrays:\n if a.ndim == ndim:\n continue\n # XXX could try broadcasting here\n raise ValueError(\"Invalid array dimensions: %s vs %s\" % (ndim, a.ndim))\n return arrays", "def _broadcast(self, v1, v2):\n v1, v2 = np.array(v1), np.array(v2)\n if len(v1.shape) < len(v2.shape):\n idx = tuple(slice(None) for i in range(len(v1.shape)))\n idx = idx + (None,) * (len(v2.shape) - len(v1.shape))\n return v1[idx], v2\n elif len(v1.shape) > len(v2.shape):\n idx = tuple(slice(None) for i in range(len(v2.shape)))\n idx = idx + (None,) * (len(v1.shape) - len(v2.shape))\n return v1, v2[idx]\n else:\n return v1, v2", "def _is_broadcast(self, op, op_reg_manager):\n op_slices = op_reg_manager.get_op_slices(op)\n op_groups = [op_reg_manager.get_op_group(op_slice)\n for op_slice in op_slices]\n return 
op_handler_util.get_op_size(op) == 1 and all(op_groups)", "def addbroadcast(x, *axes):\r\n rval = Rebroadcast(*[(axis, True) for axis in axes])(x)\r\n return theano.tensor.opt.apply_rebroadcast_opt(rval)", "def test_ndim_fail():\n lons = lats = np.array([0]).reshape(-1, 1, 1, 1)\n emsg = \"Require at most 3-D\"\n with pytest.raises(ValueError, match=emsg):\n _ = to_cartesian(lons, lats)", "def test_elemwise_collapse2():\r\n\r\n shape = (4, 5, 9)\r\n a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),\r\n dtype='float32'))\r\n a = theano._asarray(numpy.random.rand(*shape), dtype='float32')\r\n a2 = tcn.shared_constructor(a, 'a')\r\n a3 = a2.dimshuffle(0, 'x', 1, 2)\r\n b = tcn.CudaNdarrayType((False, False, False, False))()\r\n c = a3 + b\r\n f = pfunc([b], [c], mode=mode_with_gpu)\r\n\r\n v = theano._asarray(numpy.random.rand(shape[0], 5, *shape[1:]),\r\n dtype='float32')\r\n v = cuda_ndarray.CudaNdarray(v)\r\n #let debugmode catch errors\r\n out = f(v)[0]\r\n assert numpy.allclose(out, a.reshape(shape[0], 1, *shape[1:]) + v)\r\n #print \"Expected collapse to 3 dimensions\"\r", "def num_47():\n\n def block_reshape(a, rows, cols, nodata=-1, as_masked=True):\n \"\"\" \"\"\"\n s = np.array(a.shape)\n w = np.array([rows, cols])\n m = divmod(s, w)\n new_shape = w*m[0] + w*(m[1]!=0)\n ypad, xpad = new_shape - a.shape\n pad = ((0, ypad), (0, xpad))\n p_with =((nodata, nodata), (nodata, nodata))\n b = np.pad(a, pad_width=pad, mode='constant', constant_values=p_with)\n w_y, w_x = w # Blocksize\n y, x = b.shape # padded array\n c = b.reshape((y//w_y, w_y, x//w_x, w_x))\n c = c.swapaxes(1, 2).reshape(-1, w_y, w_x)\n if as_masked:\n mask_val = nodata\n c = np.ma.masked_equal(c, mask_val)\n c.set_fill_value(mask_val)\n return b, c\n y, x = 5, 6\n rows, cols = [3, 4]\n nodata = -1\n a = np.arange(x*y).reshape(y,x)\n b, c = block_reshape(a, rows, cols, nodata)\n print(\"\\n{}\".format(num_47.__doc__))\n print(\"a\\n{}\\nb\\n{}\\nc\\n{}\".format(a, b, c))\n return a, b, c", "def test_correlation_broadcasts(a, b, metrics):\n # unpack metrics\n metric, _metric = metrics\n metric(a, b.isel(lat=0), dim=\"time\")\n metric(a, b.isel(lat=[0]), dim=\"time\")\n b_changed_coords = b.isel(lat=[0]).assign_coords(lat=[123])\n if (\n \"eff\" not in metric.__name__\n ): # effective metrics require to be applied over time\n with pytest.raises(\n ValueError, match=\"ndex\"\n ): # match \"indexes along dimension\" and \"cannot align objects with join='exact' where index/labels/sizes are not equal along these coordinates (dimensions)\"\n metric(a, b_changed_coords, dim=\"lat\")", "def tensor_mult(a, # n_1 x n_2 x ... x n_d tensor\n b, # m_{1} x m_{2} x ... 
x m_{l} tensor\n a_dims, # list of dimensions of a to broadcast multiply\n b_dims, # list of dimensions of b to broadcast multiply\n):\n \n assert len(a_dims) == len(b_dims), \"a_dims and b_dims should have the same length!\"\n assert np.all([a.shape[a_dims[i]] == b.shape[b_dims[i]] for i in range(len(a_dims))]), \"a_dims %s and b_dims%s dimensions do not match!\" %(a_dims, b_dims)\n\n d_a = a.ndim\n d_b = b.ndim\n #bring the relevant dimensions to the front\n missing_a = [i for i in range(d_a) if i not in a_dims]\n new_order_a = a_dims + missing_a\n a_t = np.transpose(a, tuple(new_order_a))\n missing_b = [i for i in range(d_b) if i not in b_dims]\n new_order_b = b_dims + missing_b\n b_t = np.transpose(b, tuple(new_order_b))\n\n #expand the tensors to make the shapes compatible\n a_t = np.reshape(a_t, list(a_t.shape)+len(missing_b)*[1])\n b_t = np.reshape(b_t, [b.shape[i] for i in b_dims]+len(missing_a)*[1]+[b.shape[i] for i in missing_b])\n\n #multiply\n c_t = a_t * b_t\n\n #reshape the results: a_dims ; missing_a ; missing_b -> original shape of a ; missing_b\n a_t_index = np.unique(new_order_a, return_index=True)[1].tolist()\n b_t_index = np.arange(d_a, d_a+d_b-len(a_dims)).tolist()\n c = np.transpose(c_t, a_t_index+b_t_index)\n return c", "def _dtype_shape_promotion(inputs):\n\n dtype_order = [\"bool\", \"int8\", \"int16\", \"int32\", \"int64\", \"float32\", \"float64\"]\n\n ranks = [len(infer_shape(x)) for x in inputs]\n if set(ranks) == set([1, 0]):\n for i, r in enumerate(ranks):\n if r == 0:\n inputs[i] = _op.expand_dims(inputs[i], axis=0)\n\n dtypes = set(dtype_order.index(infer_type(x).checked_type.dtype) for x in inputs)\n if len(dtypes) == 1:\n return inputs\n max_dtype = dtype_order[max(dtypes)]\n for i, input_op in enumerate(inputs):\n if infer_type(input_op).checked_type.dtype != max_dtype:\n inputs[i] = input_op.astype(max_dtype)\n return inputs", "def testShapes(self, use_bias):\n\n batch_size = random.randint(1, 100)\n in_length = random.randint(10, 288)\n in_channels = random.randint(1, 10)\n out_channels = random.randint(1, 32)\n\n kernel_shape = random.randint(1, 10)\n\n inputs = tf.placeholder(\n tf.float32,\n shape=[batch_size, in_length, in_channels])\n\n conv1 = snt.Conv1D(\n output_channels=out_channels,\n kernel_shape=kernel_shape,\n padding=snt.SAME,\n stride=1,\n name=\"conv1\",\n use_bias=use_bias)\n\n output1 = conv1(inputs)\n\n self.assertTrue(\n output1.get_shape().is_compatible_with(\n [batch_size, in_length, out_channels]))\n\n self.assertTrue(\n conv1.w.get_shape().is_compatible_with(\n [kernel_shape, in_channels, out_channels]))\n\n if use_bias:\n self.assertTrue(\n conv1.b.get_shape().is_compatible_with(\n [out_channels]))\n\n conv2 = snt.Conv1D(\n output_channels=out_channels,\n kernel_shape=kernel_shape,\n padding=snt.VALID,\n stride=1,\n name=\"conv2\",\n use_bias=use_bias)\n\n output2 = conv2(inputs)\n\n self.assertTrue(\n output2.get_shape().is_compatible_with(\n [batch_size, in_length - kernel_shape + 1, out_channels]))\n\n self.assertTrue(\n conv2.w.get_shape().is_compatible_with(\n [kernel_shape, in_channels, out_channels]))\n\n if use_bias:\n self.assertTrue(\n conv2.b.get_shape().is_compatible_with(\n [out_channels]))", "def test_shape_fail():\n lons, lats = np.arange(10), np.arange(10).reshape(5, 2)\n emsg = \"Require longitudes and latitudes with same shape\"\n with pytest.raises(ValueError, match=emsg):\n _ = to_cartesian(lons, lats)", "def testShapesSame(self, batch_size, in_length, in_channels, out_length,\n out_channels, kernel_shape, 
padding, use_bias, in_shape,\n out_shape, stride_shape, use_output_shape):\n if use_output_shape:\n output_shape_arg = out_shape\n else:\n output_shape_arg = None\n\n inputs = tf.placeholder(\n tf.float32,\n shape=[batch_size, in_length, in_channels])\n\n conv1 = snt.Conv1DTranspose(output_channels=out_channels,\n output_shape=output_shape_arg,\n kernel_shape=kernel_shape,\n padding=padding,\n stride=stride_shape,\n name=\"conv1\",\n use_bias=use_bias)\n\n output = conv1(inputs)\n\n self.assertTrue(\n output.get_shape().is_compatible_with(\n [batch_size, out_length, out_channels]))\n\n self.assertTrue(\n conv1.w.get_shape().is_compatible_with(\n [1, kernel_shape, out_channels, in_channels]))\n\n if use_bias:\n self.assertTrue(\n conv1.b.get_shape().is_compatible_with(\n [out_channels]))", "def _shaped_arange(*shape):\n return np.random.randn(np.prod(shape)).astype(np.float32).reshape(\n *shape\n ) * np.prod(shape)", "def test_elemwise_collapse6():\r\n\r\n shape = (4, 5)\r\n a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),\r\n dtype='float32'))\r\n a = theano._asarray(numpy.random.rand(*shape), dtype='float32')\r\n a2 = tcn.shared_constructor(a, 'a')\r\n a3 = a2.dimshuffle('x', 'x', 0, 1)\r\n b = tcn.CudaNdarrayType((True, True, False, False))()\r\n f = pfunc([b], [a3 + b], mode=mode_with_gpu)\r\n\r\n v = theano._asarray(numpy.random.rand(1, 1, shape[0], shape[1]),\r\n dtype='float32')\r\n v = cuda_ndarray.CudaNdarray(v)\r\n #let debugmode catch errors\r\n out = f(v)[0]\r\n assert numpy.allclose(out, a.reshape(1, 1, shape[0], shape[1]) + v)\r\n #print \"Expected collapse to c contiguous\"\r", "def check_input_shape(self, op, block):\n\n ipt_name = op.input(op.input_names[0])\n ipt_shape = block.var(ipt_name).shape\n for i in ipt_shape:\n if i < 0:\n warning_msg = (\n f\"Input {ipt_name}(shape={ipt_shape}) has unkown dimension shapes. 
\"\n f\"Specifying static values may improve performance\"\n )\n warnings.warn(warning_msg)", "def common_shape(arrays):\n arrays = iter(arrays)\n shape = next(arrays).shape\n for array in arrays:\n shape = tuple(a if a == b else None\n for a, b in zip(shape, array.shape))\n return shape", "def testKernelShape(self, out_channels, padding, use_bias, in_shape,\n out_shape, stride_shape, use_output_shape):\n snt.Conv1DTranspose(\n output_channels=out_channels,\n output_shape=out_shape if use_output_shape else None,\n kernel_shape=[3],\n padding=padding,\n stride=stride_shape,\n name=\"conv1\",\n use_bias=use_bias)\n snt.Conv1DTranspose(\n output_channels=out_channels,\n output_shape=out_shape if use_output_shape else None,\n kernel_shape=3,\n padding=padding,\n stride=stride_shape,\n name=\"conv1\",\n use_bias=use_bias)\n\n err = \"Invalid kernel\"\n with self.assertRaisesRegexp(snt.IncompatibleShapeError, err):\n snt.Conv1DTranspose(output_channels=out_channels,\n output_shape=out_shape if use_output_shape else None,\n kernel_shape=[3, 3],\n name=\"conv1\",\n use_bias=use_bias)\n\n with self.assertRaisesRegexp(snt.IncompatibleShapeError, err):\n snt.Conv1DTranspose(output_channels=out_channels,\n output_shape=out_shape if use_output_shape else None,\n kernel_shape=[3, 3, 3, 3],\n name=\"conv1\",\n use_bias=use_bias)", "def gather_nd_python(a_np, indices_np):\n a_shape = a_np.shape\n indices_np = indices_np.astype(\"int32\")\n indices_shape = indices_np.shape\n assert len(indices_shape) > 1\n assert indices_shape[0] <= len(a_shape)\n b_shape = list(indices_shape[1:])\n for i in range(indices_shape[0], len(a_shape)):\n b_shape.append(a_shape[i])\n b_np = np.zeros(b_shape)\n for idx in np.ndindex(*indices_shape[1:]):\n a_idx = []\n for i in range(indices_shape[0]):\n indices_pos = tuple([i] + list(idx))\n a_idx.append(indices_np[indices_pos])\n b_np[idx] = a_np[tuple(a_idx)]\n return b_np", "def check_array_lengths(inputs, targets, weights=None):\n\n def is_tensor_or_composite_tensor(x):\n return tensor_util.is_tf_type(x) or is_composite_or_composite_value(x)\n\n def set_of_lengths(x):\n # Returns a set with the variation between\n # different shapes, with None => 0\n if x is None:\n return {}\n else:\n return set([\n y.shape[0]\n for y in x\n if y is not None and not is_tensor_or_composite_tensor(y)\n ])\n\n set_x = set_of_lengths(inputs)\n set_y = set_of_lengths(targets)\n set_w = set_of_lengths(weights)\n if len(set_x) > 1:\n raise ValueError('All input arrays (x) should have '\n 'the same number of samples. Got array shapes: ' +\n str([x.shape for x in inputs]))\n if len(set_y) > 1:\n raise ValueError('All target arrays (y) should have '\n 'the same number of samples. Got array shapes: ' +\n str([y.shape for y in targets]))\n if set_x and set_y and list(set_x)[0] != list(set_y)[0]:\n raise ValueError('Input arrays should have '\n 'the same number of samples as target arrays. '\n 'Found ' + str(list(set_x)[0]) + ' input samples '\n 'and ' + str(list(set_y)[0]) + ' target samples.')\n if len(set_w) > 1:\n raise ValueError('All sample_weight arrays should have '\n 'the same number of samples. Got array shapes: ' +\n str([w.shape for w in weights]))\n if set_y and set_w and list(set_y)[0] != list(set_w)[0]:\n raise ValueError('Sample_weight arrays should have '\n 'the same number of samples as target arrays. 
Got ' +\n str(list(set_y)[0]) + ' input samples and ' +\n str(list(set_w)[0]) + ' target samples.')", "def test_rasterizer_return_correct_batch_shapes(self, shapes, dtypes,\n enable_cull_face):\n placeholders = self._create_placeholders(shapes, dtypes)\n frame_buffer = rasterization_backend.rasterize(\n placeholders[0], placeholders[1], placeholders[2],\n (self.IMAGE_WIDTH, self.IMAGE_HEIGHT), enable_cull_face,\n self._num_layers, self._backend).layer(0)\n batch_size = shapes[0][0]\n self.assertEqual([batch_size],\n frame_buffer.triangle_id.get_shape().as_list()[:-3])\n self.assertEqual([batch_size],\n frame_buffer.foreground_mask.get_shape().as_list()[:-3])" ]
[ "0.63662064", "0.631577", "0.6181781", "0.60380113", "0.59342825", "0.5925404", "0.58053595", "0.57856745", "0.57498085", "0.56896067", "0.55596524", "0.55482703", "0.548472", "0.548077", "0.540709", "0.5397413", "0.53940207", "0.5373903", "0.53049004", "0.526517", "0.52371943", "0.5168172", "0.5129427", "0.51159716", "0.5099658", "0.5093899", "0.5056028", "0.50246346", "0.50067145", "0.4981141", "0.4949021", "0.49144667", "0.48919111", "0.48919111", "0.48872256", "0.4886658", "0.48802176", "0.4879475", "0.4876351", "0.48662552", "0.48611993", "0.48589945", "0.48588014", "0.4850698", "0.4842084", "0.4835378", "0.4828404", "0.48242596", "0.4818624", "0.4797015", "0.47918215", "0.47874558", "0.47774827", "0.47697437", "0.47569576", "0.4748482", "0.47389483", "0.47316363", "0.47287753", "0.47280917", "0.47257784", "0.4720666", "0.47122407", "0.47084615", "0.4708252", "0.47078866", "0.46851", "0.46847764", "0.46770257", "0.46767506", "0.46642214", "0.46577027", "0.46468487", "0.46449578", "0.4643533", "0.46343604", "0.4624989", "0.4622847", "0.46224415", "0.46218577", "0.46204442", "0.4617332", "0.46070936", "0.46042624", "0.4584135", "0.45828676", "0.45747095", "0.45495883", "0.4548146", "0.45418236", "0.4540298", "0.45239145", "0.45210767", "0.45203355", "0.4518871", "0.45184422", "0.45171613", "0.4516579", "0.45161402", "0.44998798" ]
0.6529921
0
Batched center of mass calculation of 2d arrays
def center_of_mass_2d(arr: np.ndarray, dtype=np.float32) -> np.ndarray:
    total = np.sum(arr, axis=(-1, -2))
    grids = np.ogrid[[slice(0, i) for i in arr.shape[-2:]]]
    with np.errstate(invalid="ignore"):
        results = np.array([np.sum(arr * grid.astype(dtype), axis=(-1, -2)) / total
                            for grid in grids], dtype=dtype)
    results = np.moveaxis(results, 0, -1)
    return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def centerOfMass(data):\r\n dd = []\r\n for d in data:\r\n dd.append(d.coordinate)\r\n\r\n data = dd\r\n data = np.array(data)\r\n n = len(data)\r\n x = sum(data[:,0])\r\n y = sum(data[:,1])\r\n z = sum(data[:,2])\r\n x/=n\r\n y/=n\r\n z/=n\r\n return x,y,z,n", "def _center_of_mass(a, positions, shape, dtype):\n\n result = numpy.empty((1,), dtype=dtype)\n\n positions_nd = numpy.unravel_index(positions, shape)\n a_sum = numpy.sum(a)\n\n a_wt_i = numpy.empty(a.shape)\n for i, pos_nd_i in enumerate(positions_nd):\n a_wt_sum_i = numpy.multiply(a, pos_nd_i, out=a_wt_i).sum()\n result[\"com\"][0, i] = a_wt_sum_i / a_sum\n\n return result[0]", "def center_of_mass(im_binary, x_offset=0, y_offset=0):\n n = np.sum(im_binary)\n\n x = np.arange(im_binary.shape[1]) + x_offset\n y = np.arange(im_binary.shape[0]) + y_offset\n xv, yv = np.meshgrid(x, y)\n cx = np.sum(xv[im_binary]) / n\n cy = np.sum(yv[im_binary]) / n\n\n return cx, cy", "def get_center_of_mass_allies(self,obs):", "def calculate_centers_of_mass(x_all, y_all):\n num_of_frames, num_of_rafts = x_all.shape\n\n x_centers = x_all[:, 0:num_of_rafts].mean(axis=1)\n y_centers = y_all[:, 0:num_of_rafts].mean(axis=1)\n\n x_relative_to_centers = x_all - x_centers[:, np.newaxis]\n y_relative_to_centers = y_all - y_centers[:, np.newaxis]\n\n distances_to_centers = np.sqrt(x_relative_to_centers ** 2 + y_relative_to_centers ** 2)\n\n orbiting_angles = np.arctan2(y_relative_to_centers, x_relative_to_centers) * 180 / np.pi\n\n return distances_to_centers, orbiting_angles, x_centers, y_centers", "def center_of_mass(elements, coordinates):\n mass = molecular_weight(elements)\n mass_array = np.array([[atomic_mass[i.upper()]] * 3 for i in elements])\n mass_coordinates = coordinates * mass_array\n return (np.sum(mass_coordinates, axis=0) / np.array([mass, mass, mass]))", "def centre_of_mass(image, black_blob=False):\r\n image = image.copy()\r\n shape = image.shape\r\n if black_blob:\r\n image = 255-image\r\n centre = np.array([0, 0]).astype(float)\r\n\r\n #------------------------------START YOUR CODE-----------------------------#\r\n s = np.sum(image)\r\n indices = np.mgrid[0:image.shape[0],0:image.shape[1]]\r\n ys = np.sum(indices[0]*image)\r\n xs = np.sum(indices[1]*image)\r\n\r\n # Equivalent, but slower\r\n #xs = 0.0\r\n #ys = 0.0\r\n #s = 0.0 \r\n #for y in range(shape[0]):\r\n # for x in range(shape[1]):\r\n # p = image[y, x]\r\n # xs += x*p\r\n # ys += y*p\r\n # s += p\r\n\r\n centre = np.array([ ys/s, xs/s ])\r\n #-------------------------------END YOUR CODE------------------------------#\r\n return centre.astype(int)", "def center_of_mass(xy, masses):\n return np.sum(masses.reshape(len(xy), 1) * xy.astype(np.float), axis=0) / float(np.sum(masses))", "def CenterOfMass(points):\n A = AreaOfPolygon(points)\n N = len(points)\n cx = 0\n cy = 0\n for i in xrange(0, N):\n x_i = points[i][0]\n y_i = points[i][1]\n x_ip1 = points[(i+1) % N][0]\n y_ip1 = points[(i+1) % N][1]\n part = (x_i * y_ip1 - x_ip1 * y_i)\n cx += ((x_i + x_ip1) * part)\n cy += ((y_i + y_ip1) * part)\n return (cx/(6*A), cy/(6*A), abs(A))", "def cell_center_fast(seg_img: np.ndarray, labels: np.ndarray) -> np.ndarray:\n array_max_idx = max(labels)\n results = np.zeros((array_max_idx + 1, 3))\n results = compute_cell_center(seg_img, labels, results)\n\n return results", "def center_of_mass(molecule):\n xcom=ycom=zcom=0\n totm = 0\n for atom in get_atoms(molecule):\n m = get_mass(atom)\n x,y,z = get_xyz(atom)\n xcom += m*x\n ycom += m*y\n zcom += m*z\n totm += m\n xcom /= totm\n ycom /= 
totm\n zcom /= totm\n return xcom,ycom,zcom", "def centre(arrayin):\r\n ny = arrayin.shape[0]\r\n nx = arrayin.shape[1]\r\n cy = 0.0\r\n cx = 0.0\r\n for i in range(ny):\r\n for j in range(nx):\r\n cy += np.float64(arrayin[i,j]) * np.float64(i - ny/2 + 1)\r\n cx += np.float64(arrayin[i,j]) * np.float64(j - nx/2 + 1)\r\n cx = cx / np.sum(arrayin)\r\n cy = cy / np.sum(arrayin)\r\n arrayout = np.roll(arrayin ,-int(cy),0)\r\n arrayout = np.roll(arrayout,-int(cx),1)\r\n return [arrayout,cy,cx]", "def center(X):\n \n n,m = X.shape\n if n != m:\n raise Exception('Matrix is not square.')\n \n colsum = X.sum(axis=0) / n\n rowsum = X.sum(axis=1) / n\n totalsum = X.sum() / (n**2)\n \n #center\n Y = array([[ X[i,j]-rowsum[i]-colsum[j]+totalsum for i in range(n) ] for j in range(n)])\n \n return Y", "def calculate_center_of_mass(symbols, coordinates):\n\n total_mass = calculate_molecular_mass(symbols)\n\n mass_array = np.zeros([len(symbols),1])\n\n for i in range(len(symbols)):\n mass_array[i] = atomic_weights[symbols[i]]\n\n center_of_mass = sum(coordinates * mass_array) / total_mass\n\n return center_of_mass", "def center_of_mass(points):\n # break into many triangles\n # each point is part of two triangles\n cor = [sum(points) / len(points)]\n mass_points = []\n area = 0\n for i in range(len(points) - 1):\n triangle = cor + points[i:i + 2]\n # print(triangle)\n mass_points.append(build_triangle_point_mass(triangle))\n area += shoelace_area(triangle)\n # print(triangle, area)\n mass_points.append(build_triangle_point_mass(cor + [points[-1], points[0]]))\n area += shoelace_area(cor + [points[-1], points[0]])\n return Vector2D(*find_com(*zip(*mass_points))), area", "def compute_cell_center(seg_img: np.ndarray, labels: np.ndarray, results: np.ndarray) \\\n -> np.ndarray:\n for label in labels:\n if label != 0:\n all_points_z, all_points_x, all_points_y = np.where(seg_img == label)\n avg_z = np.round(np.mean(all_points_z))\n avg_x = np.round(np.mean(all_points_x))\n avg_y = np.round(np.mean(all_points_y))\n results[label] = [avg_z, avg_x, avg_y]\n\n return results", "def CenterOfMassForShape(shape):\n polygons = SplitIntoPolygons(shape)\n total_A = 0\n total_cx = 0\n total_cy = 0\n\n for polygon in polygons:\n cx, cy, A = CenterOfMass(polygon)\n total_cx += A * cx\n total_cy += A * cy\n total_A += A\n\n return (total_cx / total_A, total_cy / total_A)", "def test_get_center_of_mass(self):\n symbols = ['C', 'H', 'H', 'H', 'H']\n coords = np.array([[0.0000000, 0.0000000, 0.0000000],\n [0.6269510, 0.6269510, 0.6269510],\n [-0.6269510, -0.6269510, 0.6269510],\n [-0.6269510, 0.6269510, -0.6269510],\n [0.6269510, -0.6269510, -0.6269510]], np.float64)\n center_of_mass = get_center_of_mass(coords=coords, symbols=symbols)\n for cm_coord in center_of_mass:\n self.assertEqual(cm_coord, 0.0)\n\n symbols = ['O', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H']\n coords = np.array([[1.28706525, 0.52121353, 0.04219198],\n [0.39745682, -0.35265044, -0.63649234],\n [0.36441173, -1.68197093, 0.08682400],\n [-0.59818222, 0.10068325, -0.65235399],\n [0.74799641, -0.48357798, -1.66461710],\n [0.03647269, -1.54932006, 1.12314420],\n [-0.31340646, -2.38081353, -0.41122551],\n [1.36475837, -2.12581592, 0.12433596],\n [2.16336803, 0.09985803, 0.03295192]], np.float64)\n center_of_mass = get_center_of_mass(coords=coords, symbols=symbols)\n self.assertAlmostEqual(center_of_mass[0], 0.7201, 3)\n self.assertAlmostEqual(center_of_mass[1], -0.4880, 3)\n self.assertAlmostEqual(center_of_mass[2], -0.1603, 3)\n\n numbers = [6, 6, 8, 1, 1, 1, 1, 1, 
1]\n coords = np.array([[1.1714680, -0.4048940, 0.0000000],\n [0.0000000, 0.5602500, 0.0000000],\n [-1.1945070, -0.2236470, 0.0000000],\n [-1.9428910, 0.3834580, 0.0000000],\n [2.1179810, 0.1394450, 0.0000000],\n [1.1311780, -1.0413680, 0.8846660],\n [1.1311780, -1.0413680, -0.8846660],\n [0.0448990, 1.2084390, 0.8852880],\n [0.0448990, 1.2084390, -0.8852880]], np.float64)\n center_of_mass = get_center_of_mass(coords=coords, numbers=numbers)\n self.assertAlmostEqual(center_of_mass[0], -0.0540, 3)\n self.assertAlmostEqual(center_of_mass[1], -0.0184, 3)\n self.assertAlmostEqual(center_of_mass[2], -0.0000, 3)", "def center(self, center_mass=False):\n if center_mass:\n com = self.center_of_mass\n self.xyz -= com\n else:\n self.xyz -= self.xyz.mean(0)", "def get_center_of_mass_enemies(self,obs):", "def getCenterOfMass(self, filtered = True):\n n_time = len(self.pos)\n x_mean = [0.0,]*n_time\n y_mean = [0.0,]*n_time\n z_mean = [0.0,]*n_time \n for frame in range(n_time):\n # get all the positions of the filtered points\n x,y,z = self.getAllPositions(frame, filtered)\n x_mean[frame] = np.asarray(x).mean() if len(x) > 0 else None\n y_mean[frame] = np.asarray(y).mean() if len(y) > 0 else None\n z_mean[frame] = np.asarray(z).mean() if len(z) > 0 else None\n\n return x_mean, y_mean, z_mean", "def centres_of_mass_2D(image):\n centroids = []\n bords = []\n areas = []\n radius = []\n \n for info in measure.regionprops(image, ['Centroid', 'BoundingBox', 'Area', 'equivalent_diameter', 'Label']): \n \n # Skip wrong regions\n index = np.where(image==info['Label'])\n if index[0].size==0 & index[1].size==0:\n continue\n \n # TODO: change this value\n if info['Area'] > image.shape[0] / 4.:\n \n \n centre = info['Centroid']\n D = info['equivalent_diameter']\n \n #min_row, min_col, max_row, max_col = info['BoundingBox']\n #a1 = int((max_row - min_row) / 2.)\n #a2 = int((max_col - min_col) / 2.)\n \n #box_cent = (a1 + min_row, a2 + min_col)\n \n radius.append(round(D / 2.0, 3))\n centroids.append( (round(centre[0], 3),round(centre[1], 3)) )\n #bords.append(box_cent)\n\n return [centroids, radius]", "def calculate_center_of_mass(chainVecs: IMP.algebra.Vector3Ds):\n return IMP.algebra.get_centroid(chainVecs)", "def get_center_of_masses(self) -> np.array:\n com = np.average(self.obj[:, :2], weights=self.obj[:, 2], axis=0)\n return com", "def center(emg_data: np.ndarray, center_value: float = None) -> np.ndarray:\n center_value = center_value if center_value else emg_data.mean(axis=1)\n emg_centered = np.copy(emg_data)\n for i in range(emg_data.shape[0]):\n emg_centered[i, :] = emg_data[i, :] - center_value[i]\n return emg_centered", "def centroid(coords,masses,divider):\n\treturn np.array([np.dot(masses[r].T,coords[r])/np.sum(masses[r]) for r in divider])", "def computeCenters3d(self, data):\n\n\n for i in range(self.nPoints):\n print(\"Label of point \", i, \" is \", self.labels[i])\n for j in range(3):\n self.centers[self.labels[i]][j] += data[i][j]\n\n for c in range(self.n):\n for j in range(3):\n self.centers[c][j] /= self.tots[c]", "def center_of_mass(mask):\n M = cv2.moments(mask)\n # Usando a expressão do centróide definida em: https://en.wikipedia.org/wiki/Image_moment\n if M[\"m00\"] == 0:\n M[\"m00\"] = 1\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n return [int(cX), int(cY)]", "def barycentre (liste_objets):\r\n x = 0\r\n y = 0\r\n summass = 0\r\n for i in liste_objets:\r\n x += i.mass * i.posx\r\n y += i.mass * i.posy\r\n summass += i.mass\r\n x /= summass\r\n y /= 
summass\r\n return x,y,summass", "def center_of_mass(self, matrix):\n # Changing the positions of all objects relative to center of mass, in origo.\n x, y, z = np.sum(matrix[:, 0].reshape(self.numbodies, 1)*matrix[:, 1:4], axis=0)/(np.sum(matrix[:, 0], axis=0))\n print('Center of mass located at (%.4g, %.4g, %.4g)' %(x, y, z))\n # x-direction\n matrix[:, 1] = matrix[:, 1]-x\n # y-direction\n matrix[:, 2] = matrix[:, 2]-y\n # z-direction\n matrix[:, 3] = matrix[:, 3]-z\n # The Suns initial velocity which makes the total momentum of the system zero\n # velcity_sun = sum(mass_planet_i*veocity_planet_i)/(mass_sun)\n u, v, w = np.sum(matrix[:, 0].reshape(self.numbodies, 1)*matrix[:, 4:7], axis=0)/(matrix[0, 0])\n print('The initial velocity of the Sun (%.4g, %.4g, %.4g)' %(u, v, w))\n matrix[0, 4:7] = u, v, w\n # Returning the modified matrix\n return matrix", "def _get_molecule_center_of_mass(self):\n center_of_mass = np.zeros([3], dtype=float)\n masses = self._prmtop[\"MASS\"]\n for atom_ind in range(len(self._crd)):\n center_of_mass += masses[atom_ind] * self._crd[atom_ind]\n total_mass = masses.sum()\n if total_mass == 0:\n raise RuntimeError(\"zero total mass\")\n return center_of_mass / total_mass", "def mcentroid(xarr, yarr, kern=default_kernal, xc=None, xdiff=None):\n\n if xdiff is None:\n xdiff = len(kern)\n\n if xdiff < len(kern):\n xdiff = len(kern)\n\n\n if xc is not None and xdiff:\n mask = (abs(xarr - xc) < xdiff)\n else:\n mask = np.ones(len(xarr), dtype=bool)\n\n # convle the input array with the default kernal\n warr = np.convolve(yarr[mask], kern, mode='same')\n\n # interpolate the results\n # imask is used to make sure we are only gettin the\n # center pixels\n imask = (abs(xarr[mask]-xarr[mask].mean()) < 3)\n cx = np.interp(0, warr[imask], xarr[mask][imask])\n return cx", "def _get_cbeam_mass(model, xyz, element_ids, all_eids,\n length_eids_pids, lengths, nsm_centroids_length,\n eids, mass, cg, inertia, reference_point):\n eids2 = get_sub_eids(all_eids, eids, 'CBEAM')\n for eid in eids2:\n elem = model.elements[eid]\n prop = elem.pid_ref\n pid = elem.pid\n n1, n2 = elem.node_ids\n xyz1 = xyz[n1]\n xyz2 = xyz[n2]\n centroid = (xyz1 + xyz2) / 2.\n length = norm(xyz2 - xyz1)\n\n is_failed, out = elem.get_axes(model)\n if is_failed:\n model.log.error(out)\n raise RuntimeError(out)\n wa, wb, _ihat, jhat, khat = out\n p1 = xyz1 + wa\n p2 = xyz2 + wb\n if prop.type == 'PBEAM':\n rho = prop.Rho()\n\n # we don't call the MassPerLength method so we can put the NSM centroid\n # on a different axis (the PBEAM is weird)\n mass_per_lengths = []\n nsm_per_lengths = []\n for (area, nsm) in zip(prop.A, prop.nsm):\n mass_per_lengths.append(area * rho)\n nsm_per_lengths.append(nsm)\n mass_per_length = integrate_positive_unit_line(prop.xxb, mass_per_lengths)\n nsm_per_length = integrate_positive_unit_line(prop.xxb, nsm_per_lengths)\n nsm_n1 = (p1 + jhat * prop.m1a + khat * prop.m2a)\n nsm_n2 = (p2 + jhat * prop.m1b + khat * prop.m2b)\n nsm_centroid = (nsm_n1 + nsm_n2) / 2.\n #if nsm != 0.:\n #p1_nsm = p1 + prop.ma\n #p2_nsm = p2 + prop.mb\n elif prop.type == 'PBEAML':\n mass_per_lengths = prop.get_mass_per_lengths()\n #mass_per_length = prop.MassPerLength() # includes simplified nsm\n\n # m1a, m1b, m2a, m2b=0.\n nsm_centroid = (p1 + p2) / 2.\n\n # mass_per_length already includes nsm\n mass_per_length = integrate_positive_unit_line(prop.xxb, mass_per_lengths)\n nsm_per_length = 0.\n\n #nsm_centroid = np.zeros(3) # TODO: what is this...\n #nsm = prop.nsm[0] * length # TODO: simplified\n elif 
prop.type == 'PBCOMP':\n mass_per_length = prop.MassPerLength()\n nsm_per_length = prop.nsm\n nsm_n1 = (p1 + jhat * prop.m1 + khat * prop.m2)\n nsm_n2 = (p2 + jhat * prop.m1 + khat * prop.m2)\n nsm_centroid = (nsm_n1 + nsm_n2) / 2.\n elif prop.type == 'PBMSECT':\n continue\n #mass_per_length = prop.MassPerLength()\n #m = mass_per_length * length\n #nsm = prop.nsm\n else: # pragma: no cover\n raise NotImplementedError(prop.type)\n\n #mpl = elem.pid_ref.MassPerLength()\n #m = mpl * length\n\n length_eids_pids['PBEAM'].append((eid, pid))\n lengths['PBEAM'].append(length)\n nsm_centroids_length['PBEAM'].append(nsm_centroid)\n m = mass_per_length * length\n nsm = nsm_per_length * length\n if CHECK_MASS and ((m + nsm) != elem.Mass() or not np.array_equal(centroid, elem.Centroid())): # pragma: no cover\n msg = 'CBEAM; eid=%s; %s pid=%s; m/L=%s nsm/L=%s; length=%s\\n' % (\n eid, pid, prop.type, mass_per_length, nsm_per_length, length)\n msg += 'mass_new=%s mass_old=%s\\n' % (m, elem.Mass())\n msg += 'centroid_new=%s centroid_old=%s\\n%s' % (\n str(centroid), str(elem.Centroid()), str(elem))\n raise RuntimeError(msg)\n\n if eid not in element_ids:\n continue\n #nsm = (nsm_per_length + nsmi) * length\n (x, y, z) = centroid - reference_point\n (xm, ym, zm) = nsm_centroid - reference_point\n x2 = x * x\n y2 = y * y\n z2 = z * z\n xm2 = xm * xm\n ym2 = ym * ym\n zm2 = zm * zm\n\n # Ixx, Iyy, Izz, Ixy, Ixz, Iyz\n inertia[0] += m * (y2 + z2) + nsm * (ym2 + zm2)\n inertia[1] += m * (x2 + z2) + nsm * (xm2 + zm2)\n inertia[2] += m * (x2 + y2) + nsm * (xm2 + ym2)\n inertia[3] += m * x * y + nsm * xm * ym\n inertia[4] += m * x * z + nsm * xm * zm\n inertia[5] += m * y * z + nsm * ym * zm\n massi = m + nsm\n mass += massi\n cg += m * centroid + nsm * nsm_centroid\n #print('length=%s mass=%s mass_per_length=%s nsm_per_length=%s m=%s nsm=%s centroid=%s nsm_centroid=%s' % (\n #length, mass, mass_per_length, nsm_per_length, m, nsm, centroid, nsm_centroid))\n if CHECK_MASS and massi != elem.Mass(): # pragma: no cover\n msg = 'mass_new=%s mass_old=%s\\n' % (massi, elem.Mass())\n msg += 'centroid_new=%s centroid_old=%s\\n%s' % (\n str(centroid), str(elem.Centroid()), str(elem))\n raise RuntimeError(msg)\n return mass", "def find_center_of_mass(selection='(all)', state=-1):\n state = utils.int_to_state(state)\n model = cmd.get_model(selection, state=state)\n com = cpv.get_null()\n # iterate all atoms and add vectors of center of mass of each atoms\n for atom in model.atom:\n com = cpv.add(com, atom.coord)\n com = cpv.scale(com, 1.0 / len(model.atom))\n return com", "def compute_centers_of_hypercubes(self):\n for hc in self.hypercubes.flatten():\n for i in range(self.dims - 1, -1, -1):\n index = self.dims - (i + 1)\n hc.center[i] = (hc.coords[index] + 0.5) * self.hypercube_measurements[index]", "def inner(self, a: np.ndarray, b: np.ndarray) -> float:\n return a.T @ (self.mass @ b)", "def take_center(array,size):\n size /= 200\n Nc,Nx,Ny,Nz = array.shape\n nx,ny,nz = int(Nx/2), int(Ny/2),int(Nz/2)\n rx,ry,rz = math.ceil(Nx*size), math.ceil(Ny*size), math.ceil(Nz*size)\n reduced = array[:,nx-rx:nx+rx,ny-ry:ny+ry,nz-rz:nz+rz]\n #print(nx,ny,nz,rx,ry,rz)\n return reduced", "def _MStep(x, z, k):\n dim = x.shape[1]\n centers = np.repeat(np.reshape(x.mean(0), (1, dim)), k, 0)\n for q in range(k):\n if np.sum(z == q) == 0:\n pass\n else:\n centers[q] = np.mean(x[z == q], 0)\n return centers", "def center_size(boxes):\n concat = P.Concat(1)\n return concat(((boxes[:, 2:] + boxes[:, :2])/2, # cx, cy\n boxes[:, 2:] - boxes[:, 
:2])) # w, h", "def center_of_mass(self, entity, geometric=False):\n\n # Structure, Model, Chain, Residue\n if isinstance(entity, Entity.Entity):\n atom_list = entity.get_atoms()\n # List of Atoms\n elif hasattr(entity, \"__iter__\") and [x for x in entity if x.level == \"A\"]:\n atom_list = entity\n # Some other weirdo object\n else:\n raise ValueError(\n f\"Center of Mass can only be calculated from the following objects:\\n\"\n f\"Structure, Model, Chain, Residue, list of Atoms.\"\n )\n\n masses = []\n positions = [[], [], []] # [ [X1, X2, ..] , [Y1, Y2, ...] , [Z1, Z2, ...] ]\n\n for atom in atom_list:\n masses.append(atom.mass)\n\n for i, coord in enumerate(atom.coord.tolist()):\n positions[i].append(coord)\n\n # If there is a single atom with undefined mass complain loudly.\n if \"ukn\" in set(masses) and not geometric:\n raise ValueError(\n f\"Some atoms don't have an element assigned.\\n\"\n f\"Try adding them manually or calculate the geometrical center of mass instead.\"\n )\n\n if geometric:\n return [sum(coord_list) / len(masses) for coord_list in positions]\n else:\n w_pos = [[], [], []]\n for atom_index, atom_mass in enumerate(masses):\n w_pos[0].append(positions[0][atom_index] * atom_mass)\n w_pos[1].append(positions[1][atom_index] * atom_mass)\n w_pos[2].append(positions[2][atom_index] * atom_mass)\n\n return [sum(coord_list) / sum(masses) for coord_list in w_pos]", "def compute_projmass(args):\n radius = args.radius/3600.0\n\n k_map = pyfits.open(args.kappa_map)\n k_data = k_map[0].data\n k_data_tmp = k_data\n\n pix_dim = math.fabs(k_map[0].header[\"CDELT1\"])\n pix_unit = k_map[0].header[\"CUNIT1\"]\n shape = k_map[0].data.shape\n\n x_axis = np.linspace(-(shape[0] - 1.0)/2.0*pix_dim , \\\n (shape[0] - 1.0)/2.0*pix_dim, shape[0])\n y_axis = np.linspace(-(shape[1] - 1.0)/2.0*pix_dim , \\\n (shape[1] - 1.0)/2.0*pix_dim, shape[1])\n\n if pix_unit != \"deg\":\n print \"Error, pixel unit not in deg\"\n if (x_axis.max() - x_axis.min())/2.0 < radius:\n print \"Error, the radius is larger than the image limits\"\n\n\n proj_mass = 0.0\n for i_x in range(shape[0]):\n for i_y in range(shape[1]):\n if x_axis[i_x]**2.0 + y_axis[i_y]**2.0 <= radius**2.0:\n #k_data_tmp[i_x][i_y] = 0.0\n proj_mass += k_data_tmp[i_x][i_y]\n\n print \"%e M_sol\" % (proj_mass*1E12)\n\n if args.plot_cont:\n circ = fc.make_circunference(radius*3600, 0, 0)\n plt.plot(circ[0], circ[1], \"k--\", linewidth = 2)\n plt.contour(x_axis*3600.0, y_axis*3600.0, k_data)\n plt.show()\n\n return proj_mass", "def populateCenters(matrix, row, col, frame, midRange, roughness, perturbance):\n maxIndex = matrix.shape[0]-1\n quarterRange = midRange/2\n\n pf = perturbanceFactor(matrix.shape[0], midRange, perturbance)\n noiseLevel = roughness * pf\n\n \"\"\"\n For each subdivided cube, getIndexRef is used to get the indicies, and center is used\n to determine the points that should be averaged and the point to be set. 
\n setValue does the calculations.\n \"\"\"\n indexRef = getIndexRef(row, col, frame, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row, col, frame + midRange, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row, col + midRange, frame, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row + midRange, col, frame, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row + midRange, col + midRange, frame, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row + midRange, col, frame + midRange, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row, col + midRange, frame + midRange, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row + midRange, col + midRange, frame + midRange, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n\n #printAllowCancel(matrix)", "def mass_eval(self):\n # Calculate lengths\n L = np.zeros(self.m)\n for i in range(self.m):\n L[i] = np.linalg.norm(self.coord[self.con[i, 0], :] - self.coord[self.con[i, 1], :])\n\n # Calculate total mass\n self.mass = 0\n for i in range(self.m):\n self.mass += L[i]*self.WEIGHT[int(self.sizes[i])]", "def gen_center_points(x_res, y_res, dim):\n center_points = []\n\n for x in range(math.floor(dim[0] / x_res)):\n for y in range(math.floor(dim[1] / y_res)):\n x = (x + 1) * x_res\n y = (y + 1) * y_res\n center_points.append((x, y))\n\n return center_points", "def normalize_data(batch_data):\n B, N, C = batch_data.shape\n normal_data = np.zeros((B, N, C))\n for b in range(B):\n pc = batch_data[b]\n centroid = np.mean(pc, axis=0)\n pc = pc - centroid\n m = np.max(np.sqrt(np.sum(pc ** 2, axis=1)))\n pc = pc / m\n normal_data[b] = pc\n return normal_data", "def _pca(self):\n mean_beam = np.mean(self.beam_images, axis=1, keepdims=False)\n mask = self.mask\n beam_images = self.beam_images[:, :self.n_beam_images]\n\n # Subtract mean_beam from images and apply the mask. Element-wise\n # multiplication and subtraction using numpy broadcasting (as commented\n # out below) requires 3 large matrices in memory at an intermediate\n # point in the computation, namely right after (beam_images -\n # mean_beam_2d) is evaluated and memory for centered_masked_images is\n # allocated.\n # mask_2d = mask[:,np.newaxis]\n # mean_beam_2d = mean_beam[:,np.newaxis]\n # centered_masked_images = mask_2d * (beam_images - mean_beam_2d)\n\n # Instead of that direct approach, use self._center_and_mask_numba() or\n # self._center_and_mask_in_place(). 
As of this writing the _in_place\n # version is faster, but this may change in the future since the numba\n # version supports parallelization.\n centered_masked_images = self._center_and_mask_in_place(\n beam_images,\n mask,\n mean_beam,\n )\n # centered_masked_images should be C-contiguous already but it's good to\n # make sure.\n centered_masked_images = np.ascontiguousarray(centered_masked_images)\n\n # Compute the masked principal components\n # -1 since last eigenvector isn't necessarily orthogonal to the others.\n n_eigs = min(self.n_beam_images - 1, self.max_principal_components)\n n_eigs = max(n_eigs, 1) # Need at least one.\n # .T means transpose, @ means matrix multiplication.\n cov_mat = centered_masked_images.T @ centered_masked_images\n del centered_masked_images # Free up memory.\n if self.use_sparse_routines:\n variances, principal_components = eigsh(\n cov_mat, k=n_eigs, which='LM')\n else:\n eigvals_param = (\n self.n_beam_images - n_eigs,\n self.n_beam_images - 1)\n # overwrite_a might reduce memory usage\n variances, principal_components = eigh(\n cov_mat, eigvals=eigvals_param, overwrite_a=True)\n del cov_mat # Free up memory.\n\n # Reverse ordering to put largest eigenvectors/eigenvalues first\n principal_components = np.fliplr(principal_components)\n variances = np.flip(variances)\n\n # principal_components isn't always C-contiguous, and when it's not the\n # matrix multiplication below becomes extremely slow. It's much faster\n # to make it C-contiguous first so that numpy can use faster matrix\n # multiplication routines behind the scenes.\n principal_components = np.ascontiguousarray(principal_components)\n\n # Construct the un-masked basis vectors.\n centered_images = beam_images - mean_beam[:, np.newaxis]\n # centered_images should be C-contiguous already but it's good to make\n # sure.\n centered_images = np.ascontiguousarray(centered_images)\n principal_components = centered_images @ principal_components\n del centered_images # Free up memory.\n\n # As of this writing, self._normalize_vectorized() is faster than using\n # self._normalize_numba() despite the fact that the latter is uses numba\n # and allows for parallelization. 
That may change in the future though.\n principal_components = self._normalize_vectorized(\n principal_components,\n mask,\n )\n\n return mean_beam, principal_components, variances", "def center(self):\n return np.array([0,0,1/self.C+self.pos()])", "def center_and_normalise_kernel(K_temp):\n\n K_temp = KernelCenterer().fit_transform(K_temp)\n nb_item = K_temp.shape[0]\n K_norm = np.zeros((nb_item, nb_item))\n for i in range(nb_item):\n for j in range(i, nb_item):\n K_norm[i, j] = K_temp[i, j] / math.sqrt(K_temp[i, i] * K_temp[j, j])\n K_norm[j, i] = K_norm[i, j]\n\n return K_norm", "def center_of_mass(self, time):\n if self.start_time <= time <= self.end_time:\n diff = time - self.start_time\n valid = np.flatnonzero(self.masks[diff] != 0)\n if valid.size > 0:\n com_x = 1.0 / self.timesteps[diff].ravel()[valid].sum() * np.sum(self.timesteps[diff].ravel()[valid] *\n self.x[diff].ravel()[valid])\n com_y = 1.0 / self.timesteps[diff].ravel()[valid].sum() * np.sum(self.timesteps[diff].ravel()[valid] *\n self.y[diff].ravel()[valid])\n else:\n com_x = np.mean(self.x[diff])\n com_y = np.mean(self.y[diff])\n else:\n com_x = None\n com_y = None\n return com_x, com_y", "def center_of_charge(self):\n ret = [0.0, 0.0, 0.0]\n total_c = 0.0\n\n for at in range(self.natom()):\n c = self.charge(at)\n ret = add(ret, scale(self.xyz(at), c))\n total_c += c\n\n ret = scale(ret, 1.0 / total_c)\n return ret", "def centre_of_mass(mol):\n\n numatoms = mol.GetNumAtoms()\n conf = mol.GetConformer()\n if not conf.Is3D():\n return 0\n # get coordinate of each atoms\n pts = np.array([list(conf.GetAtomPosition(atmidx)) for atmidx in range(numatoms)])\n atoms = [atom for atom in mol.GetAtoms()]\n mass = Descriptors.MolWt(mol)\n # get center of mass\n center_of_mass = np.array(np.sum(atoms[i].GetMass() * pts[i] for i in range(numatoms))) / mass\n return center_of_mass", "def _compute_centroids(self):\n\n for i in range(0, self.k):\n cluster = np.argwhere(self.assigned_clusters == i)\n cluster_points = self.data[cluster].squeeze()\n self.centroids[i] = np.mean(cluster_points, axis=0)", "def fillCostMatrix(xs0,ys0,xs1,ys1):\n M = int ( max(len(xs0),len(xs1)) ) #Number of centroids.\n costMatrix = np.zeros((M,M))\n x_rows = np.zeros(M)\n x_rows[0:len(xs0)] = xs0\n y_rows = np.zeros(M)\n y_rows[0:len(xs0)] = ys0\n \n x_cols = np.zeros(M)\n x_cols[0:len(xs1)] = xs1\n y_cols = np.zeros(M)\n y_cols[0:len(xs1)] = ys1\n\n for i in range(len(xs0)):\n for j in range(len(xs1)):\n costMatrix[i,j]=(y_rows[i]-y_cols[j])**2\n costMatrix[i,j] += (x_rows[i]-x_cols[j])**2\n return costMatrix", "def center_of_mass_polyhedron():\n raise NotImplementedError", "def build_covariance_matrix (numpy_cloud, reduce_by_center_of_mass=True ):\r\n\r\n # build a sum over all points\r\n sum_xyz = np.sum (numpy_cloud, axis=0 )\r\n\r\n # and normalize it to get center of mass\r\n mass_center = sum_xyz / numpy_cloud.shape[0]\r\n\r\n # reduce point cloud by center of mass\r\n if (reduce_by_center_of_mass ):\r\n numpy_cloud_reduced = np.subtract (numpy_cloud[:, 0:3], mass_center )\r\n else:\r\n numpy_cloud_reduced = numpy_cloud.copy ()\r\n\r\n # build ATA matrix\r\n a_transposed_a = np.zeros ((3, 3 ))\r\n\r\n for point in numpy_cloud_reduced:\r\n a_transposed_a[0, 0] = a_transposed_a[0, 0] + np.float_power(point[0], 2 )\r\n a_transposed_a[0, 1] = a_transposed_a[0, 1] + point[0] * point[1]\r\n a_transposed_a[0, 2] = a_transposed_a[0, 2] + point[0] * point[2]\r\n\r\n a_transposed_a[1, 0] = a_transposed_a[1, 0] + point[0] * point[1]\r\n a_transposed_a[1, 1] = 
a_transposed_a[1, 1] + np.float_power(point[1], 2 )\r\n a_transposed_a[1, 2] = a_transposed_a[1, 2] + point[1] * point[2]\r\n\r\n a_transposed_a[2, 0] = a_transposed_a[2, 0] + point[0] * point[2]\r\n a_transposed_a[2, 1] = a_transposed_a[2, 1] + point[2] * point[1]\r\n a_transposed_a[2, 2] = a_transposed_a[2, 2] + np.float_power(point[2], 2 )\r\n\r\n return a_transposed_a, mass_center", "def center_of_mass_ij(self, time):\n if self.start_time <= time <= self.end_time:\n diff = time - self.start_time\n valid = np.flatnonzero(self.masks[diff] > 0)\n if valid.size > 0:\n com_i = 1.0 / self.timesteps[diff].ravel()[valid].sum() * np.sum(self.timesteps[diff].ravel()[valid] *\n self.i[diff].ravel()[valid])\n com_j = 1.0 / self.timesteps[diff].ravel()[valid].sum() * np.sum(self.timesteps[diff].ravel()[valid] *\n self.j[diff].ravel()[valid])\n else:\n com_i = np.mean(self.i[diff])\n com_j = np.mean(self.j[diff])\n com_i = int(np.round(com_i))\n com_j = int(np.round(com_j))\n else:\n com_i = None\n com_j = None\n return com_i, com_j", "def newCenter(x, y, group, iteration, lastKSet1, lastKSet2):\n\tsumOneX = 0\n\tsumOneY = 0\n\tsumTwoX = 0\n\tsumTwoY = 0\n\tnumOne = 0\n\tnumTwo = 0\n\n\tfor i in range(len(group[iteration])):\n\t\tif (group[iteration][i] == 1):\n\t\t\tsumOneX += x[i]\n\t\t\tsumOneY += y[i]\n\t\t\tnumOne += 1\n\t\telse:\n\t\t\tsumTwoX += x[i]\n\t\t\tsumTwoY += y[i]\n\t\t\tnumTwo += 1\n\n\tif(numOne == 0):\n\t\tkSet1 = lastKSet1\n\tif(numTwo == 0):\n\t\tkSet2 = lastKSet2\n\telse:\n\t\tkSet1 = [sumOneX/numOne, sumOneY/numOne]\n\t\tkSet2 = [sumTwoX/numTwo, sumTwoY/numTwo]\n\n\treturn (kSet1, kSet2)", "def centroid(arr):\n l = arr.shape[0]\n ixs = np.arange(l)\n arr = arr - np.median(arr)\n arr = np.where(arr < 0, 0, arr) \n ixs2 = ixs * ixs\n sumarr = arr.sum()\n cen = np.dot(arr, ixs)/sumarr\n return cen, math.sqrt(np.dot(arr, ixs2)/sumarr - cen * cen)", "def cells_center(self,refresh=False,mode='first3'):\n if refresh is True:\n to_update=slice(None)\n elif refresh is not False:\n to_update=refresh\n else:\n to_update = np.isnan(self.cells['_center'][:,0])\n\n if np.sum(to_update) > 0:\n if mode=='first3':\n p1,p2,p3 = [self.nodes['x'][self.cells['nodes'][to_update,i]] for i in [0,1,2]]\n self.cells['_center'][to_update] = circumcenter(p1,p2,p3)\n elif mode=='sequential':\n for c in np.arange(self.Ncells())[to_update]:\n points=self.nodes['x'][self.cell_to_nodes(c)]\n self.cells['_center'][c] = poly_circumcenter(points)\n \n return self.cells['_center']", "def compute_centers_of_hypercubes(self):\n for hypercube in self.hypercubes.flatten():\n sums = np.zeros((len(hypercube.coords)))\n for coords in hypercube.parent_hypercubes_indices:\n for index, summ in enumerate(sums):\n sums[index] += self.parent_hypercubes[coords].center[index]\n hypercube.center = [x / 4 for x in sums]", "def Kernel_Centering(self, K):\n N = K.shape[0]\n one_N = np.ones((N, N), dtype=int) / N\n K_centered = K - np.dot(one_N, K) - np.dot(K, one_N) + np.dot(one_N, np.dot(K, one_N))\n return K_centered", "def fillCostMatrix(xs0,ys0,xs1,ys1):\n M = int ( max(len(xs0),len(xs1)) ) #Number of centroids.\n costMatrix = np.ones((M,M))*-1\n x_rows = np.zeros(M)\n x_rows[0:len(xs0)] = xs0\n y_rows = np.zeros(M)\n y_rows[0:len(xs0)] = ys0\n \n x_cols = np.zeros(M)\n x_cols[0:len(xs1)] = xs1\n y_cols = np.zeros(M)\n y_cols[0:len(xs1)] = ys1\n\n for i in range(len(xs0)):\n for j in range(len(xs1)):\n costMatrix[i,j]=(y_rows[i]-y_cols[j])**2\n costMatrix[i,j] += (x_rows[i]-x_cols[j])**2\n return costMatrix", "def MeanCenter(X, 
mc_row, mc_col):\n data_headers = X.select_dtypes(include=[\"float64\"]).columns\n if mc_row:\n X[data_headers] = X[data_headers].sub(X[data_headers].mean(axis=1), axis=0)\n if mc_col:\n X[data_headers] = X[data_headers].sub(X[data_headers].mean(axis=0), axis=1)\n return X", "def cells_centroid_py(self):\n A=self.cells_area()\n cxy=np.zeros( (self.Ncells(),2), np.float64)\n\n refs=self.nodes['x'][self.cells['nodes'][:,0]]\n\n all_pnts=self.nodes['x'][self.cells['nodes']] - refs[:,None,:]\n\n for c in np.nonzero(~self.cells['deleted'])[0]:\n nodes=self.cell_to_nodes(c)\n\n i=np.arange(len(nodes))\n ip1=(i+1)%len(nodes)\n nA=all_pnts[c,i]\n nB=all_pnts[c,ip1]\n\n tmp=(nA[:,0]*nB[:,1] - nB[:,0]*nA[:,1])\n cxy[c,0] = ( (nA[:,0]+nB[:,0])*tmp).sum()\n cxy[c,1] = ( (nA[:,1]+nB[:,1])*tmp).sum()\n cxy /= 6*A[:,None] \n cxy += refs\n return cxy", "def center(coords):\n for c in coords:\n if 'avg' not in locals():\n avg = c\n else:\n avg += c\n return avg / len(coords)", "def center(self, x):\n\n shape = x.shape\n nx = shape[1]\n ny = shape[0]\n hnx = nx // 2\n hny = ny // 2\n\n temp = x[0:hny, 0:hnx].copy()\n x[0:hny, 0:hnx] = x[hny:ny, hnx:nx].copy()\n x[hny:ny, hnx:nx] = temp\n\n temp = x[0:hny, hnx:nx].copy()\n x[0:hny, hnx:nx] = x[hny:ny, 0:hnx].copy()\n x[hny:ny, 0:hnx] = temp", "def computeB(linsys_setup):\n datamaps, ninvs, beams, freqs, power_2d, precond_2d, clumaps, g_nu, \\\n map_prop = linsys_setup\n nx, ny, pixScaleX, pixScaleY = map_prop\n nFreq = len(g_nu); nCluster = len(clumaps[0])\n ksz = False\n if len(clumaps)==2: ksz = True\n \n def computeCMBY(d0):\n \"\"\"\n For CMB, y = S^1/2 A N^-1 d, where S is CMB signal covariance matrix (Cl's)\n \"\"\"\n # N.B. Reshaping operations required to go between 2D pixel arrays and \n # 1D vector (for linear system)\n d2 = 0\n for freq in range(nFreq):\n d1 = d0[freq].data.copy().reshape((ny,nx))\n d1 *= ninvs[freq]\n a_l = fft.fft(d1,axes=[-2,-1])\n a_l *= beams[freq]*precond_2d\n d1 = numpy.real(fft.ifft(a_l,axes=[-2,-1],normalize=True))\n d1 = numpy.reshape(d1,(nx*ny))\n d2 += d1\n return d2\n \n def computeClusterY(d0):\n \"\"\"\n For cluster, y = F^T A^T N^-1 d, where F is TSZ spatial template for cluster.\n \"\"\"\n d2 = numpy.zeros(nCluster)\n for ic in range(nCluster):\n for freq in range(nFreq):\n d1 = d0[freq].data.copy().reshape((ny, nx))\n d2[ic] += numpy.sum(d1 * ninvs[freq] * clumaps[0][ic][freq] * g_nu[freq])\n return d2\n \n def computeClusterKSZY(d0):\n \"\"\"\n For cluster, y = K^T A^T N^-1 d, where K is KSZ spatial template for cluster.\n \"\"\"\n d2 = numpy.zeros(nCluster)\n for ic in range(nCluster):\n for freq in range(nFreq):\n d1 = d0[freq].data.copy().reshape((ny, nx))\n d2[ic] += numpy.sum(d1 * ninvs[freq] * clumaps[1][ic][freq])\n return d2\n \n def computeMonopoleY(d0):\n \"\"\"\n Overall monopole amplitude.\n \"\"\"\n d2 = 0\n for freq in range(nFreq):\n d1 = d0[freq].data.copy().reshape((ny, nx))\n d2 += numpy.sum(d1 * ninvs[freq])\n return(d2)\n \n \n # CMB realisation; convolve white noise map with beam and multiply by \n # signal covmat S^1/2 in harmonic space\n b0 = numpy.random.randn(ny,nx)\n a_l = numpy.fft.fft2(b0, b0.shape)\n a_l *= precond_2d * power_2d**(-0.5)\n b0 = numpy.fft.irfft2(a_l, b0.shape)\n \n # Calculate per-band noise realisation.\n # Multiply by pixel-space N^1/2, convolve with beam, and sum over \n # cluster pixels to get RHS\n b1 = 0; b4 = 0\n b2 = numpy.zeros(nCluster)\n if ksz: b3 = numpy.zeros(nCluster)\n \n for freq in range(nFreq):\n _b = numpy.random.randn(ny,nx) * ninvs[freq]**0.5\n 
a_l = numpy.fft.fft2(_b) * beams[freq] * precond_2d\n b1 += numpy.fft.irfft2(a_l, _b.shape)\n b4 += numpy.sum(_b)\n for ic in range(nCluster):\n b2[ic] += numpy.sum( _b * g_nu[freq] * clumaps[0][ic][freq] )\n if ksz: b3[ic] += numpy.sum( _b * clumaps[1][ic][freq] )\n\n b0 = numpy.reshape(b0,(nx*ny))\n b1 = numpy.reshape(b1,(nx*ny))\n \n\n # Compute CMB and cluster data parts of b\n b_CMB = computeCMBY(datamaps) + b0 + b1\n b_mono = computeMonopoleY(datamaps) + b4\n b_tsz = computeClusterY(datamaps) + b2\n if ksz: b_ksz = computeClusterKSZY(datamaps) + b3\n \n # Return total b vector (Ncmbpix + 1 + (1|2)*Ncluster elements in vector)\n b = numpy.append(b_CMB, b_mono)\n b = numpy.append(b, b_tsz)\n if ksz: b = numpy.append(b, b_ksz)\n return b", "def gen_centers(self):\n\n \"\"\"x_track = self.cs.discrete_rollout()\n t = np.arange(len(x_track))*self.dt\n # choose the points in time we'd like centers to be at\n c_des = np.linspace(0, self.cs.run_time, self.n_bfs)\n self.c = np.zeros(len(c_des))\n for ii, point in enumerate(c_des):\n diff = abs(t - point)\n self.c[ii] = x_track[np.where(diff == min(diff))[0][0]]\"\"\"\n\n # desired activations throughout time\n des_c = jnp.linspace(0, self.cs.run_time, self.n_bfs)\n\n self.c = np.ones(len(des_c))\n for n in range(len(des_c)):\n # finding x for desired times t\n self.c[n] = jnp.exp(-self.cs.ax * des_c[n])\n self.c = jnp.array(self.c)", "def stempot(self,xmax,ymax,nx,ny,atms,pixelshift,scalefactor):\n #zed=2 for rutherford scattering of the nucleus, less for screening\n zed = 1.7\n\n ix = numpy.arange(1.0,nx)\n iy = numpy.arange(1.0,ny)\n dx = xmax/nx\n dy = ymax/ny\n rx = numpy.arange(0,xmax-dx,dx)\n ry = numpy.arange(0,ymax-dy,dy)\n\n Zatom = atms.get_atomic_numbers()\n #translate atoms such that the center of mass is in the center of the computational cell\n com = atms.get_center_of_mass()\n #com = [ 44.40963074 , 44.65497562 , 44.90406073] #for AuNP\n #com = numpy.array(com)\n #print 'com',com -0.149836425, 0.29967285, 0\n #com += [0.41205016875, 0.6742639125, 0] #for rotated line profile \n #com += [-0.149836425, 0.29967285, 0] #for AuNP\n #com += pixelshift\n #print 'com+pixelshift',com\n cop = xmax/2.0\n trans = [cop-i for i in com]\n atms.translate(trans)\n positions=atms.get_positions()\n ax=[]\n ay=[]\n az=[]\n for o,t,h in positions:\n ax.append(o)\n ay.append(t)\n az.append(h)\n ax = numpy.array(ax)\n ay = numpy.array(ay)\n az = numpy.array(az)\n amax = len(Zatom)\n\n #find boundaries of slice\n axmin = min(ax)\n axmax = max(ax)\n aymin = min(ay)\n aymax = max(ay)\n\n V= numpy.zeros((nx,ny))\n\n #map x and y coords of the atoms to the nearest grid points\n #A fraction of the atom must be assigned to the closest gridpoints\n #to avoid sum and difference frequencies appearing in the image\n #grid point to the left of the atom\n ix = numpy.array([math.floor(axi/dx) for axi in ax])\n #apply periodic boundary conditions\n iax = numpy.array([math.fmod(iaxi,nx) for iaxi in ix])\n ibx = numpy.array([math.fmod(iaxi+1,nx) for iaxi in ix])\n #fraction of atom at iax\n fax = numpy.array([1-math.fmod((axi/dx),1 ) for axi in ax])\n #grid point above the atom\n iy = numpy.array([math.floor(ayi/dy) for ayi in ay])\n #apply periodic boundary conditions\n iay = numpy.array([math.fmod(iayi,ny) for iayi in iy])\n iby = numpy.array([math.fmod(iayi+1,ny) for iayi in iy])\n #fraction of atom at iay \n fay = numpy.array([1-math.fmod((ayi/dy),1 ) for ayi in ay])\n #Add each atom to the potential grid\n V1 = numpy.array([fax[i] * fay[i] * (Zatom[i]**zed) for i 
in range(len(fax))])\n V2 = numpy.array([(1-fax[i]) * fay[i] * (Zatom[i]**zed) for i in range(len(fax))])\n V3 = numpy.array([fax[i] * (1-fay[i]) * (Zatom[i]**zed) for i in range(len(fax))])\n V4 = numpy.array([(1-fax[i]) * (1-fay[i]) * (Zatom[i]**zed) for i in range(len(fax))])\n #V1 = numpy.array([fax[i] * fay[i] * scalefactor for i in range(len(fax))])\n #V2 = numpy.array([(1-fax[i]) * fay[i] * scalefactor for i in range(len(fax))])\n #V3 = numpy.array([fax[i] * (1-fay[i]) * scalefactor for i in range(len(fax))])\n #V4 = numpy.array([(1-fax[i]) * (1-fay[i]) * scalefactor for i in range(len(fax))])\n\n for j in range(amax):\n V[iax[j],iay[j]] += V1[j]\n V[ibx[j],iay[j]] += V2[j]\n V[iax[j],iby[j]] += V3[j]\n V[ibx[j],iby[j]] += V4[j]\n rev_trans = [-1.0*i for i in trans]\n atms.translate(rev_trans)\n return V", "def adaptiveCentroid(data=None,sigma=None):\n nrow,ncol=data.shape\n Isum = data.sum()\n Icol = data.sum(axis=0) # sum over all rows\n Irow = data.sum(axis=1) # sum over all cols\n colgrid = np.arange(ncol)\n rowgrid = np.arange(nrow)\n rowmean=np.sum(rowgrid*Irow)/Isum\n colmean=np.sum(colgrid*Icol)/Isum\n ROW,COL=np.indices((nrow,ncol))\n maxItr = 50\n EP = 0.0001\n for i in range(maxItr):\n wrmat = wr(ROW,COL,rowmean,colmean,sigma)\n IWmat = data*wrmat\n IWcol = IWmat.sum(axis=0)\n IWrow = IWmat.sum(axis=1)\n drowmean = np.sum((rowgrid-rowmean)*IWrow)/np.sum(IWrow)\n dcolmean = np.sum((colgrid-colmean)*IWcol)/np.sum(IWcol)\n rowmean = rowmean+2.*drowmean\n colmean = colmean+2.*dcolmean\n if drowmean**2+dcolmean**2 <= EP:\n break\n\n return rowmean,colmean", "def center_size(boxes):\n return torch.cat([(boxes[:, :2] + boxes[:, 2:])/2, # cx, cy\n boxes[:, :2] - boxes[:, 2:]], 1) # w, h", "def update_centroids(X,idx,K):\n n = np.size(X,1)\n centroids = np.zeros((K,n))\n for i in range(0,K):\n ci = idx==i\n ci = ci.astype(int)\n total_number = sum(ci)\n ci.resize((np.size(X,0),1))\n total_matrix = np.matlib.repmat(ci,1,n)\n ci = np.transpose(ci)\n total = np.multiply(X,total_matrix)\n try:\n centroids[i] = (1/total_number)*np.sum(total,axis=0)\n except Exception:\n centroids[i] = 0 \n return centroids", "def compute_center(self, mole_object):\r\n if mole_object.plugin_type == \"PyMOL\":\r\n sel = PymolPlugin.PymolPlugin().get_model('all')\r\n cnt = len(sel.atom)\r\n\r\n else:\r\n sel = ChimeraPlugin.ChimeraPlugin().select()\r\n cnt = len(ChimeraPlugin.ChimeraPlugin().current_atoms())\r\n\r\n cent_x = 0\r\n cent_y = 0\r\n cent_z = 0\r\n\r\n if cnt == 0:\r\n return 0, 0, 0\r\n\r\n if mole_object.plugin_type == \"PyMOL\":\r\n\r\n for a in sel.atom:\r\n cent_x += a.coord[0]\r\n cent_y += a.coord[1]\r\n cent_z += a.coord[2]\r\n\r\n else:\r\n\r\n for a in ChimeraPlugin.ChimeraPlugin().current_atoms():\r\n cent_x += a.coord()[0]\r\n cent_y += a.coord()[1]\r\n cent_z += a.coord()[2]\r\n\r\n cent_x /= cnt\r\n cent_y /= cnt\r\n cent_z /= cnt\r\n\r\n self.point_x.component('entryfield').setentry(cent_x)\r\n self.point_y.component('entryfield').setentry(cent_y)\r\n self.point_z.component('entryfield').setentry(cent_z)\r\n\r\n self.show_crisscross(mole_object)", "def calCentroids(X, idx, K):\r\n # Useful variables\r\n m, n = X.shape\r\n # You need to return the following variables correctly.\r\n centroids = np.zeros((K, n))\r\n\r\n\r\n # ====================== YOUR CODE HERE ======================\r\n\r\n for i in np.arange(K):\r\n centroids[i] = np.mean(X[idx == i], axis = 0)\r\n\r\n\r\n # =============================================================\r\n return centroids", "def center(x):\n 
return x - x.mean()", "def ksc_toy(A, K):\n m = A.shape[0]\n mem = ceil(np.dot(K, rand(m, 1)))\n cent = np.zeros(shape=(K, A.shape[1]), dtype='float64')\n for iter_ in range(1, 101):\n prev_mem = mem\n for k in range(1, (K +1)):\n cent[(k -1), :] = ksc_center(mem, A, k, cent[(k -1), :])\n for i in range(1, (m +1)):\n x = A[(i -1), :]\n for k in range(1, (K +1)):\n y = cent[(k -1), :]\n dist = dhat_shift(x, y)\n D[(i -1), (k -1)] = dist\n val, mem = np.min(D, np.array([]), 2) # nargout=2\n if norm(prev_mem - mem) == 0:\n break\n return mem, cent", "def centroid(func, step=0.1):\n points = func.points(step)\n num, den = 0, 0\n\n for x, y in points:\n num += x * y\n den += y\n\n return num / den", "def calc_msd(pos_x, pos_y, pos_z):\n particles = pos_x.shape[0]\n N = pos_x.shape[1] \n tamsd = np.zeros(shape = (particles, N - 1)) \n\n for p in np.arange(start = 0, stop = particles, step = 1): \n for n in np.arange(start = 1, stop = N, step = 1): \n sumdis = np.array([((pos_x[p, i + n] - pos_x[p, i]) ** 2 + (pos_y[p, i + n] - pos_y[p, i]) ** 2 + (pos_z[p, i + n] - pos_z[p, i]) ** 2) for i in np.arange(start = 1, stop = N - n, step = 1)]).sum()\n tamsd[p, n] = sumdis / (N - n) \n return tamsd", "def centroid(self, unit='spatial'):\n com = ndimage.center_of_mass(self.data)\n if unit != 'spatial':\n return com\n else:\n # tuple - cast from generator\n # sample spacing - indices to units\n # x-c -- index shifted from center\n return tuple(self.sample_spacing * (x-c) for x, c in zip(com, (self.center_y, self.center_x)))", "def iterate_center_of_mass(sphere, inner_radius, stepsize=0.05,\n com_kwargs=None):\n\n if com_kwargs is None:\n com_kwargs = {}\n\n yield sphere\n while (sphere.radius > inner_radius):\n com = sphere.quantities.center_of_mass(**com_kwargs)\n try:\n sphere = sphere.ds.sphere(com, (1-stepsize) * sphere.radius)\n yield sphere\n except YTSphereTooSmall:\n yield None\n break", "def _center(self, forces):\n\t\t\n\t\tzipped = zip(self.grid.corners(), forces)\n\t\treturn self._weightedAverage(zipped)", "def compute_centers(landmarks):\n b = landmarks.shape[0]\n lms = landmarks.reshape((b, -1, 3))\n\n eye_left_centers = lms[:, EYE_LEFT_CONTOUR, :2].mean(axis=1)\n eye_right_centers = lms[:, EYE_RIGHT_CONTOUR, :2].mean(axis=1)\n mouth_centers = lms[:, MOUTH_INNER_CONTOUR, :2].mean(axis=1)\n\n a = np.concatenate((eye_left_centers, eye_right_centers, mouth_centers), axis=1)\n\n return a", "def center(box):\n x_center = box[:, 0] + (box[:, 2] - box[:, 0]) // 2\n y_center = box[:, 1] + (box[:, 3] - box[:, 1]) // 2\n return torch.stack((x_center, y_center)).t().to(box.device)", "def center_of_mass_polygon(polygon):\n L = 0\n cx = 0\n cy = 0\n cz = 0\n p = len(polygon)\n for i in range(-1, p - 1):\n p1 = polygon[i]\n p2 = polygon[i + 1]\n d = distance_point_point(p1, p2)\n cx += 0.5 * d * (p1[0] + p2[0])\n cy += 0.5 * d * (p1[1] + p2[1])\n cz += 0.5 * d * (p1[2] + p2[2])\n L += d\n cx = cx / L\n cy = cy / L\n cz = cz / L\n return cx, cy, cz", "def M_step(X, gamma):\n N = X.shape[0] # number of objects\n C = gamma.shape[1] # number of clusters\n d = X.shape[1] # dimension of each object\n\n ### YOUR CODE HERE\n qsum = np.sum(gamma, axis=0)\n pi = qsum/N\n \n # Update mu\n mu = np.zeros((C,d))\n for c in range(C):\n mu_sum = np.zeros((d,))\n for i in range(N):\n x_i = X[i]\n mu_sum += gamma[i,c] * x_i\n mu[c] = mu_sum / qsum[c]\n \n \n # Update sigma\n sigma = np.zeros((C, d, d))\n for c in range(C):\n sigma_sum = np.zeros((d,d))\n for i in range(N):\n x_i = X[i]\n td = (x_i - 
mu[c]).reshape((d,1))\n sigma_sum += gamma[i,c] * td.dot(td.T)\n sigma[c] = sigma_sum / qsum[c]\n\n return pi, mu, sigma", "def cell_centroids_original(crd, con):\n \n nele = con.shape[0]\n dim = crd.shape[1]\n centroid_xy = np.zeros((nele, dim))\n for i in range(len(con)):\n el_crds = crd[con[i, :], :] # (4, 2)\n centroid_xy[i, :] = (el_crds).mean(axis=0)\n return centroid_xy", "def center(self):\n\n ca_atoms = self.ca_atoms\n ca_atom_vectors = ca_atoms[\"ca.atom\"].to_list()\n ca_atom_vectors = [i for i in ca_atom_vectors if i is not None]\n centroid = self.center_of_mass(ca_atom_vectors, geometric=False)\n centroid = Vector(centroid)\n\n return centroid", "def center_data(x: npt.NDArray, y: npt.NDArray) -> Tuple[npt.NDArray, ...]:\n centroid = np.array([x.mean(), y.mean()])\n xc = x - centroid[0]\n yc = y - centroid[1]\n return xc, yc, centroid", "def calc_main_axis(self):\n #Clarify why the above step has been done\n c0, c1, c2 = self.calc_geom_center ()\n M = numpy.zeros ((3, 3), dtype=float)\n M = [[0] * 3, [0] * 3, [0] * 3]\n for x in self.atomcoords:\n xi = x[0] - c0\n yi = x[1] - c1\n zi = x[2] - c2\n M[0][0] = M[0][0] + xi * xi\n M[0][1] = M[0][1] + xi * yi\n M[0][2] = M[0][2] + xi * zi\n M[1][1] = M[1][1] + yi * yi\n M[1][2] = M[1][2] + yi * zi\n M[2][2] = M[2][2] + zi * zi\n M[1][0] = M[0][1]\n M[2][0] = M[0][2]\n M[2][1] = M[1][2]\n M = numpy.array (M)\n d = sum (numpy.diag (M))\n M = -M\n M[0, 0] = M[0, 0] + d\n M[1, 1] = M[1, 1] + d\n M[2, 2] = M[2, 2] + d\n\n eigenVals, eigenVecs = numpy.linalg.eig (M)\n eigenVecs = eigenVecs.transpose ()\n return eigenVecs", "def compute_barycenters(self):\n barycenters = []\n for i,tri in enumerate(self.tri_pnts):\n #import pdb; pdb.set_trace()\n barycenters.append(1./3.*self.points[tri].sum(axis=0))\n tri0, tri1, tri2 = tri\n a = np.linalg.norm(self.points[tri1]-self.points[tri0])\n b = np.linalg.norm(self.points[tri2]-self.points[tri1])\n c = np.linalg.norm(self.points[tri0]-self.points[tri2])\n #import pdb; pdb.set_trace()\n self.side_length[i,:] = a,b,c\n s = 0.5 * (a+b+c)\n self.V[i] = math.sqrt(s*(s-a)*(s-b)*(s-c));\n self.lmin[i] = 2*self.V[i]/s\n return np.array(barycenters)", "def calc_mass(self):\n\n star = self.star\n\n M, K, N = star.mesh_size\n ph = star.phi_coords\n mu = star.mu_coords\n r = star.r_coords\n\n def Q1(j, k):\n sum = 0\n\n for i in range(0, M - 2, 2):\n sum += (1 / 6) * (ph[i + 2] - ph[i]) * (star.rho[i, j, k] +\n 4 *\n star.rho[i + 1, j, k]\n + star.rho[i + 2, j, k])\n\n return 2 * sum\n\n def Q2(k):\n sum = 0\n\n for j in range(0, K - 2, 2):\n sum += (1 / 6) * (mu[j + 2] - mu[j]) * \\\n (Q1(j, k) + 4 * Q1(j + 1, k) + Q1(j + 2, k))\n\n return 2 * sum\n\n mass = 0\n\n for k in range(0, N - 2, 2):\n mass += (1 / 6) * (r[k + 2] - r[k]) * (r[k]**2 * Q2(k) +\n 4 * r[k + 1]**2 * Q2(k + 1) +\n r[k + 2]**2 * Q2(k + 2))\n\n return mass", "def log_marginal_likelihood_normal_cdf(self):\n #we define the loop for the batchsize\n num_batches = int(np.ceil(self.W.shape[0] / self.batchsize_dim))\n slices=np.array_split(np.arange(0,self.W.shape[0]),num_batches)\n def batch_indices(iter):\n idx = iter \n return slice(slices[idx][0],slices[idx][-1]+1)\n \n batch_slices=[batch_indices(iter) for iter in range(num_batches)]\n #print(batch_slices,num_batches,self.batchsize_dim)\n def innerloop(slices):\n if type(slices)!=list:\n slices=[slices]\n #print(slices)\n ml=[]\n for idx in slices:\n if self.type_y=='affine':\n γp, Γp, _, _ = self.compute_gammas_affine(self.params,self.X,self.W[idx,:],self.Z[idx,:])\n elif 
self.type_y=='mixed':\n γp, Γp, _, _, _ = self.compute_gammas_mixed(self.params,self.X,self.Y,self.C,self.W[idx,:],self.Z[idx,:])\n #print(y1.shape)\n res = gaussianCDF(Γp,-np.ones((γp.shape[0],1))*np.inf,γp)\n ml.append(res)\n return ml\n \n if self.type_y=='affine':\n \n results = Parallel(n_jobs=self.num_cores )(delayed(innerloop)(b) for b in batch_slices)\n #print(results)\n res1=np.sum(results)\n \n _, _, γ, Γ = self.compute_gammas_affine(self.params,self.X,self.W[[0],:],self.Z[[0],:])#we only need γ, Γ\n #print()\n if self.latent_dim>0:\n res2 = gaussianCDF(Γ+self.jitter*np.eye(Γ.shape[0]),-np.ones((γ.shape[0],1))*np.inf,γ)\n logres2 = np.log(res2+1e-200)\n else:\n logres2 = 0.0\n #print( np.log(res1+1e-300),logres2)\n res= np.log(res1+1e-300)-logres2 \n elif self.type_y=='regression':\n if self.latent_dim>0:\n γp, Γp, γ, Γ = self.compute_gammas_regression(self.params,self.X,self.Y,self.C)\n res2 = gaussianCDF(Γ+self.jitter*np.eye(Γ.shape[0]),-np.ones((γ.shape[0],1))*np.inf,γ)\n #from scipy.stats import multivariate_normal\n try:\n res1 = gaussianCDF(Γp,-np.ones((γp.shape[0],1))*np.inf,γp)\n res= np.log(res1+1e-300)-np.log(res2+1e-300)\n except:\n #print(self.params, Γp)\n res=-10.0**300\n else:\n return 0.0\n elif self.type_y=='mixed':\n results = Parallel(n_jobs=self.num_cores )(delayed(innerloop)(b) for b in batch_slices)\n res1=np.sum(results)\n _, _, γ, Γ = self.compute_gammas_affine(self.params,self.X,self.W[[0],:],self.Z[[0],:])#we only need γ, Γ\n if self.latent_dim>0:\n res2 = gaussianCDF(Γ+self.jitter*np.eye(Γ.shape[0]),-np.ones((γ.shape[0],1))*np.inf,γ)\n logres2 = np.log(res2+1e-200)\n else:\n logres2 = 0.0\n res= np.log(res1+1e-300)-logres2\n if np.isnan(res):\n return -10.0**300 \n else:\n return res", "def get_cell_centroids(mesh):\n num_els = mesh.num_cells()\n coords = mesh.coordinates()\n cells = mesh.cells()\n dim = len(coords[0])\n\n cell_cent = np.zeros((num_els, dim), dtype=float, order='c')\n\n for i in range(num_els):\n pts = [coords[idx] for idx in cells[i]]\n cell_cent[i] = (1/(dim+1))*sum(pts) #this works only for 2D/3D triangles\n\n return cell_cent", "def _drawCenterOfMass(self, planets):\n if not planets:\n return Vec2()\n\n center_of_mass = planets[0].mass * planets[0].pos\n for p in planets[1:]:\n center_of_mass += p.mass * p.pos\n center_of_mass /= sum([p.mass for p in planets])\n\n screen_coords = self._posToScreenCoords(center_of_mass)\n\n if not self._isInScreen(screen_coords):\n return\n\n pygame.draw.line(\n self.screen,\n (200, 100, 100),\n (screen_coords[0] - 3, screen_coords[1]),\n (screen_coords[0] + 3, screen_coords[1]),\n )\n pygame.draw.line(\n self.screen,\n (200, 100, 100),\n (screen_coords[0], screen_coords[1] - 3),\n (screen_coords[0], screen_coords[1] + 3),\n )", "def centering (nums):\r\n n_mean = mean(nums)\r\n for i in xrange(len(nums)):\r\n nums[i] = float(nums[i]-n_mean)\r\n return nums", "def tile_calculation(xi, yi, axi, ayi, positions, weights):\n for j in range(cuda.blockDim.x):\n xj = positions[j,0]\n yj = positions[j,1]\n wj = weights[j]\n axi, ayi = body_body_interaction(xi, yi, xj, yj, wj, axi, ayi)\n return axi, ayi", "def gen_center(T, y):\r\n T_pos = [T[i] for i in range(len(y)) if y[i] == 1]\r\n C = np.mean(T_pos, 0).reshape(1, -1)\r\n return C", "def m200(z, x, y=[], zo=0, xycenter=None, xyunit='deg', zunit='redshift',\n membership='shifting gapper', correct_profile='mbm10',\n mass_scaling='evrard08', converge=True, bootstrap=1000):\n if zo == 0:\n zo = scipy.median(z)\n if zunit == 'velocity':\n v = z\n zo /= c\n 
else:\n v = c * (z-zo)/(1+zo)\n # then x corresponds to cluster-centric distance:\n if len(y) == 0:\n if xyunit == 'kpc':\n r = x / 1e3\n elif xyunit in ('deg', 'arcmin', 'arcsec'):\n r = cosmology.dProj(zo, x, input_unit=xyunit, unit='Mpc')\n # otherwise use the given center to calculate distances\n elif xyunit in ('kpc', 'Mpc'):\n r = scipy.hypot(x, y)\n if xyunit == 'kpc':\n r /= 1e3\n else:\n if xyunit == 'arcsec':\n x /= 3600.\n y /= 3600.\n if xyunit == 'arcmin':\n x /= 60.\n y /= 60.\n dist = astCoords.calcAngSepDeg(x, y, xycenter[0], xycenter[1])\n r = cosmology.dProj(zo, dist, input_unit='deg', unit='Mpc')\n\n \n\n\n \n return", "def clusters_allocate_cells(self):\n for cluster in self.clusters:\n cluster.cells[:] = []\n for cell in self.block_proc:\n wdists = []\n for cluster in self.clusters:\n s = cluster.size\n d = ( (cell.x-cluster.x)**2 + (cell.y-cluster.y)**2 +\n (cell.z-cluster.z)**2 )\n d = numpy.sqrt(d)\n c = self.c\n # TODO: choose a better distance function below\n r = d*(c+(1-c)*numpy.exp(-s/d))\n r = numpy.clip(r,0,r)\n wdists.append(r)\n self.clusters[numpy.argmin(wdists)].cells.append(cell)", "def center_of_mass(self, tolerance=1e-9):\n props = GProp_GProps()\n brepgprop_VolumeProperties(self.topods_solid(), props, tolerance)\n com = props.CentreOfMass()\n return geom_utils.gp_to_numpy(com)" ]
[ "0.7113222", "0.6913504", "0.68147796", "0.66269344", "0.6623906", "0.6605114", "0.6573181", "0.6555954", "0.65233356", "0.65076435", "0.6476598", "0.64425707", "0.6414334", "0.6406729", "0.63674235", "0.63496435", "0.6226081", "0.6160094", "0.61026037", "0.6084239", "0.6066669", "0.60612375", "0.60513264", "0.603614", "0.60261333", "0.6025939", "0.60209906", "0.595546", "0.5950495", "0.58809245", "0.58806586", "0.5863308", "0.5859134", "0.58394444", "0.5770351", "0.5767446", "0.5762359", "0.5761856", "0.5756736", "0.5754618", "0.5740314", "0.57166445", "0.57137406", "0.56523776", "0.56371766", "0.5626732", "0.56064093", "0.5605906", "0.559901", "0.55935264", "0.558686", "0.55822754", "0.557583", "0.557332", "0.5572636", "0.5569737", "0.5568278", "0.5562654", "0.5537748", "0.55375373", "0.5536268", "0.55321985", "0.55250484", "0.55242234", "0.55143994", "0.55115956", "0.5500751", "0.54955775", "0.5490137", "0.54654074", "0.5458902", "0.5455092", "0.5454274", "0.54542243", "0.5442751", "0.54406315", "0.5434115", "0.54311895", "0.54267967", "0.54144394", "0.5413527", "0.5410412", "0.54097563", "0.54039615", "0.53971094", "0.5396325", "0.53951246", "0.5382218", "0.5377586", "0.5374126", "0.53696257", "0.5368056", "0.5354712", "0.53499687", "0.5341206", "0.5335175", "0.53208756", "0.53132766", "0.53131324", "0.5313101" ]
0.74505234
0
Creates a picture grid (left to right, top to bottom).
def image_grid(images: np.ndarray, nrow: int = 8, fill_value=(0, 0, 0)) -> np.ndarray:
    if not images.ndim == 4:
        raise ValueError("Input shape must be [n, height, width, channel]")
    h, w = images.shape[1:3]
    nbatch = images.shape[0]
    channels = images.shape[3]
    rows = ceildiv(nbatch, nrow)
    cols = min(nbatch, nrow)
    if channels != len(fill_value):
        raise ValueError("fill_value must match number of channels")
    out = np.full((h * rows, w * cols, channels), fill_value, dtype=images.dtype)
    for i in range(images.shape[0]):
        row = i // cols
        col = i % cols
        out[h * row : h * (row + 1), w * col : w * (col + 1), :] = images[i]
    return out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_grid(images, n_rows=4, n_cols=4):\n k = min(n_rows * n_cols, len(images))\n indices = [i for i in range(k)]\n return _create_grid(images, indices, n_rows, n_cols)", "def create_grid(grid):\r\n for i in range(4):\r\n grid.append([0,0,0,0])", "def generate_image_grid(sess, op):\n n = 10\n x_points = np.linspace(-20, 20, n)\n y_points = np.linspace(-20, 20, n)\n\n nx, ny = len(x_points), len(y_points)\n plt.subplot()\n gs = gridspec.GridSpec(nx, ny, hspace=0.05, wspace=0.05)\n\n for i, g in enumerate(gs):\n z = np.concatenate(([x_points[int(i / ny)]], [y_points[int(i % nx)]]))\n z = np.reshape(z, (1, 2))\n x = sess.run(op, feed_dict={decoder_input: z})\n ax = plt.subplot(g)\n img = np.array(x.tolist()).reshape(28, 28)\n ax.imshow(img, cmap='gray')\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_aspect('auto')\n plt.show()", "def create_grid(grid):\r\n inner = [0]*4\r\n for i in range(4):\r\n grid.append(inner[:])", "def grid_maker(width, height):\n grid = [['.' for i in range(width)] for j in range(height)]\n return grid", "def create_grid(grid):\r\n for i in range(4):\r\n grid.append([0]*4)\r\n return grid", "def create_grid(self):\n for y_iter in range(self.NUM_GRIDS):\n for x_iter in range(self.NUM_GRIDS):\n x, y = x_iter * self.SQUARE_SIZE, y_iter * self.SQUARE_SIZE\n x_stop, y_stop = x + self.SQUARE_SIZE, y + self.SQUARE_SIZE\n cords = x, y, x_stop, y_stop\n self.canvas.create_rectangle(cords, outline=self.color,\n fill=self.default_color)", "def makeImage(self):\n\n for row in range(self.height):\n self.makeRow(row)\n self.window.update() # display a row of pixels", "def grid(images, cols = 2, save = False, filename = \"\", show = False):\n \n rows = ceil(len(images) / cols)\n \n fig, ax = plt.subplots(rows, 1)\n\n index = 0\n element = []\n for row in range(rows):\n for col in range(cols): \n if index < len(images):\n element.append(images[index])\n index += 1\n \n stack = np.hstack(tuple(element))\n ax[row].axis('off')\n ax[row].imshow(stack)\n element = []\n \n plt.tight_layout()\n \n if save:\n fig.savefig(filename)\n\n if show:\n plt.show(fig)\n \n return 0", "def create_grid(self):\n return [[0] * self.width for _ in range(self.height)]", "def make_grid(X,Y): \r\n grid = []\r\n for j in range(Y):\r\n row = []\r\n for i in range(X):\r\n row.append( block((i,j)) )\r\n grid.append(row)\r\n return grid", "def grid_image(output):\n grid = []\n for data in output:\n grid += [make_grid(data, nrow=5, normalize=True)]\n return grid", "def make_grid(self):\n for k in range(0, NUM + 1):\n self.create_line(k * UNIT, 0, k * UNIT, SIZE, width=THICKNESS)\n self.create_line(0, k * UNIT, SIZE, k * UNIT, width=THICKNESS)", "def make_grid(self):\n length = self.size / 8\n # draw horizontal lines\n for y in range(0, self.size, length):\n self.window.create_line(0, y, self.size, y, fill = \"blue\")\n \n # draw vertical lines\n for x in range(0, self.size, length):\n self.window.create_line(x, 0, x, self.size, fill = \"blue\")\n\n # draw the axes red\n self.window.create_line(\n 0,\n self.size / 2,\n self.size, \n self.size / 2, \n fill = \"red\"\n )\n self.window.create_line(\n self.size / 2, 0,\n self.size / 2, \n self.size, \n fill = \"red\"\n )\n print(\"Grid Made.\")", "def make_grid(self):\n\n\t\tinit_grid = (self.grid_width//2, self.grid_height//2)\n\t\tgrid_list = []\n\n\t\tfor i in range(self.canv_width//self.grid_width):\n\t\t\tfor j in range(self.canv_height//self.grid_height):\n\t\t\t\tif j == 0 or j%2 ==0:\n\t\t\t\t\tgrid_list.append((init_grid[0]+i*self.grid_width, 
init_grid[1]+j*self.grid_height))\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tgrid_list.append((grid_list[-1][0]+(self.grid_width//2), init_grid[1]+j*self.grid_height))\n\n\t\treturn grid_list", "def make_image(self, save=False):\n\n # image_grid = np.full((self.size_x, self.size_y), '#888888', dtype=str)\n image_grid = np.full((self.size_x, self.size_y, 3), 0, dtype=np.uint8)\n\n # self.grid = np.flip(self.grid, 1)\n\n # self.grid = np.swapaxes(self.grid, 0, 0)\n \"\"\"\n image_grid[self.grid == 0] = 'FFFFFF'\n image_grid[self.grid == 1] = '000000'\n image_grid[self.grid == 2] = '00FF00'\n image_grid[self.grid == 3] = '0000FF'\n image_grid[self.grid == 4] = 'FFFF00'\n image_grid[self.grid == 5] = '00FFFF'\n image_grid[self.grid == 6] = 'FF00FF'\n \"\"\"\n image_grid[self.grid == 0] = (1, 1, 1)\n image_grid[self.grid == 1] = (0, 0, 0)\n image_grid[self.grid == 2] = (1, 0, 1)\n image_grid[self.grid == 3] = (0, 1, 0)\n image_grid[self.grid == 4] = (0, 0, 1)\n image_grid[self.grid == 5] = (0, 1, 1)\n image_grid[self.grid == 6] = (1, 1, 0)\n\n #for ant in self.ants:\n # image_grid[ant.x, ant.y] = (1, 0, 0)\n\n # image_grid = image_grid.swapaxes(0, 1)\n # self.grid = self.grid.swapaxes(0, 1)\n\n\n\n DPI = 100\n width, height = 1000, 1000\n fig = plt.figure(figsize=(width / DPI, height / DPI), dpi=DPI, facecolor='k')\n ax = fig.add_subplot()\n\n plt.axis('equal')\n plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)\n\n for y in range(self.size_x):\n for x in range(self.size_y):\n if self.grid[x, y] != 0:\n # Only plot a hexagon if its state is not zero.\n plot_hex(ax, x, y, image_grid[x, y])\n\n ax.set_xlim(0, self.size_x)\n ax.set_ylim(0, self.size_y)\n\n plt.show()\n\n logging.info(\"Finished Image Processing\")", "def draw_grid(self):\n for x in range(0, WIDTH, TILESIZE):\n pg.draw.line(self.screen, LIGHTGREY, (x, 0), (x, HEIGHT))\n \n for y in range(0, HEIGHT, TILESIZE):\n pg.draw.line(self.screen, LIGHTGREY, (0, y), (WIDTH, y))", "def create_grid(self):\n\n # If called when a grid already exists create a new grid\n if self.grid:\n self.grid = []\n\n grid_pen = QPen(QColor(215, 215, 215), 1)\n w = 10000\n h = 10000\n self.addLine(-10000, 0, 10000, 0, QPen(QColor(0, 0, 0), 2))\n self.addLine(0, -10000, 0, 10000, QPen(QColor(0, 0, 0), 2))\n\n w = int(w / self.grid_spacing) * self.grid_spacing\n h = int(h / self.grid_spacing) * self.grid_spacing\n for i in range(-w, w, self.grid_spacing):\n if i == 0:\n pass\n else:\n line = self.addLine(-w, i, w, i, grid_pen)\n line.setZValue(-1)\n self.grid.append(line)\n for i in range(-h, h, self.grid_spacing):\n if i == 0:\n pass\n else:\n line = self.addLine(i, -h, i, h, grid_pen)\n line.setZValue(-1)\n self.grid.append(line)\n\n self.grid_built = True", "def generate_image_grid(sess, df, filenames,op, op2):\n #x_points = np.arange(0, 1, 1.5).astype(np.float32)\n #y_points = np.arange(0, 1, 1.5).astype(np.float32)\n\n nx, ny = 12, 1\n #plt.subplot()\n gs = gridspec.GridSpec(nx, ny, hspace=1, wspace=0.05)\n # input_x = sess.run(op2, feed_dict={x_input: df[0:24]})\n #\n # plt.imshow(np.array(df[0].tolist()).reshape(28, 28), cmap='gray')\n # plt.show()\n # x = sess.run(op, feed_dict={decoder_input: input_x[0].reshape(1,2)})\n # img = np.array(x.tolist()).reshape(28, 28)\n #\n # plt.imshow(img, cmap='gray')\n # plt.show()\n\n \"\"\" grid \"\"\"\n input_x = sess.run(op2, feed_dict={x_input: df[0:24]})\n for i, g in enumerate(gs):\n\n x = sess.run(op, feed_dict={decoder_input: input_x[i].reshape(1,2)})\n ax = plt.subplot(g)\n img = 
np.array(x.tolist()).reshape(28, 28)\n ax.imshow(img, cmap='gray')\n ax.set_xticks([])\n ax.set_yticks([])\n #ax.set_aspect('auto')\n ax.set_title(filenames[i])\n plt.show()\n\n for i, g in enumerate(gs):\n\n ax = plt.subplot(g)\n img = np.array(df[i].tolist()).reshape(28, 28)\n ax.imshow(img, cmap='gray')\n ax.set_xticks([])\n ax.set_yticks([])\n #ax.set_aspect('auto')\n ax.set_title(filenames[i])\n plt.show()", "def create_grid(height, width):\n grid = []\n \n for r in range(height):\n row = [0] * width # a row containing width 0s\n grid += [row]\n\n return grid", "def make_im_grid(ims, n_rows, n_cols, space, pad_val):\n assert (ims[0].ndim == 3) and (ims[0].shape[0] == 3)\n assert len(ims) <= n_rows * n_cols\n h, w = ims[0].shape[1:]\n H = h * n_rows + space * (n_rows - 1)\n W = w * n_cols + space * (n_cols - 1)\n if isinstance(pad_val, np.ndarray):\n # reshape to [3, 1, 1]\n pad_val = pad_val.flatten()[:, np.newaxis, np.newaxis]\n ret_im = (np.ones([3, H, W]) * pad_val).astype(ims[0].dtype)\n for n, im in enumerate(ims):\n r = n // n_cols\n c = n % n_cols\n h1 = r * (h + space)\n h2 = r * (h + space) + h\n w1 = c * (w + space)\n w2 = c * (w + space) + w\n ret_im[:, h1:h2, w1:w2] = im\n\n return ret_im", "def make_grid_floor_plan(tensor, box, nrow=8, padding=2,\n normalize=False, range=None, \n scale_each=False, pad_value=0):\n # make the mini-batch of images into a grid\n # nmaps = tensor.size(0)\n nmaps = len(box)\n xmaps = min(nrow, nmaps)\n ymaps = int(math.ceil(float(nmaps) / xmaps))\n # height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)\n height, width = int(256 + padding), int(256 + padding)\n tensor = torch.ones(())\n grid = tensor.new_full((3, height * ymaps + padding, width * xmaps + padding), pad_value)\n # # add the white image into the grid\n # block = tensor.new_full((3, height - padding, width - padding), 9.0/13)\n\n wall_thickness = 2\n wall_symbol = 2.0\n\n k = 0\n for y in irange(ymaps):\n for x in irange(xmaps):\n if k >= nmaps:\n break\n # add the white image into the grid\n block = tensor.new_full((3, height - padding, width - padding), 9.0/13)\n num_curr_box = box[k][0].size(0)\n \n # sorted the box according to their size\n sorted_box = {}\n for z in irange(num_curr_box):\n curr_box = box[k][0][z]\n x1, y1, x2, y2 = curr_box[0], curr_box[1], curr_box[2], curr_box[3]\n sorted_box[z] = (x2-x1)*(y2-y1)\n # to get sorted id\n sorted_box = sorted(sorted_box.items(), lambda x, y: cmp(x[1], y[1]), reverse=True)\n\n # obtain the sorted box and corresponding label\n for m in irange(num_curr_box):\n # get sorted id\n z = sorted_box[m][0]\n # label = box[1][k][z].item()\n try:\n label = box[k][1][z].item()\n except:\n assert False\n # draw box in the current image\n if label != -1:\n block = draw_floor_plan(block, box[k][0][z], label)\n # print(k, z)\n else:\n break\n\n # copy the current image to the grid\n grid.narrow(1, y * height + padding, height - padding)\\\n .narrow(2, x * width + padding, width - padding)\\\n .copy_(block)\n k = k + 1\n return grid", "def _make_grid(imarray, cols=4, pad=1, padval=255):\n pad = int(pad)\n if pad < 0:\n raise ValueError('pad must be non-negative')\n cols = int(cols)\n assert cols >= 1\n N, H, W, C = imarray.shape\n rows = N // cols + int(N % cols != 0)\n batch_pad = rows * cols - N\n assert batch_pad >= 0\n post_pad = [batch_pad, pad, pad, 0]\n pad_arg = [[0, p] for p in post_pad]\n imarray = np.pad(imarray, pad_arg, 'constant', constant_values=padval)\n H += pad\n W += pad\n grid = (imarray\n .reshape(rows, 
cols, H, W, C)\n .transpose(0, 2, 1, 3, 4)\n .reshape(rows * H, cols * W, C))\n if pad:\n grid = grid[:-pad, :-pad]\n return grid", "def gen_grid(grid_width, grid_height):\n\n grid = []\n for x in range(0, grid_width):\n grid.append([])\n for y in range(0, grid_height):\n grid[x].append(False)\n return grid", "def create_rand_grid(images, n_rows=4, n_cols=4):\n k = min(n_rows * n_cols, len(images))\n indices = random.sample(range(len(images)), k)\n return _create_grid(images, indices, n_rows, n_cols)", "def fill_grid(self):\n\n for row_margin, row in enumerate(range(self.rows)):\n self.grid.append([])\n\n for col_margin, col in enumerate(range(self.cols)):\n x = col*self.cell_size + col_margin\n y = row*self.cell_size + row_margin\n\n rect = pygame.Rect(x, y, self.cell_size, self.cell_size)\n\n cell = Cell(row, col, rect)\n\n if row == 7 and col == 3:\n cell.root = True\n self.root = cell\n elif row == 7 and col == 16:\n cell.goal = True\n self.goal = cell\n\n self.grid[row].append(cell)", "def display_grid_squares(x_margin, y_margin, num_rows, num_cols, sep):\n\n for row in range(num_rows):\n for col in range(num_cols):\n x = x_margin + sep * col\n y = y_margin + sep * row\n ellipse(x, y, 3, 3)\n pushMatrix()\n translate(x, y)\n noFill()\n rect(0, 0, 20, 20)\n popMatrix()", "def add_grid(img):\n for i in range(1, MATRIX_SIZE_X):\n x_pos = int(round(i * FRAME_W / MATRIX_SIZE_X, 0))\n cv2.line(img, (x_pos, 0), (x_pos, FRAME_H), (0, 0, 255))\n\n for i in range(1, MATRIX_SIZE_Y):\n y_pos = int(round(i * FRAME_H / MATRIX_SIZE_Y, 0))\n cv2.line(img, (0, y_pos), (FRAME_W, y_pos), (0, 0, 255))\n\n return img", "def draw_grid(self):\n for square in range(COLS+1):\n #vertical lines\n start_pos = (helpers.get_col_left_p(square),helpers.get_row_top_p(0))\n end_pos = (helpers.get_col_left_p(square),helpers.get_row_top_p(ROWS))\n pygame.draw.line(g.screen,WHITE,start_pos,end_pos)\n for square in range(ROWS+1):\n #horizontal lines\n start_pos = (helpers.get_col_left_p(0),helpers.get_row_top_p(square))\n end_pos = (helpers.get_col_left_p(COLS),helpers.get_row_top_p(square))\n pygame.draw.line(g.screen,WHITE,start_pos,end_pos)", "def visualize_grid(Xs, ubound=255.0, padding=1):\n pixel_sz = 2\n (H, W, C, N) = Xs.shape\n\n Xs_resize = np.zeros((H*pixel_sz, W*pixel_sz, C, N))\n Xs = (ubound*(Xs-np.min(Xs))/(np.max(Xs)-np.min(Xs))).astype('uint8')\n\n for c in range(C):\n for n in range(N):\n Xs_resize[:,:,c,n] = imresize(Xs[:,:,c,n], 200, interp='nearest')\n Xs = Xs_resize\n\n (H, W, C, N) = Xs.shape\n low, high = np.min(Xs), np.max(Xs)\n\n if C==1 or C==3:\n grid_size_H = int(ceil(sqrt(N)))\n grid_size_W = int(ceil(sqrt(N)))\n else:\n grid_size_H = N\n grid_size_W = C\n\n count = 0\n grid_height = H * grid_size_H + padding * (grid_size_H-1)\n grid_width = W * grid_size_W + padding * (grid_size_W-1)\n grid = np.zeros((grid_height, grid_width, C))\n y0, y1 = 0, H\n for y in range(grid_size_H):\n x0, x1 = 0, W\n for x in range(grid_size_W):\n if C==1 or C==3:\n img = Xs[:,:,:,count]\n count += 1\n else:\n img = np.expand_dims(Xs[:,:,x,y], axis=-1)\n\n grid[y0:y1, x0:x1, :] = ubound * (img - low) / (high - low)\n x0 += W + padding\n x1 += W + padding\n\n y0 += H + padding\n y1 += H + padding\n\n if C!=3:\n grid = grid[:,:,0]\n return grid", "def drawGrid(self):\n for div in range(NBCELL):\n sec = SSIZE*div\n self.can.create_line(0, sec, GSIZE, sec, width=3, fill=GFILL)\n self.can.create_line(sec, 0, sec, GSIZE, width=3, fill=GFILL)", "def show_image_grid(imgs):\n grd = make_grid(imgs)\n npimg = grd.numpy()\n 
plt.imshow(np.transpose(npimg, (1,2,0)), interpolation='nearest')\n plt.ion()\n plt.show()", "def define_grid(self):\n # Big lines\n self.big_lines = []\n for i in range(1, 3):\n # Vertical line\n self.big_lines.append(pygame.Rect(\n i * SCREEN_WIDTH // 3 - BIG_LINE_WIDTH // 2,\n BIG_LINE_OFFSET,\n BIG_LINE_WIDTH,\n SCREEN_WIDTH - 2 * BIG_LINE_OFFSET\n ))\n\n # Horizontal line\n self.big_lines.append(pygame.Rect(\n BIG_LINE_OFFSET,\n i * SCREEN_WIDTH // 3 - BIG_LINE_WIDTH // 2,\n SCREEN_WIDTH - 2 * BIG_LINE_OFFSET,\n BIG_LINE_WIDTH\n ))\n\n # Small lines\n self.small_lines = []\n for i in range(1, 9):\n if i % 3 == 0:\n continue\n\n # Vertical line\n self.small_lines.append(pygame.Rect(\n i * SCREEN_WIDTH // 9 - SMALL_LINE_WIDTH // 2,\n SMALL_LINE_OFFSET,\n SMALL_LINE_WIDTH,\n SCREEN_WIDTH - 2 * SMALL_LINE_OFFSET\n ))\n\n # Horizontal line\n self.small_lines.append(pygame.Rect(\n SMALL_LINE_OFFSET,\n i * SCREEN_WIDTH // 9 - SMALL_LINE_WIDTH // 2,\n SCREEN_WIDTH - 2 * SMALL_LINE_OFFSET,\n SMALL_LINE_WIDTH\n ))", "def recreate_grid(self):\n\n self.print_numlist = arcade.SpriteList()\n for row in range(ROW_COUNT):\n for column in range(COLUMN_COUNT):\n sprite = arcade.Sprite(\n f\"Numbers/{self.grid[row][column]}.png\", scale=0.2\n )\n x = (MARGIN + WIDTH) * column + MARGIN + WIDTH // 2\n y = (MARGIN + HEIGHT) * row + MARGIN + HEIGHT // 2\n sprite.center_x = x\n sprite.center_y = y\n self.print_numlist.append(sprite)\n # Check to see if all squares have been filled in\n if 0 not in self.grid:\n # if Cameron.Check_for_Completion(self.grid) == True:\n self.done = True", "def imshow_grid(images, shape=[2, 8]):\n fig = plt.figure(1)\n grid = ImageGrid(fig, 111, nrows_ncols=shape, axes_pad=0.05)\n\n size = shape[0] * shape[1]\n for i in range(size):\n grid[i].axis('off')\n grid[i].imshow(images[i]) # The AxesGrid object work as a list of axes.\n\n plt.show()", "def imshow_grid(images, shape=[2, 8]):\n fig = plt.figure(1)\n grid = ImageGrid(fig, 111, nrows_ncols=shape, axes_pad=0.05)\n\n size = shape[0] * shape[1]\n for i in range(size):\n grid[i].axis('off')\n grid[i].imshow(images[i]) # The AxesGrid object work as a list of axes.\n\n plt.show()", "def createGrid(nx, ny, include_center = False):\n direction = 0\n positions = []\n if (nx > 1) or (ny > 1):\n half_x = int(nx/2)\n half_y = int(ny/2)\n for i in range(-half_y, half_y+1):\n for j in range(-half_x, half_x+1):\n if ((i==0) and (j==0)) and not include_center:\n continue\n else:\n if ((direction%2)==0):\n positions.append([j,i])\n else:\n positions.append([-j,i])\n direction += 1\n return positions", "def _make_grid(self, imageset, format_kwargs=None):\n grid_size = imageset[\"grid_size\"]\n return ImageGrid.from_imageset(\n self._make_stills(imageset, format_kwargs=format_kwargs), grid_size\n )", "def create_grid(size):\n grid = []\n for i in range(size):\n row = ['0']*size\n grid.append(row)\n\n return grid", "def plot_grid(im_list, grid_shape, scale=0.1, axes_pad=0.07):\r\n # https://gist.github.com/lebedov/7018889ba47668c64bcf96aee82caec0\r\n\r\n # Grid must be 2D:\r\n assert len(grid_shape) == 2\r\n\r\n # Make sure all images can fit in grid:\r\n assert np.prod(grid_shape) >= len(im_list)\r\n\r\n grid = ImageGrid(plt.gcf(), 111, grid_shape, axes_pad=axes_pad)\r\n for i, data in enumerate(im_list):\r\n\r\n # Scale image:\r\n im = PIL.Image.fromarray(data)\r\n thumb_shape = [int(scale*j) for j in im.size]\r\n im.thumbnail(thumb_shape, PIL.Image.ANTIALIAS)\r\n data_thumb = np.array(im)\r\n grid[i].plot_nnua(data_thumb)\r\n\r\n # Turn off 
axes:\r\n grid[i].axes.get_xaxis().set_visible(False)\r\n grid[i].axes.get_yaxis().set_visible(False)", "def generate_grid(height, width):\n return [[random.randint(0, 9) for _ in range(width)] for _ in range(height)]", "def make_grid(self, nx, ny):\n nx_vec = np.arange(nx)\n ny_vec = np.arange(ny)\n yv, xv = np.meshgrid(ny_vec, nx_vec)\n grid = np.stack((yv, xv), axis=2)\n grid = grid.reshape(1, 1, ny, nx, 2)\n return grid", "def draw_grid(self):\n\n # Draw horizontal lines\n for row in range(self.num_rows + 1):\n left = row_column_to_pixels(row, 0)\n right = row_column_to_pixels(row, self.num_cols)\n pygame.draw.line(self.screen, COLOR_MAP['gray'], left, right)\n\n # Draw vertical lines\n for col in range(self.num_cols + 1):\n top = row_column_to_pixels(0, col)\n bottom = row_column_to_pixels(self.num_rows, col)\n pygame.draw.line(self.screen, COLOR_MAP['gray'], top, bottom)", "def create_grid(grid):\r\n for i in range (4):\r\n grid.append ([])\r\n for j in range (4):\r\n grid[i].append (0)", "def grid(self, username, period=CONSTANTS.LAST_FM.PERIOD_7DAYS):\n if period not in PERIODS:\n abort(400, \"invalid period, valid options are: \" + PERIOD_STR)\n\n user = self.get_user(username)\n\n GRID_SIZE = 3\n IMG_SPACING = int(IMAGE_SIZE * 0.05)\n NUM_IMAGES = GRID_SIZE ** 2\n albums = user.get_top_albums(period, limit=NUM_IMAGES + 1)\n if len(albums) < NUM_IMAGES:\n abort(400, f\"{username} does not have enough albums to make a grid\")\n GRID_WIDTH = GRID_SIZE + 1\n new_im = Image.new(\n \"RGB\",\n (\n (GRID_WIDTH * IMAGE_SIZE) + (GRID_SIZE * IMG_SPACING),\n (GRID_SIZE * IMAGE_SIZE) + ((GRID_SIZE - 1) * IMG_SPACING),\n ),\n )\n for y_idx in range(0, GRID_SIZE):\n row_text = []\n for x_idx in range(0, GRID_SIZE + 1):\n grid_img = None\n if x_idx == GRID_SIZE:\n # Make text image\n grid_img = self.row_text(row_text)\n else:\n album_idx = y_idx * 3 + x_idx\n album = albums[album_idx].item\n try:\n imgUrl = album.get_cover_image(size=4)\n except pylast.WSError as e:\n imgUrl = None\n\n grid_img = self.get_image(imgUrl)\n text = \"{}\\n{}\".format(\n album.get_artist().get_name(), album.get_title()\n )\n row_text.append(text)\n x_offset = IMG_SPACING * x_idx\n y_offset = IMG_SPACING * y_idx\n new_im.paste(\n grid_img,\n (\n (x_idx * IMAGE_SIZE) + x_offset,\n (y_idx * IMAGE_SIZE) + y_offset,\n ),\n )\n grid_img.close()\n img_io = BytesIO()\n new_im.save(img_io, format=\"PNG\")\n img_io.seek(0)\n new_im.close()\n return img_io", "def create_grid(size_x, size_y, default=None):\n return [[default for _x in range(size_y)] for _y in range(size_x)]", "def render_image(grid,window):\r\n X = len(grid[0])\r\n Y = len(grid)\r\n#top row:\r\n for j in range(Y):\r\n for sub_j in range(3): #3 rows \r\n ROW = []\r\n for i in range(X):\r\n ROW += grid[j][i].arr[sub_j]\r\n \r\n for k in range(len(ROW)):\r\n COLOR = (ROW[k],ROW[k],ROW[k])\r\n Y_pos = (3*j + sub_j)*pixel_size*scale\r\n X_pos = k*(pixel_size)*scale\r\n width = height = pixel_size*scale\r\n pygame.draw.rect(window,COLOR,(X_pos,Y_pos,width,height))\r\n \r\n# print(ROW)\r\n return", "def np_make_image_grid(images, nrow, pad=2):\n height, width = images[0].shape[:2]\n ncol = int(np.ceil(len(images) / nrow))\n ncolors = images[0].shape[-1]\n result_imshape = [nrow * (height + pad), ncol * (width + pad), ncolors]\n if len(images[0].shape) == 2: # grayscale image\n ncolors = 1\n result_imshape[-1] = 1\n im_result = np.zeros(result_imshape, dtype=images[0].dtype)\n im_idx = 0\n for row in range(nrow):\n for col in range(ncol):\n if im_idx == len(images):\n 
break\n im = images[im_idx]\n im = normalize(im)\n im_idx += 1\n im_result[row * (pad + height): (row) * (pad + height) + height,\n col * (pad + width): (col) * (pad + width) + width, :] = im.reshape(height, width, -1)\n if ncolors == 1:\n im_result = im_result[:, :, 0]\n return im_result", "def DrawGrid(self, count):\n for i in range(0, self.width, self.incr):\n self.canvas.create_line(i, 100, i, 700, fill = \"#696969\", width = 1)\n for i in range(100, 800, 100):\n self.canvas.create_line(0, i, self.width, i, fill = \"#696969\", width = 1)\n self.canvas.create_rectangle(self.incr * 4, self.height - self.incr * 3.5,\n self.width - self.incr * 4, self.height, fill = \"black\", width = 3)\n for i in range(int(self.height - self.incr * 3.5), self.height, int(self.incr / 4)):\n self.canvas.create_line(self.incr * 4, i, self.width - self.incr * 4,\n i, fill = \"#696969\", width = 1)\n for i in range(self.incr * 4, self.width - self.incr * 4 + 1, int(self.incr / 4)):\n self.canvas.create_line(i, self.height - self.incr * 3.5, i, self.height,\n fill = \"#696969\", width = 1)", "def create_open_positions_grid(self):\n\n counter = 0\n col = 0\n row = 0\n\n for i in range(0, 99):\n if counter % 3 == 0:\n col = 0\n row += 1\n self.gp.addWidget(PositionPanel(), row, col)\n counter += 1\n col += 1", "def draw_grid(self):\n for i in range(N * N + 1):\n color = \"blue\" if i % N == 0 else \"gray\"\n x0 = MARGIN + i * SIDE\n y0 = MARGIN\n x1 = MARGIN + i * SIDE\n y1 = HEIGHT - MARGIN\n self.canvas.create_line(x0, y0, x1, y1, fill=color)\n\n x0 = MARGIN\n y0 = MARGIN + i * SIDE\n x1 = WIDTH - MARGIN\n y1 = MARGIN + i * SIDE\n self.canvas.create_line(x0, y0, x1, y1, fill=color)", "def visualize_grid(Xs, ubound=255.0, padding=1):\n (N, H, W, C) = Xs.shape\n grid_size = int(ceil(sqrt(N)))\n grid_height = H * grid_size + padding * (grid_size - 1)\n grid_width = W * grid_size + padding * (grid_size - 1)\n grid = np.zeros((grid_height, grid_width, C))\n next_idx = 0\n y0, y1 = 0, H\n for y in range(grid_size):\n x0, x1 = 0, W\n for x in range(grid_size):\n if next_idx < N:\n img = Xs[next_idx]\n low, high = np.min(img), np.max(img)\n grid[y0:y1, x0:x1] = ubound * (img - low) / (high - low)\n # grid[y0:y1, x0:x1] = Xs[next_idx]\n next_idx += 1\n x0 += W + padding\n x1 += W + padding\n y0 += H + padding\n y1 += H + padding\n # grid_max = np.max(grid)\n # grid_min = np.min(grid)\n # grid = ubound * (grid - grid_min) / (grid_max - grid_min)\n return grid", "def display_images_in_grid(imgs, row, col):\n if len(imgs) != (row * col):\n raise ValueError(f\"Invalid imgs len:{len(imgs)} col:{row} row:{col}\")\n\n for i, img in enumerate(imgs):\n plot_num = i + 1\n plt.subplot(row, col, plot_num)\n plt.tick_params(labelbottom=False) # remove x axis\n plt.tick_params(labelleft=False) # remove y axis\n plt.imshow(img)\n plt.show()", "def draw_grid_map(img, grid_map, stride):\n image = img_from_array(img)\n draw = ImageDraw.Draw(image)\n counter = 0\n for grid in grid_map:\n draw.rectangle((\n grid[0] + stride // 2 - 2,\n grid[1] + stride // 2 - 2,\n grid[2] + stride // 2 + 2,\n grid[3] + stride // 2 + 2), fill=(255, 255, 255, 0))\n counter += 1\n plt.figure()\n plt.imshow(image)\n plt.show()", "def printgrid(\n rows=2,\n columns=2,\n cell_width=8,\n cell_height=4,\n corner_symbol=\"+\",\n horizontal_symbol=\"-\",\n vertical_symbol=\"|\",\n):\n horizontal_boundary = (\n corner_symbol + ((horizontal_symbol * cell_width) + corner_symbol) * columns\n ) + \"\\n\"\n horizontal_middle = 
horizontal_boundary.replace(horizontal_symbol, \" \").replace(\n corner_symbol, vertical_symbol\n )\n vertical_cells = (horizontal_middle * cell_height + horizontal_boundary) * rows\n print(\"\\n\")\n print(horizontal_boundary + vertical_cells)", "def rect(rows: int, cols: int, top: int = 0,\n left: int = 0) -> List['GridQubit']:\n return [\n GridQubit(row, col)\n for row in range(top, top + rows)\n for col in range(left, left + cols)\n ]", "def __init__(self) -> None:\n self.row = 6\n self.col = 7\n self.grid = []\n\n for y in range(self.row):\n temp_row = []\n for x in range(self.col):\n temp_row.append(\" \")\n self.grid.append(temp_row)", "def create_video_grid(vid_width, vid_height, block_dim):\n\n grid = []\n for i in range(0, vid_height, block_dim):\n grid_row = []\n for j in range(0, vid_width, block_dim):\n bottom_left_vertex = (j, i)\n bottom_right_vertex = (j + block_dim, i)\n top_right_vertex = (j + block_dim, i + block_dim)\n top_left_vertex = (j, i + block_dim)\n\n vertex_list = []\n vertex_list.append(bottom_left_vertex)\n vertex_list.append(bottom_right_vertex)\n vertex_list.append(top_right_vertex)\n vertex_list.append(top_left_vertex)\n\n path = Path(vertex_list)\n grid_row.append(path)\n grid.append(grid_row)\n return grid", "def generate_grid():\n y_offset = -10\n for a in range(20):\n # Line 1\n # Adds offset to the x position of the squares\n x_offset = 10\n for b in range(1):\n # Adds offset to the y position of the squares\n y_offset += 20\n for c in range(20):\n # Prints a row of squares(5 squares along the x)\n square(x_offset, y_offset, 20, 20, dark_green)\n for d in range(1):\n # Adds x offset for the next line of squares on the y axis\n x_offset += 40\n # Line 2 (needs 2 lines because the offset of each line)\n # Adds offset to the x position of the squares\n x_offset = 30\n for e in range(1):\n # Adds offset to the y position of the squares\n y_offset += 20\n for f in range(20):\n # Prints a row of squares(5 squares along the x)\n square(x_offset, y_offset, 20, 20, dark_green)\n for g in range(1):\n # Adds x offset for the next line of squares on the y axis\n x_offset += 40", "def images_square_grid(images, mode):\n # Get maximum size for square grid of images\n save_size = math.floor(np.sqrt(images.shape[0]))\n\n # Scale to 0-255\n images = (((images - images.min()) * 255) / (images.max() - images.min())).astype(np.uint8)\n\n # Put images in a square arrangement\n images_in_square = np.reshape(\n images[:save_size*save_size],\n (save_size, save_size, images.shape[1], images.shape[2], images.shape[3]))\n if mode == 'L':\n images_in_square = np.squeeze(images_in_square, 4)\n\n # Combine images to grid image\n new_im = Image.new(mode, (images.shape[1] * save_size, images.shape[2] * save_size))\n for col_i, col_images in enumerate(images_in_square):\n for image_i, image in enumerate(col_images):\n im = Image.fromarray(image, mode)\n new_im.paste(im, (col_i * images.shape[1], image_i * images.shape[2]))\n\n return new_im", "def imshow_grid(images, shape=[2, 2], name='default', save=False):\n fig = plt.figure()\n grid = ImageGrid(fig, 111, nrows_ncols=shape, axes_pad=0.05)\n\n size = shape[0] * shape[1]\n for i in range(size):\n grid[i].axis('off')\n img = images[i]\n if img.shape[0]==3:\n img = img.transpose(1, 2, 0)\n img = (img - img.min())/(img.max() - img.min())\n grid[i].imshow(img, vmin=-132, vmax = 164) # The AxesGrid object work as a list of axes.\n\n plt.show()", "def create_grid(self, main_frame: tk.LabelFrame) -> None:\n for square_row in range(3):\n 
for square_column in range(3):\n square = tk.Frame(main_frame, highlightbackground='black', highlightcolor='red',\n highlightthickness=1, width=120, heigh=120, padx=0)\n square.grid(row=square_row, column=square_column)\n self.create_cells_and_entries(square, square_row)\n return None", "def plot_final_grid(generated_images):\n\n fig = plt.figure(figsize=(GRID_SIZE, GRID_SIZE))\n fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.001, hspace=0.001)\n\n for ind in range(GRID_IMGS):\n ax = fig.add_subplot(GRID_SIZE, GRID_SIZE, ind + 1, xticks=[], yticks=[])\n ax.imshow(np.uint8(((generated_images[ind] + 1) / 2) * 255), cmap='gray')\n\n plt.savefig('/content/drive/My Drive/WGAN/generated_image_grid.png')\n plt.savefig('/content/drive/My Drive/WGAN/results/WGAN.png')\n plt.show()", "def _create_grid_with_cells(self, width, height):\n grid = []\n for row in range(height):\n grid.append([])\n for column in range(width):\n if column % 2 == 1 and row % 2 == 1:\n grid[row].append(TILE_EMPTY)\n elif (\n column == 0 or row == 0 or column == width - 1 or row == height - 1\n ):\n grid[row].append(TILE_CRATE)\n else:\n grid[row].append(TILE_CRATE)\n grid[-2][-3] = TILE_EMPTY\n grid[1][0] = TILE_EMPTY\n return grid", "def draw_grid(self, verbosity=0):\n log.debug(\"Drawing grid\")\n (x0, y0) = self.origin\n color = (191, 191, 191)\n\n (w, h) = self.surface.get_size()\n\n i = x0\n while True:\n (x, ignore) = self.map_to_screen((i, 0))\n if x > w:\n break\n pygame.draw.line(self.surface, color, (x, 0), (x, h), 1)\n i += 10\n\n j = y0\n while True:\n (ignore, y) = self.map_to_screen((0, j))\n if y > h:\n break\n pygame.draw.line(self.surface, color, (0, y), (w, y), 1)\n j -= 10", "def drawGrid(w, rows, surface):\r\n sizeBtwn = w // rows\r\n\r\n x = 0\r\n y = 0\r\n for l in range(rows):\r\n x = x + sizeBtwn\r\n y = y + sizeBtwn\r\n\r\n #line color-white #start end\r\n # pygame.draw.line(surface, (255,255,255), (x,0), (x,w)) #vertical\r\n #pygame.draw.line(surface, (255,255,255), (0,y), (w,y)) #horizontal\r", "def draw_grid(self):\n pygame.draw.rect(self.screen, BLACK,\n (*grid_pos, WIDTH - 150, HEIGHT-150), 2)\n for x in range(9):\n pygame.draw.line(\n self.screen,\n BLACK,\n (grid_pos[0] + (x * cell_size), grid_pos[1]),\n (grid_pos[0] + (x * cell_size), grid_pos[1] + 450),\n 2 if x % 3 == 0 else 1\n )\n pygame.draw.line(\n self.screen,\n BLACK,\n (grid_pos[0], grid_pos[1] + (x * cell_size)),\n (grid_pos[0] + 450, grid_pos[1] + (x * cell_size)),\n 2 if x % 3 == 0 else 1\n )", "def draw_grid(self) -> None:\n for x in range(0, WIDTH, TILE_SIZE):\n pg.draw.line(self.screen, LIGHT_GREY, (x, INFO_HEIGHT), (x, HEIGHT))\n for y in range(INFO_HEIGHT, INFO_HEIGHT + HEIGHT, TILE_SIZE):\n pg.draw.line(self.screen, LIGHT_GREY, (0, y), (WIDTH, y))", "def place_images(panel: plt.Axes, height_data: list) -> None:\n # left right bottom top\n if \"A_small.png\" in os.listdir():\n pass\n else:\n for file in \"ACGT\":\n img = Image.open(\"{}.png\".format(file))\n img = img.resize((img.size[0] // 4, img.size[1] // 4))\n img.save(\"{}_small.png\".format(file))\n pic_dic = {'A': mpimg.imread('A_small.png'), 'C': mpimg.imread('C_small.png'),\n 'G': mpimg.imread('G_small.png'), 'T': mpimg.imread('T_small.png')}\n for i, height_dict in enumerate(height_data):\n heights_sorted = sorted([[key, height_dict[key]] for key in height_dict.keys()], key=itemgetter(1))\n for j, base_value in enumerate(heights_sorted):\n if j is 0:\n bottom = 0\n else:\n bottom = sum(height_sort[1] for height_sort in 
heights_sorted[:j])\n panel.imshow(pic_dic[base_value[0].upper()], extent=[i - 10, i - 9, bottom, bottom + base_value[1]],\n aspect=\"auto\")\n\n return", "def plot_final_grid(generated_images):\n\n fig = plt.figure(figsize=(8, 6))\n fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.001, hspace=0.001)\n\n for ind in range(GRID_IMGS):\n ax = fig.add_subplot(GRID_SIZE, GRID_SIZE, ind + 1, xticks=[], yticks=[])\n ax.imshow(np.uint8(((generated_images[ind] + 1) / 2) * 255), cmap='gray')\n\n plt.savefig('/content/drive/My Drive/WGAN/generated_image_grid.png')\n plt.savefig('/content/drive/My Drive/WGAN/results/DCGAN.png')\n plt.show()", "def create_grid(self):\n row = 0\n col = 0\n for row in range(self._dim):\n for col in range(self._dim):\n x1 = col*self._cell_dim # bottom left\n y1 = row * self._cell_dim # top left\n x2 = x1 + self._cell_dim # bottom right\n y2 = y1 + self._cell_dim # top right\n self.rect[row,col] = self.canvas.create_rectangle(x1,y1,x2,y2, fill=self._primary_color, outline=self._grid_lines_color, tags=\"rect\")\n self.canvas.tag_bind(self.rect[row, col], '<ButtonPress-1>', self.change_cell)\n col = 0\n row += 1\n if self._dim < 50:\n button_size = int(80*(self._dim/50))\n font_size = int(22*(self._dim/50))\n else:\n button_size = 80\n font_size = 18\n x1 = col * self._cell_dim + (((self._dim*self._cell_dim) - button_size*3)//2)\n y1 = row * self._cell_dim + 5\n x2 = x1 + button_size\n y2 = y1 + 20\n self.canvas.create_oval(x1,y1,x2,y2, tags=\"toggle\", fill=self._primary_color)\n self.canvas.create_text(x1+(button_size//2), y1+10, tags=\"toggle-text\", fill=self._secondary_color, text=\"Start\", font=(\"Courier\", font_size))\n self.canvas.tag_bind(\"toggle\", '<ButtonPress-1>', self.toggle_refresh)\n self.canvas.tag_bind(\"toggle-text\", '<ButtonPress-1>', self.toggle_refresh)\n x1 = x2 + 5 # padding between buttons\n x2 = x1 + button_size\n self.canvas.create_oval(x1,y1,x2,y2, tags=\"next\", fill=self._primary_color)\n self.canvas.create_text(x1+(button_size//2), y1+10, tags=\"next-text\", fill=self._secondary_color, text=\"Next\", font=(\"Courier\", font_size))\n self.canvas.tag_bind(\"next\", '<ButtonPress-1>', self.one_step)\n self.canvas.tag_bind(\"next-text\", '<ButtonPress-1>', self.one_step)\n x1 = x2 + 5 # padding between buttons\n x2 = x1 + button_size\n self.canvas.create_oval(x1,y1,x2,y2, tags=\"clear\", fill=self._primary_color)\n self.canvas.create_text(x1+(button_size//2), y1+10, tags=\"clear-text\", fill=self._secondary_color, text=\"Clear\", font=(\"Courier\", font_size))\n self.canvas.tag_bind(\"clear\", '<ButtonPress-1>', self.clear_board)\n self.canvas.tag_bind(\"clear-text\", '<ButtonPress-1>', self.clear_board)\n self.model_refresh()", "def make_grid(dataset):\n top_left_lat = dataset[\"a\"][0]\n top_left_lng = dataset[\"a\"][1]\n top_right_lng = dataset[\"c\"][1]\n bot_left_lat = dataset[\"b\"][0]\n\n lng_row = []\n lat_col = []\n i = top_left_lng\n while i < top_right_lng:\n lng_row.append(round(i, 5))\n i += step\n j = bot_left_lat\n while j < top_left_lat:\n lat_col.append(round(j, 5))\n j += step\n out_grid = []\n for i in lat_col:\n row = []\n for j in lng_row:\n row.append(\"{0}:{1}:0\".format(i, j))\n out_grid.append(row)\n return out_grid", "def draw_grid(self):\n plt.imshow(py.array(\n map(lambda x: map(lambda y: mplc.colorConverter.to_rgb(colord[y]), x), self.create_grid(self.graph))),\n interpolation='nearest')\n plt.show()", "def draw_grid(self):\n if self.grid_center == True:\n (n, m) = (self.n, self.m)\n (dx, 
dy) = (self.dx // 2, self.dy // 2)\n else:\n (n, m) = (self.n + 1, self.m + 1)\n (dx, dy) = (0, 0)\n\n x0 = self.x0 + dx\n y0 = self.y0 + dy\n\n # vertical lines\n for j in range(m):\n p0 = (x0 + j * self.dx, y0)\n p1 = (x0 + j * self.dx, y0 + (n-1) * self.dy)\n pygame.draw.line(self.screen, self.grid_col, p0, p1, self.grid_d) \n # horizontal lines\n for i in range(n):\n p0 = (x0, y0 + i * self.dy)\n p1 = (x0 + (m-1) * self.dx, y0 + i * self.dy)\n pygame.draw.line(self.screen, self.grid_col, p0, p1, self.grid_d)", "def gen_grids(self):\n self.dx = self.grid_width / self.grid_resol\n self.dk = 2 * np.pi/self.grid_width\n self.grid_x_shifted = -self.grid_width/2 + self.dx * np.arange(0, self.grid_resol)\n self.grid_x = self.grid_x_shifted + self.grid_center\n self.grid_k = - (np.pi * self.grid_resol)/self.grid_width + self.dk * np.arange(0, self.grid_resol)\n self.grid_k = np.roll(self.grid_k, int((self.grid_resol)/2))\n self.grid_kin = np.square(self.h)/ (2*self.m) * np.square(self.grid_k)", "def createGridcells(mapdata, listOfP):\n new_gridcells = GridCells()\n new_gridcells.header = mapdata.header\n new_gridcells.cell_width = mapdata.info.resolution\n new_gridcells.cell_height = mapdata.info.resolution\n new_gridcells.cells = []\n for p in listOfP:\n new_gridcells.cells.append(PathPlanner.grid_to_world(mapdata, p[0], p[1]))\n return new_gridcells", "def draw_grid(self, tile_img, tiles):\n #debug_print(\"drawing level\", data)\n img = Surface((self.xsize * SIZE, self.ysize * SIZE))\n for pos, char in self:\n rect = get_tile_rect(pos)\n img.blit(tile_img, rect, tiles[char])\n return img", "def draw_grid(grid_display, game_grid, box_width, box_height, border_color):\n\n for x in range(0, len(game_grid)):\n for y in range(0, len(game_grid[0])):\n if x == 0 or x == len(game_grid) - 1 or y == 0 or y == len(game_grid[0]) - 1:\n pygame.draw.rect(grid_display, border_color, (x * box_width + 1, y * box_height + 1,\n box_width - 1, box_height - 1))\n elif game_grid[x][y]:\n pygame.draw.rect(grid_display, game_grid[x][y], (x * box_width + 1, y * box_height + 1,\n box_width - 1, box_height - 1))", "def _createGrid(self, dimensions, density):\n import math\n\n xmin, xmax = dimensions[0], dimensions[1]\n imin, imax = dimensions[2], dimensions[3]\n\n hsteps = math.ceil((xmax - xmin)/density)\n vsteps = math.ceil((imax - imin)/density)\n\n hgrids = int(math.ceil(hsteps/self.gridsize))\n vgrids = int(math.ceil(vsteps/self.gridsize))\n\n grid_inc = density * self.gridsize\n \n #Add one inside the range() because you want to include the last one\n horizontal = [[xmin + (x * grid_inc), xmin + ((x+1) * grid_inc)] for x in range(hgrids)]\n vertical = [[imin + (im * grid_inc), imin + ((im+1) * grid_inc)] for im in range(vgrids)]\n\n #This makes the negative to positive less confusing, positive is at index = 0\n vertical.reverse()\n\n grid_map = []\n\n for im in vertical:\n temp = []\n for x in horizontal:\n my_x = list(x)\n my_x.extend(im)\n temp.append(my_x)\n grid_map.append(temp)\n\n return grid_map", "def create_board(self):\n canvas = tk.Canvas(master=self.panel_mid, width=530, height=550)\n canvas.configure(scrollregion=(self.offset_x, self.offset_y, 20, 20))\n\n # x1 y1 x2 y2\n for i in range(8):\n y = i * self.width\n for j in range(8):\n x = j * self.width\n if ((j + 1) % 2) == 0:\n if ((i + 1) % 2) == 0:\n canvas.create_rectangle(x, y, x + self.width, y + self.width,\n outline=\"#808080\", fill=\"#fff\") # biela\n else:\n canvas.create_rectangle(x, y, x + self.width, y + self.width,\n 
outline=\"#808080\", fill=\"#999\") # cierna\n else:\n if ((i + 1) % 2) == 1:\n canvas.create_rectangle(x, y, x + self.width, y + self.width,\n outline=\"#808080\", fill=\"#fff\") # biela\n else:\n canvas.create_rectangle(x, y, x + self.width, y + self.width,\n outline=\"#808080\", fill=\"#999\") # cierna\n\n return canvas", "def show(self):\n data = []\n for row in self.grid:\n mid, bottom = [], []\n for node in row:\n \tmid += [0, int(node.right)]\n \tbottom += [int(node.down), 1]\n data += mid + [0] + bottom + [0] \n data[self.width*2+1] = 1\n data[-1] = 1\n data += (self.width*2) * [0]\n im = Image.new('1', (self.width*2+1, self.height*2+1))\n im.putdata(data)\n im.save('maze.png')\n im.show()", "def draw_grid(self):\n self.screen.draw_many_tiles(tile for tile in self.iter_grid_tiles())\n pass", "def make_grid(tensors, nrow=2, padding=2, isNormalized=True):\n grid = tv.utils.make_grid(tensor=tensors.detach().cpu(),\n nrow=nrow,\n padding=padding,\n normalize=(not isNormalized))\n if isNormalized:\n ndgrid = grid.mul(255).add_(0.5).clamp_(0, 255).permute(\n 1, 2, 0).numpy().astype(np.uint16)\n else:\n ndgrid = grid.clamp_(0, 255).permute(1, 2, 0).numpy().astype(np.uint16)\n return ndgrid", "def build_grid(tiles, tile_size, grid_rows=None, grid_cols=None):\n if grid_rows is None or grid_cols is None:\n grid_rows = int(math.sqrt(len(tiles)))\n grid_cols = int(math.ceil(len(tiles) / grid_rows))\n\n grid = np.zeros(\n (grid_rows * tile_size[1], grid_cols * tile_size[0], 3), np.uint8)\n for tile_id, tile in enumerate(tiles):\n assert(tile.shape[0] == tile_size[1] and tile.shape[1] == tile_size[0])\n yy = int(tile_id / grid_cols)\n xx = tile_id % grid_cols\n grid[(yy * tile_size[1]):((yy + 1) * tile_size[1]),\n (xx * tile_size[0]):((xx + 1) * tile_size[0]), :] = tile\n return grid", "def _generate_images(self, trace):\n images = []\n colors = []\n colors_by_shape = {}\n for board in trace:\n width = int(round((float(board.shape[1]) / board.shape[0]) * self._height))\n cellsize = width / board.shape[1] # cell size\n img = np.zeros((self._height, width, 3), dtype=np.uint8)\n\n tiles = {} # map from integer rep. 
of the tile to a shape\n for y in range(board.shape[0]):\n for x in range(board.shape[1]):\n cell = board[y,x]\n if cell not in tiles:\n tiles[cell] = (x, y, 1, 1) # x, y, w, h\n else:\n cur_x, cur_y, cur_w, cur_h = tiles[cell]\n if x >= cur_x + cur_w:\n cur_w = (x-cur_x) + 1\n if y >= cur_y + cur_h:\n cur_h = (y-cur_y) + 1\n tiles[cell] = (cur_x, cur_y, cur_w, cur_h)\n\n # Colors\n if len(colors_by_shape) == 0:\n for tid in tiles:\n shape = (tiles[tid][2], tiles[tid][3])\n if shape not in colors_by_shape:\n colors_by_shape[shape] = hex_to_rgb(random_unique_color(colors))\n colors.append(colors_by_shape[shape])\n\n for tid in tiles:\n x, y, w, h = tiles[tid]\n shape = (w,h)\n empty = board[y,x] == 0\n x, y, w, h = x*cellsize, y*cellsize, w*cellsize, h*cellsize\n # Draw a filled rectangle without color\n if not empty:\n cv2.rectangle(img, (x, y), (x+w, y+h), colors_by_shape[shape],-1)\n else:\n cv2.rectangle(img, (x, y), (x+w, y+h), [0,0,0], -1) #, 8)-\n # Draw a boundary\n cv2.rectangle(img, (x, y), (x+w, y+h), (0, 0, 0), 2, 8)\n \n images.append(img)\n return images", "def construct_image(imgs):\n\n # todo fill missing pieces and\n\n if len(imgs) == 0:\n return None\n # taking the first\n w, h = imgs[0][1].size\n img_array = order_2d(imgs)\n x_count = len(img_array[0])\n y_count = len(img_array)\n height = h * y_count\n width = w * x_count\n new_im = Image.new('RGB', (width, height))\n for y in range(y_count):\n for x in range(x_count):\n _, im = img_array[y][x]\n new_im.paste(im, (x * w, y * h))\n return new_im", "def draw_room(screen, grid, start_location):\n wall_image = pygame.image.load(\"images/pillar.png\")\n wall_image_transparent = pygame.image.load(\"images/pillar_80.png\")\n floor_image = pygame.image.load(\"images/floor.png\")\n computer_image = pygame.image.load(\"images/desk_computer.png\")\n\n # map_to_image = [floor_image, # 0\n # wall_image, # 1\n # wall_image_transparent, # 2\n # computer_image] # 3\n map_to_image = {\n \"0\": floor_image,\n \"1\": wall_image,\n \"2\": wall_image_transparent,\n \"3\": computer_image,\n \"10\": wall_image # Secret passage\n }\n # better tile management for multiple environments / create multiple environments.\n # 0 = floor, 1 = wall (pillar)\n # First draw floor everywhere\n max_dimensions = grid.shape\n for r in range(max_dimensions[0]):\n for c in range(max_dimensions[1]):\n screen.blit(floor_image, (c * 30 + start_location[0],\n r * 30 + start_location[1]))\n\n for tile_type in [1, 2, 3, 10]:\n the_rows, the_cols = np.where(grid == tile_type)\n for i in range(len(the_cols)):\n screen.blit(map_to_image[str(tile_type)], (the_cols[i] * 30 + start_location[0],\n the_rows[i] * 30 + start_location[1]))", "def make_grid_bbox(tensor, box, nrow=8, padding=2,\n normalize=False, range=None, \n scale_each=False, pad_value=0, draw_line=False):\n\n # make the mini-batch of images into a grid\n # nmaps = tensor.size(0)\n nmaps = len(box)\n xmaps = min(nrow, nmaps)\n ymaps = int(math.ceil(float(nmaps) / xmaps))\n # height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)\n height, width = int(256 + padding), int(256 + padding)\n tensor = torch.ones(())\n grid = tensor.new_full((3, height * ymaps + padding, width * xmaps + padding), pad_value)\n # # add the white image into the grid\n # block = tensor.new_full((3, height - padding, width - padding), 9.0/13)\n k = 0\n for y in irange(ymaps):\n for x in irange(xmaps):\n if k >= nmaps:\n break\n # add the white image into the grid\n block = tensor.new_full((3, height - padding, width - 
padding), 9.0/13)\n # print(box[0].size())\n # print(box[1].size())\n # assert False\n # num_curr_box = box[0][k].size(0)\n num_curr_box = box[k][0].size(0)\n for z in irange(num_curr_box):\n # label = box[1][k][z].item()\n try:\n label = box[k][1][z].item()\n except:\n print(box)\n print(k)\n assert False\n \n if label != -1:\n block = draw_box(block, box[k][0][z], label, draw_line)\n # print(k, z)\n else:\n break\n # copy to the grid\n grid.narrow(1, y * height + padding, height - padding)\\\n .narrow(2, x * width + padding, width - padding)\\\n .copy_(block)\n k = k + 1\n return grid", "def define_grid(self):\n self.h_shape = int(\n np.round((self.h_stop - self.h_start) / self.h_step, 2)) + 1\n self.k_shape = int(\n np.round((self.k_stop - self.k_start) / self.k_step, 2)) + 1\n self.l_shape = int(\n np.round((self.l_stop - self.l_start) / self.l_step, 2)) + 1\n self.grid_origin = [self.h_start, self.k_start, self.l_start]\n self.grid_step = [int(np.rint(1.0/self.h_step)),\n int(np.rint(1.0/self.k_step)),\n int(np.rint(1.0/self.l_step))]\n self.grid_shape = [self.h_shape, self.k_shape, self.l_shape]\n self.grid_basis = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]", "def create_board(self, size):\n x = np.arange(0, size[0])\n y = np.arange(0, size[1])\n board = np.meshgrid(x, y)\n return board", "def define_grid():\n grid_left = np.array([[-13.1000000000000, -35.5000000000000, -48.3000000000000, -60, -16.9000000000000,\n -34.8000000000000, -67.5000000000000, -46.1000000000000, -59.8000000000000,\n -14.2000000000000, -28.3000000000000, -42.3000000000000, -67.6000000000000,\n -50.5000000000000, -14.6000000000000, -60.9000000000000, -31.6000000000000,\n -5.10000000000000, -65.6000000000000, -41.8000000000000, -55.1000000000000,\n -22.7000000000000, -5.80000000000000, -49.2000000000000, -34.5000000000000,\n -61.5500000000000, -63.6000000000000, -40.4000000000000, -48.7000000000000,\n -21.8000000000000, -58.2000000000000, -7, -36.3000000000000, -48.1000000000000,\n -56.8000000000000, -7.30000000000000, -22.2000000000000, -36.8000000000000,\n -46.8000000000000],\n [-67.7000000000000, -60, -55.1000000000000, -51.8000000000000, -51.6000000000000,\n -49.3000000000000, -47.1000000000000, -43.7000000000000, -39.6000000000000,\n -39.1000000000000, -31.2000000000000, -30.7000000000000, -30.1000000000000,\n -24.4000000000000, -22.7000000000000, -18.7000000000000, -16.9000000000000,\n -12.6000000000000, -10.8000000000000, -10.2000000000000, -4.01000000000000, 1.20000000000000,\n 2.80000000000000, 3.70000000000000, 3.90000000000000, 6.20000000000000, 8.30000000000000,\n 11.8000000000000, 14.5000000000000, 16, 18.2000000000000, 18.4000000000000, 19.9000000000000,\n 24.6000000000000, 28.5200000000000, 33.8000000000000, 35, 35.4000000000000,\n 35.6000000000000],\n [69.1000000000000, 66, 58.2000000000000, 48, 78, 71.7000000000000, 31, 61.1000000000000,\n 53.3000000000000, 81.1000000000000, 76, 70.2000000000000, 41.2000000000000, 64.4000000000000,\n 80.2000000000000, 50.9000000000000, 75.2000000000000, 77.3000000000000, 37.8000000000000, 67,\n 53.2000000000000, 72, 74.8000000000000, 54.7000000000000, 66.5000000000000, 35.9000000000000,\n 25.7000000000000, 60.7000000000000, 50.5000000000000, 68.9000000000000, 27.3000000000000,\n 70.3000000000000, 59.6000000000000, 44, 20.8000000000000, 61.7000000000000, 57.2000000000000,\n 47, 36]])\n stn_left = np.array([[-14.6, -13.2, -11.7, -9.10, -11.7, -13.2, -7.90, -10],\n [-15.1, -15.1, -15.1, -12.6, -12.6, -12.6, -9.40, -10.1],\n [-5.40, -7.20, -8.70, -8.70, -7.50, -5.10, -10.3, 
-7.80]])\n grid_right = np.copy(grid_left)\n grid_right[0, :] = grid_right[0, :] * -1\n stn_right = np.copy(stn_left)\n stn_right[0, :] = stn_right[0, :] * -1\n\n return grid_left, grid_right, stn_left, stn_right", "def inner_grid(height, width, digit):\n grid = create_grid(height, width)\n for r in range(1, height - 1):\n for c in range(1, width - 1):\n grid[r][c] = digit\n return grid", "def visualize(grid, board_size=16):\n visual_grid = []\n for i in range(board_size):\n row = []\n for j in range(board_size):\n row.append(grid[(j, i)])\n visual_grid.append(row)\n print(visual_grid)", "def create_initial_grid():\n\n\tgrid = {(x, y) : ' + ' for x in range(8) for y in range(8)}\n\n\t# Define initial positions \n\tgrid[(3,3)] = colors.RED + \"[I]\" + colors.STOP\n\tgrid[(4,3)] = colors.GREEN + \"[A]\" + colors.STOP\n\tgrid[(3,4)] = colors.GREEN + \"[A]\" + colors.STOP\n\tgrid[(4,4)] = colors.RED + \"[I]\" + colors.STOP\n\n\treturn grid", "def drawGrid(self,gridLines=True):\n if not self.changed: self.edit()\n cGrid = Fmap.GRID\n cBorder = Fmap.BORDER\n if gridLines: #--Some fools don't want the grid!\n #--Grid\n for uv in range(-25,26,5):\n xy = 512/2 - 9*uv + 4\n self.drawRect(cGrid,0,xy,512,xy+1)\n self.drawRect(cGrid,xy,0,xy+1,512)\n #--Grid axes\n xy = 512/2 + 4\n self.drawRect(cBorder,0,xy,512,xy+1)\n self.drawRect(cBorder,xy,0,xy+1,512)\n #--Border\n self.drawBorder(cBorder,0,0,512,512,4)", "def _plot_grid(frames: Figure, ncols: int = 3) -> Figure:\n for frame in frames:\n frame.plot_height = frame.plot_height // ncols\n frame.plot_width = frame.plot_width // ncols\n return gridplot(frames, ncols=ncols)", "def display_grid(grid):\n\n\tprint(\"\"\"\n 0 1 2 3 4 5 6 7\n\t \n ▼ ▼ ▼ ▼ ▼ ▼ ▼ ▼ \"\"\", colors.BOLD + \"(X)\" + colors.STOP, end = '')\n\n\tprint('\\n\\n')\n\n\trow = 0\n\n\tfor i in range(8):\n\t\tprint(' ', row, ' ▶ ', end = ' ')\n\t\tfor j in range(8):\n\t\t\tprint(grid[j,i], end = ' ')\n\t\tprint('\\n\\n')\n\t\trow += 1\n\n\tprint(colors.BOLD + ' (Y)\\n' + colors.STOP)", "def grid_04():\n plot = {\"Walls\": [\"N\", \"S\", \"W\"], \"TARDIS\": False, \"Transmat\": False,\n \"Plot\": f'\\nEerie blue lights lit the cold corridors. To the NORTH, SOUTH, and WEST are solid metal walls.\\n'}\n return plot", "def make_grid(N):\n\n x = np.linspace(-2. , 2 , N)\n y = np.linspace(-2. , 2 , N)\n # two evenly spaced grids from -2 to 2\n\n return x, y", "def populate_board(self):\n for key, value in self.game.white_pieces.items():\n x_pos = self.width * value.x_pos\n y_pos = self.width * value.y_pos\n img = self.load_image(\"images/\" + value.image, value.starting_position)\n self.place_image_on_canvas(x_pos, y_pos, img, \"images/\" + value.image, value.starting_position)\n for key, value in self.game.black_pieces.items():\n x_pos = self.width * value.x_pos\n y_pos = self.width * value.y_pos\n img = self.load_image(\"images/\" + value.image, value.starting_position)\n self.place_image_on_canvas(x_pos, y_pos, img, \"images/\" + value.image, value.starting_position)", "def create_empty_grid(width, height):\n return [[None] * width for _ in range(height)]" ]
[ "0.7382325", "0.7234491", "0.71866596", "0.708115", "0.70502263", "0.7038171", "0.6941421", "0.69232905", "0.68283314", "0.6826088", "0.68064976", "0.67884314", "0.6752977", "0.6732644", "0.67231864", "0.6684483", "0.66807157", "0.6656162", "0.66169745", "0.65663916", "0.6561653", "0.65604335", "0.6547847", "0.6546493", "0.6521471", "0.6508047", "0.64983076", "0.64851636", "0.64844537", "0.6465039", "0.6462103", "0.6461146", "0.64532185", "0.6450081", "0.6448128", "0.6448128", "0.644775", "0.6428161", "0.64278644", "0.64118063", "0.6395002", "0.6388304", "0.6378135", "0.63736045", "0.6371471", "0.6345007", "0.63414", "0.6339131", "0.6338049", "0.6334834", "0.63328665", "0.6320677", "0.6319932", "0.6316417", "0.631068", "0.63099813", "0.630505", "0.6297766", "0.6295495", "0.6293713", "0.6290055", "0.62830096", "0.62674963", "0.6250331", "0.6247666", "0.62415904", "0.6234074", "0.62281513", "0.6223039", "0.6215271", "0.62140906", "0.61913633", "0.6182257", "0.6181361", "0.617906", "0.6173272", "0.6163202", "0.6153589", "0.6148237", "0.6145428", "0.61383843", "0.61320114", "0.6126562", "0.6126488", "0.6115262", "0.6113566", "0.61089134", "0.6098154", "0.6092535", "0.60914195", "0.60637724", "0.60532457", "0.6051669", "0.60496974", "0.6049056", "0.6048409", "0.60461545", "0.6044862", "0.6031201", "0.6019904", "0.60148627" ]
0.0
-1
validate_target verifies that target is a valid MAC address, IP address or hostname.
def validate_target(target, arp_table):
    try:
        mac = mac_address(target)
        return mac
    except TypeError:
        pass
    try:
        ip = ip_address(target)
        if ip in arp_table.keys():
            return arp_table[ip].mac
    except TypeError:
        pass
    if target in arp_table:
        return arp_table[target].mac
    else:
        raise TypeError('{} is not a valid target'.format(target))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_target(target: str) -> bool:\n try:\n gethostbyname(target)\n except (gaierror, UnicodeError):\n return False\n return True", "def validateIP():\n try:\n s = socket.inet_aton(args.target)\n except socket.error:\n print(\"\")\n print(f\"{bad_cmd} Bad IP address\")\n print(\"\")\n sys.exit()", "def target_is_valid(self, target_id=0):\n try:\n target = self.target(target_id=target_id)\n except:\n return False\n return target['state'] != \"invalid\"", "def valid(self, target):", "def verify_as_host(self, target, message_handler):\n\n # Check we can host the target.\n if not self.supported_target(target, message_handler):\n raise UserException(\n \"{0} is not a supported {1} development host\".format(\n self.name, target.name))", "def _is_valid_target(hostname):\n if not hostname:\n return False\n\n # Check if it's a valid IP\n if _is_valid_ipv4_address(hostname) or _is_valid_ipv6_address(hostname):\n return True\n\n # Check if it's a valid DNS name\n\n if hostname[-1] == '.':\n hostname = hostname[:-1] # strip exactly one dot from the right, if present\n if len(hostname) < 1 or len(hostname) > 253: # Technically 255 octets but 2 are used for encoding\n return False\n\n labels = hostname.split(\".\")\n\n # the TLD must be not all-numeric\n if re.match(r\"[0-9]+$\", labels[-1]):\n return False\n\n allowed = re.compile(r\"(?!-)[a-z0-9-]{1,63}(?<!-)$\", re.IGNORECASE)\n return all(allowed.match(label) for label in labels)", "def target_validation(target_name, action):\n json_data = read_file('presqt/specs/targets.json', True)\n for data in json_data:\n if data['name'] == target_name:\n if data[\"supported_actions\"][action] is False:\n raise PresQTValidationError(\n \"PresQT Error: '{}' does not support the action '{}'.\".format(target_name, action),\n status.HTTP_400_BAD_REQUEST)\n return True, data['infinite_depth']\n else:\n raise PresQTValidationError(\n \"PresQT Error: '{}' is not a valid Target name.\".format(target_name), status.HTTP_404_NOT_FOUND)", "def test_target_resembles_ip(self):\n for fqdn in ('10.234.30.253', '128.193.0.3', 'fe80::e1c9:1:228d:d8'):\n with self.assertRaises(ValidationError):\n self.create_ptr(ip_str='128.193.0.2', fqdn=fqdn,\n ip_type='4')", "def _validator_target(self, field, value):\n if not REG.match(value):\n self._error(field, \"{} is not a valid target\".format(value))", "def check(self, target, port):\n pass", "def test_target_existence(self):\n self.create_ptr(\n ip_str='128.193.0.2', fqdn='nonexistent.oregonstate.edu',\n ip_type='4')", "def supported_target(self, target, message_handler):\n\n # iOS can never be a host.\n return False", "def validate_target(data, handshake):\n\n if data['header'] != handshake:\n END_POINT({\n 'status': 'invalid-handshake',\n 'handshake': handshake,\n })\n comment('handshake: %r' % data['header'])\n\n # Import all requested modules\n for mod in data.get('imports', ()):\n importlib.import_module('boxed')\n try:\n importlib.import_module(mod)\n except ImportError:\n END_POINT({\n 'status': 'invalid-import',\n 'module': mod,\n })\n comment('all modules successfully imported')\n\n # If the target attribute is a callable, simply return it\n target = data['target']\n if callable(target):\n return target\n\n # If it is a path string, we load the proper target function in the given\n # location.\n mod, _, func = data['target'].rpartition('.')\n try:\n mod = importlib.import_module(mod)\n target = getattr(mod, func)\n except ImportError as ex:\n END_POINT({\n 'status': 'invalid-target',\n 'message':\n 'could not import 
module %r. Maybe it must be passed it to '\n 'the \"imports\" argument.' % mod,\n })\n except AttributeError:\n END_POINT({\n 'status': 'invalid-target',\n 'message':\n 'could not find function \"%s\" in module %s' % (func, mod),\n })\n comment('target function loaded as %s' % funcname(target))\n return target", "def _is_valid_target_str(self, target):\n if isinstance(target, str):\n return True\n else:\n return False", "def valid_target(start, target, words):\r\n if target.isalpha(): # target word must be alphabetic\r\n if len(start) == len(target): # target word must be same size as start word\r\n if start != target: # target and start words must be different\r\n if target in words: # target word must be in the list of words\r\n return \"0\"\r\n else:\r\n return \"Target word not in list of words....please reenter\"\r\n else:\r\n return \"Target word must be different from Start word....please reenter\"\r\n else:\r\n return \"Target word must be same length as Start word....please reenter\"\r\n else:\r\n return \"Target word must contain only letters....please reenter\"", "def valid_mikettle_mac(mac, pat=re.compile(r\"[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}\")):\n if not pat.match(mac.upper()):\n raise argparse.ArgumentTypeError('The MAC address \"{}\" seems to be in the wrong format'.format(mac))\n return mac", "def transfer_target_validation(source_target, destination_target):\n json_data = read_file('presqt/specs/targets.json', True)\n\n for data in json_data:\n if data['name'] == source_target:\n if destination_target not in data['supported_transfer_partners']['transfer_out']:\n raise PresQTValidationError(\n \"PresQT Error: '{}' does not allow transfer to '{}'.\".format(\n source_target, destination_target),\n status.HTTP_400_BAD_REQUEST)\n\n elif data['name'] == destination_target:\n if source_target not in data['supported_transfer_partners']['transfer_in']:\n raise PresQTValidationError(\n \"PresQT Error: '{}' does not allow transfer from '{}'.\".format(\n destination_target, source_target),\n status.HTTP_400_BAD_REQUEST)\n\n return True", "def validate_target(func, *args, **kwargs):\n def inner(self, *args, **kwargs):\n # find the target param\n target_id = None\n if 'target_id' in kwargs and kwargs['target_id'] != None:\n target_id = kwargs['target_id']\n else:\n target_id = 0\n\n # if there was a target specified, check that it's valid\n if not self.target_is_valid(target_id):\n raise NoSuchTargetException()\n\n # call the function\n return func(self, *args, **kwargs)\n return inner", "def verify_as_target(self, message_handler):", "def verify_as_target(self, message_handler):\n\n self.platform.verify_as_target(message_handler)", "def _validate_rule_target_name(name: str) -> None:\n if not name:\n raise common_exceptions.RuleTargetValidationError(\n \"A `name` field must be supplied.\"\n )", "def test_host_validation(runner: CliRunner) -> None:\n invalid_res = runner.invoke(cli.main, [\"-b\", \"1.2.3.4.5\"])\n assert invalid_res.exit_code == 2\n assert 'Invalid value for \"-b\" / \"--bind-address\"' in invalid_res.output\n assert \"'host' is invalid in configuration\" in invalid_res.output", "def supported_target(self, target, message_handler):\n\n # This default implementation checks that the architectures are the\n # same.\n return target is self", "def supported_target(self, target, message_handler):\n\n # Android can never be a host.\n return False", "def identifyTargetType(self, target):\n ipAddress = 
re.compile('\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}')\n ipFind = re.findall(ipAddress, target)\n if ipFind is not None and len(ipFind) > 0:\n return \"ip\"\n\n md5 = re.compile('[a-fA-F0-9]{32}', re.IGNORECASE)\n md5Find = re.findall(md5,target)\n if md5Find is not None and len(md5Find) > 0:\n return \"md5\"\n\n return \"hostname\"", "def test_debugger_delete_invalid_target(self):\n target = lldb.SBTarget()\n self.assertFalse(target.IsValid())\n self.dbg.DeleteTarget(target)", "def _target_is_valid_filename(self):\n filename = self.target\n if not filename_is_valid(filename):\n raise BadFilenameError(f\"{repr(filename)} must be a valid filename.\")\n return True", "def test_validate_ip_ok():\n ip = '1.1.1.1'\n assert howisresolved.validate_ip(ip) is None", "def _validate_parameters(self):\n self.target_metric = get_formatted_target_metric(\n self.target_metric, G.Env.metrics, default_dataset=\"oof\"\n )", "def validate_host(host):\n # FIXME: not convinced that pinging the machine is a good choice. it's definitely not needed for localhost\n route = os.system(\"ping -t 2 -c 1 \" + host)\n\n if route != 0:\n raise errs.IpError(host, 'Could not ping host: %s' % (host))\n\n try:\n # FIXME: i don't think there is any point in converting to ip address. socket.connect seems to handle machine names just fine and this is preferable since it is more human readable\n host = socket.gethostbyname(host)\n # FIXME: i don't think this line is doing anything. the previous line will error on an invalid name or malformed ip\n socket.inet_aton(host)\n except socket.error:\n raise errs.IpError(host, 'Please specify a valid host: %s' % (host))", "def valid_mitemp_mac(mac, pat=re.compile(r\"4C:65:A8:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}\")):\n if not pat.match(mac.upper()):\n raise argparse.ArgumentTypeError('The MAC address \"{}\" seems to be in the wrong format'.format(mac))\n return mac", "def validate_matching_target_name(target_filename, target_obj, inventory_path):\n logger.debug(\"validating target name matches the name of yml file %s\", target_filename)\n try:\n target_name = target_obj[\"vars\"][\"target\"]\n except KeyError:\n error_message = (\n f'Target missing: target \"{target_filename}\" is missing parameters.kapitan.vars.target\\n'\n \"This parameter should be set to the target name\"\n )\n raise InventoryError(error_message)\n\n if target_filename != target_name:\n target_path = os.path.join(os.path.abspath(inventory_path), \"targets\")\n\n error_message = (\n f'Target \"{target_name}\" is missing the corresponding yml file in {target_path}\\n'\n \"Target name should match the name of the target yml file in inventory\"\n )\n raise InventoryError(error_message)", "def verify_as_target(self, message_handler):\n\n super().verify_as_target(message_handler)\n\n # Set the various property values.\n ndk_root = self.platform.android_ndk_root\n android_api = self.platform.android_api\n toolchain_prefix = self.android_toolchain_prefix\n hosts = {\"win32\": \"windows\", \"darwin\": \"darwin\"}\n host = hosts.get(sys.platform) or \"linux\"\n android_host = '{}-x86_64'.format(host)\n\n # Check the toolchain bin directory.\n self.android_toolchain_bin = os.path.join(ndk_root, 'toolchains',\n 'llvm', 'prebuilt', android_host, 'bin')\n\n self.platform.android_check_exists(self.android_toolchain_bin)\n\n # Check the compiler.\n self.android_toolchain_cc = '{}{}-clang'.format(self.clang_prefix,\n android_api)\n\n self.platform.android_check_exists(\n os.path.join(self.android_toolchain_bin,\n 
self.android_toolchain_cc))", "def is_gentarget(self, target):\r\n raise NotImplementedError", "def check_ip_format(self, ip_address):\n # regex for validating an Ip-address \n ip_regex = \"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\/([0-9]|[1-2][0-9]|3[0-2]))?$\"\n\n # validate ip address\n r = re.compile(ip_regex)\n if(r.match(ip_address)):\n print(\"Valid IP address format\")\n self.target = ip_address\n return True\n else:\n print(R + \"{} is an invalid IP address format\".format(ip_address) + W)\n return False", "def test_cli_secret_validate_targets(self):\n argv = [\"kapitan\", \"secrets\", \"--validate-targets\",\n \"--secrets-path\", \"examples/kubernetes/secrets/targets/\",\n \"--inventory-path\", \"examples/kubernetes/inventory/\"]\n with self.assertRaises(SystemExit) as cm:\n sys.argv = argv\n main()\n argv[0] = BINARY_PATH\n result = subprocess.run(argv, stdout=subprocess.PIPE)\n self.assertEqual(cm.exception.code, result.returncode)", "def test_ipv4_validation_success():\n assert is_ipv4('8.8.8.8')", "def validate_host(self, host: str) -> bool:\n ip_address_regex = re.compile(r'^(?:(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])\\.){3}'\n r'(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])$')\n hostname_regex = re.compile(r'^(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\\.)+'\n r'[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$')\n url_regex = re.compile(r'^(ldaps?)://'\n r'((?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\\.)+'\n r'[a-z0-9][a-z0-9-]{0,61}[a-z0-9]):'\n r'([0-9]{1,5})$')\n if bool(ip_address_regex.match(host)):\n # using ipv4 address\n valid = True\n elif bool(hostname_regex.match(host)):\n # using a hostname address\n valid = True\n elif bool(url_regex.match(host)):\n # using host url address\n match = url_regex.match(host)\n proto = match.group(1)\n if proto == 'ldaps':\n self.server_use_ssl = True\n valid = True\n else:\n # unsupported host format\n valid = False\n return valid", "def test_verify_connection_to_a_device():", "def validate_serial_port(target):\n found = False\n detail = \"\"\n ser_ports = [tuple(port) for port in list(serial.tools.list_ports.comports())]\n for port in ser_ports:\n if target == port[0]:\n found = True\n if 'USB VID:PID=0403:6001' in port[2] and target == port[0]:\n detail = \"Serial FTDI FT232 (RS485/RS422/RS232) on {port}\".format(port=port[0])\n elif 'USB VID:PID=067B:2303' in port[2] and target == port[0]:\n detail = \"Serial Prolific PL2303 (RS232) on {port}\".format(port=port[0])\n elif target == port[0]:\n usb_id = str(port[2])\n detail = \"Serial vendor/device {id} on {port}\".format(id=usb_id, port=port[0])\n if not found and len(ser_ports) > 0:\n for port in ser_ports:\n detail += \", {}\".format(port[0]) if len(detail) > 0 else \" {}\".format(port[0])\n detail = \"Available ports:\" + detail\n return found, detail", "def getGwIp(target):\n tmp = target.split('.')\n try:\n gw = (tmp[0] + \".\" + tmp[1] + \".\" + tmp[2] + \".1\")\n except IndexError:\n print(bcolors.FAIL + \" Invalid IP provided: \" + target + bcolors.ENDC)\n return False\n return gw", "def test_validate_tgt_returns_true_when_no_valid_minions_have_been_found():\n ckminions = salt.utils.minions.CkMinions(opts={})\n with patch(\n \"salt.utils.minions.CkMinions.check_minions\", autospec=True, return_value={}\n ):\n result = ckminions.validate_tgt(\"fnord\", \"fnord\", \"fnord\", minions=[])\n assert result is True", "def _is_valid_target_int(self, target):\n if isinstance(target, (int, np.int, np.int8, np.int16, np.int32, 
np.int64)):\n return True\n else:\n return False", "def is_valid_ip(ip):\n ...", "def check_ball_on_target():\n\n pass", "def verify_as_target(self, message_handler):\n\n super().verify_as_target(message_handler)\n\n if self.msvc_target() != '32':\n raise UserException(\"MSVC is not configured for a 32-bit target\")", "def is_valid_mac(address):\n m = \"[0-9a-f]{2}([-:])[0-9a-f]{2}(\\\\1[0-9a-f]{2}){4}$\"\n if isinstance(address, six.string_types) and re.match(m, address.lower()):\n return True\n return False", "def test_verify_state_of_a_device():", "def is_targeted(self, targets):\n\n if targets:\n if isinstance(targets, str):\n # See if the string is a '|' separated list of targets.\n targets = targets.split('|')\n if len(targets) == 1:\n # There was no '|' so restore the original string.\n targets = targets[0]\n\n if isinstance(targets, str):\n # String targets can come from the project file (ie. the user)\n # and so need to be validated.\n if targets.startswith('!'):\n # Note that this assumes that the target is a platform\n # rather than an architecture. If this is incorrect then\n # it is a bug in the meta-data somewhere.\n platform = Platform.platform(targets[1:])\n covered = (self.platform is not platform)\n elif '-' in targets:\n architecture = Architecture.architecture(targets)\n covered = (self is architecture)\n else:\n platform = Platform.platform(targets)\n covered = (self.platform is platform)\n else:\n covered = (self.platform.name in targets)\n else:\n covered = True\n\n return covered", "def validateSemantic(cls,corpus,target):\n printMessage(cls,inspect.stack()[0][3],\n \"Validating against '%s' semantics..\"%(target))\n\n if target==\"new\":\n testfunction = Validator.isValidNew\n elif target==\"relaxed\":\n testfunction = Validator.isValidRelaxed\n elif target==\"compatible\":\n testfunction = Validator.isValidCompatible\n else:\n printError(cls,inspect.stack()[0][3],\"Cannot validate '%s' format\"%target)\n return(False)\n \n valid = testfunction(corpus)\n if valid:\n printMessage(cls,inspect.stack()[0][3],\"Valid semantics\")\n else:\n printError(cls,inspect.stack()[0][3],\"Invalid semantics\")\n return(valid)", "def addTargets(v):\n if v.spoof:\n print(\" \" + bcolors.WARNING + \"Turn off spoofer first\" + bcolors.ENDC)\n time.sleep(1)\n return\n try:\n target = input(\" Enter IP address of targets separated with spaces: \")\n except KeyboardInterrupt:\n return\n\n target = target.split(\" \")\n\n if len(v.targets) == 0:\n try:\n gw = input(\" Enter IP address of router (leave blank if same subnet): \")\n except KeyboardInterrupt:\n return\n if validIPAddress(gw):\n tmp = spoofer.get_mac(gw)\n if tmp:\n v.targets.append(gw)\n v.macs.append(tmp)\n else:\n print(\" \" + bcolors.WARNING + \"Did not add \" + gw + \" since no mac address found\" + bcolors.ENDC)\n time.sleep(2)\n return\n else:\n gw = getGwIp(target[0])\n if gw:\n tmp = spoofer.get_mac(gw)\n if tmp:\n v.targets.append(gw)\n v.macs.append(tmp)\n else:\n if gw:\n print(\" \" + bcolors.WARNING + \"Did not add \" + gw + \" since no mac address found\" + bcolors.ENDC)\n time.sleep(1)\n return\n\n for x in target:\n if validIPAddress(x):\n tmp = spoofer.get_mac(x)\n if tmp:\n v.targets.append(x)\n v.macs.append(x)\n else:\n print(\" \" + bcolors.WARNING + \"Did not add \" + x + \" since no mac address found\" + bcolors.ENDC)\n time.sleep(1)\n else:\n print(\" \" + bcolors.WARNING + x + \" is not a valid ip address\" + bcolors.ENDC)\n time.sleep(1)\n\n return", "def _check_regression_targets(y):\n y_type = 
type_of_target(y)\n if y_type not in ['continuous', 'binary', 'multiclass']:\n raise ValueError(\"Unknown response value type: %r\" % y_type)", "def verifyActionCenterFirewall():\n pass", "def can_target(name):\n return False", "def test_localhost_is_valid(self):\n val = gnome.gh.EventSourceValidator()\n validity = val.ip_str_is_valid('127.0.0.1')\n self.assertTrue(validity)", "def is_valid(values, dataset):\r\n # Only includes negative screens.\r\n if values[SCREEN_TYPE_COL] != \"negative selection\":\r\n STATS[NOT_NEG_SCREEN] += 1\r\n return False\r\n # Targets must have the correct length.\r\n if int(values[dataset.end_idx]) - int(values[dataset.start_idx]) !=\\\r\n consts.TARGET_LEN:\r\n STATS[WRONG_END_MINUS_START] += 1\r\n return False\r\n\r\n target = dataset.get_target(values)\r\n # Targets must have an NGG PAM sequence.\r\n if not target.endswith(\"GG\"):\r\n STATS[BAD_PAM] += 1\r\n return False\r\n # Another safety measure against targets with the wrong length.\r\n if len(target) != consts.TARGET_LEN:\r\n STATS[TARGET_BAD_LEN] += 1\r\n return False\r\n return True", "def test_ipv4_validation_failure():\n with pytest.raises(socket.error):\n is_ipv4('256.8.8.8')", "def testTargets(self):\n\n self.inv._devices = collections.OrderedDict([\n ('device_a', self.Device()), ('device_b', self.Device()),\n ('device_c', self.Device()), ('bogus', self.Device())])\n\n # Null command with no targets.\n self.assertEqual('Targets: ', self.inv._CmdFilter('targets', []))\n self.assertEqual('XTargets: ', self.inv._CmdFilter('xtargets', []))\n\n # Single host.\n self.inv._CmdFilter('targets', ['device_c'])\n self.assertEqual(['device_c'], self.inv.device_list)\n # Nonexistant host - rejected.\n self.assertRaises(ValueError, self.inv._CmdFilter,\n 'targets', ['nonexistant'])\n self.assertEqual(['device_c'], self.inv.device_list)\n\n # Multiple hosts.\n self.inv._CmdFilter('targets', ['device_c,device_a'])\n self.assertEqual(['device_a', 'device_c'], self.inv.device_list)\n\n # Build target with incremental suffix addition.\n self.inv._CmdFilter('targets', ['device_c'])\n self.inv._CmdFilter('targets', ['device_a'], True)\n self.assertEqual(['device_a', 'device_c'], self.inv.device_list)\n\n self.inv._CmdFilter('targets', ['^'])\n self.inv._CmdFilter('targets', ['device_c,device_a'], True)\n self.assertEqual(['device_a', 'device_c'], self.inv.device_list)\n\n # Null command with targets.\n self.assertEqual('Targets: device_c,device_a',\n self.inv._CmdFilter('targets', []))\n\n # Clean targets.\n # Unlike other filters, blank targets is not a match.\n self.inv._CmdFilter('targets', ['^'])\n self.assertEqual(self.inv.device_list, [])\n self.inv._CmdFilter('targets', ['^$'])\n self.assertEqual(self.inv.device_list, [])", "def test_ipv6_validation_success():\n assert is_ipv6('2001:db8::ff00:42:8329')", "def validateFromET(cls,root,target):\n corpus = Corpus()\n if corpus.readFromET(root):\n return( Validator.validate(corpus,target) )\n else:\n return(False)", "def IsTarget(self, target_name):\n return target_name in self.GetTargets()", "def _mac_test(mac):\n\n\t\tif re.search(r'([0-9A-F]{2}[:]){5}([0-9A-F]){2}', mac.upper()) is not None:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def test_ipv6_validation_failure():\n with pytest.raises(socket.error):\n is_ipv6('2001::0234:C1ab::A0:aabc:003F')", "def test_good_addresses_are_valid(self):\n val = gnome.gh.EventSourceValidator()\n for addr in GOOD_MOCK_ADDRESSES:\n validity = val.ip_str_is_valid(addr)\n self.assertTrue(validity)", "def check(self, 
targets, partition_size_hint=None):\r\n all_vts = self._sort_and_validate_targets(targets)\r\n invalid_vts = filter(lambda vt: not vt.valid, all_vts)\r\n return InvalidationCheck(all_vts, invalid_vts, partition_size_hint)", "def validate(cls, output_destination):\n # nothing to check :)\n pass", "def validate(cls, output_destination):\n # nothing to check :)\n pass", "async def target_parser(ctx: commands.Context, target: str) -> tuple:\n if target is None:\n target = ctx.author\n target_found = True\n else:\n try:\n target = await commands.MemberConverter().convert(ctx, target)\n target_found = True\n except commands.BadArgument:\n target_found = False\n return (target_found, target)", "def verify_host(self):\n super().verify_host()\n if not self.use_docker:\n if self.tools.host_os != \"Linux\":\n raise UnsupportedHostError(self.supported_host_os_reason)", "def valid_host(host):\n if host in ACCEPTED_HOSTS:\n return True\n return False", "def local_is_up(self, target):\n try:\n check_address(target)\n except ValueError:\n self.logger.warning('Target must be a tuple (IP, port), where IP '\n 'is a string (i.e. \"192.168.0.1\") and port is '\n 'an integer (i.e. 40000). Alternatively '\n 'target can be a valid UNIX domain socket.')\n return False\n\n self.check_tunnels()\n return self.tunnel_is_up.get(target, True)", "def check_target_binary(self, y):\n y_type = type_of_target(y)\n if y_type not in ['binary']:\n raise ValueError('Label type must be binary')", "def _check_for_passively_detected_failures(self, target):\n return self._check_procmon_failures(target=target)", "def validate_application_command_permission_overwrite_target(target):\n # GOTO\n while True:\n if isinstance(target, Role):\n target_type = APPLICATION_COMMAND_PERMISSION_OVERWRITE_TARGET_TYPE_ROLE\n target_id = target.id\n target_lookup_failed = False\n break\n \n if isinstance(target, ClientUserBase):\n target_type = APPLICATION_COMMAND_PERMISSION_OVERWRITE_TARGET_TYPE_USER\n target_id = target.id\n target_lookup_failed = False\n break\n \n if isinstance(target, Channel):\n target_type = APPLICATION_COMMAND_PERMISSION_OVERWRITE_TARGET_TYPE_CHANNEL\n target_id = target.id\n target_lookup_failed = False\n break\n \n if isinstance(target, tuple) and len(target) == 2:\n target_type_maybe, target_id_maybe = target\n \n if isinstance(target_type_maybe, type):\n if issubclass(target_type_maybe, Role):\n target_type = APPLICATION_COMMAND_PERMISSION_OVERWRITE_TARGET_TYPE_ROLE\n elif issubclass(target_type_maybe, ClientUserBase):\n target_type = APPLICATION_COMMAND_PERMISSION_OVERWRITE_TARGET_TYPE_USER\n elif issubclass(target_type_maybe, Channel):\n target_type = APPLICATION_COMMAND_PERMISSION_OVERWRITE_TARGET_TYPE_CHANNEL\n else:\n target_lookup_failed = True\n break\n \n elif isinstance(target_type_maybe, str):\n if target_type_maybe in ('Role', 'role'):\n target_type = APPLICATION_COMMAND_PERMISSION_OVERWRITE_TARGET_TYPE_ROLE\n elif target_type_maybe in ('User', 'user'):\n target_type = APPLICATION_COMMAND_PERMISSION_OVERWRITE_TARGET_TYPE_USER\n elif target_type_maybe in ('Channel', 'channel'):\n target_type = APPLICATION_COMMAND_PERMISSION_OVERWRITE_TARGET_TYPE_CHANNEL\n else:\n target_lookup_failed = True\n break\n \n elif isinstance(target_type_maybe, ApplicationCommandPermissionOverwriteTargetType):\n target_type = target_type_maybe\n \n elif isinstance(target_type_maybe, ApplicationCommandPermissionOverwriteTargetType.VALUE_TYPE):\n target_type = ApplicationCommandPermissionOverwriteTargetType.get(target_type_maybe)\n \n 
else:\n target_lookup_failed = True\n break\n \n if type(target_id_maybe) is int:\n target_id = target_id_maybe\n elif isinstance(target_id_maybe, int):\n target_id = int(target_id_maybe)\n else:\n target_lookup_failed = True\n break\n \n target_lookup_failed = False\n break\n \n target_lookup_failed = True\n break\n \n if target_lookup_failed:\n raise TypeError(\n f'`target` can be `{Role.__name__}`, `{ClientUserBase.__name__}`, `{Channel.__name__}`, '\n f'`tuple` ((`{Role.__name__}`, `{ClientUserBase.__name__}`, `{Channel.__name__}`, `str` '\n f'(`\\'Role\\'`, `\\'role\\'`, `\\'User\\'`, `\\'user\\'`, `\\'Channel\\'`, `\\'channel\\'`)), `int`), '\n f'got {target.__class__.__name__}: {target!r}.'\n )\n \n return target_type, target_id", "def test_cannot_retrieve_platforms_if_same_target_added_more_than_once(self):\n self.header.target.append(policy.Target(['cisco', 'other_options']))\n with self.assertRaises(policy.HeaderDuplicateTargetPlatformError):\n p = self.header.target\n with self.assertRaises(policy.HeaderDuplicateTargetPlatformError):\n p = self.header.FilterOptions('cisco')\n with self.assertRaises(policy.HeaderDuplicateTargetPlatformError):\n p = self.header.FilterName('cisco')", "def verify_mpls_forwarding_table_outgoing_label(\n device, ip, expected_label=\"\", same_as_local=False,\n max_time=30, check_interval=10):\n timeout = Timeout(max_time, check_interval)\n while timeout.iterate():\n result = True\n\n try:\n out = device.parse('show mpls forwarding-table {}'.format(ip))\n except SchemaEmptyParserError:\n log.info(\"Device output is empty.\")\n result = False\n timeout.sleep()\n continue\n\n reqs = R(['vrf', '(.*)',\n 'local_label', '(?P<local_label>.*)',\n 'outgoing_label_or_vc', '(?P<outgoing_label>.*)',\n 'prefix_or_tunnel_id', '(?P<prefix>.*)',\n 'outgoing_interface', '(?P<interface>.*)',\n 'next_hop', '(?P<next_hop>.*)'])\n found = find([out], reqs, filter_=False, all_keys=True)\n\n if found:\n keys = GroupKeys.group_keys(reqs=reqs.args, ret_num={},\n source=found, all_keys=True)\n for route in keys:\n if same_as_local:\n log.info(\"Interface {route[interface]} has local label \"\n \"'{route[local_label]}' and outgoing label \"\n \"'{route[outgoing_label]}'\".format(route=route))\n if str(route['outgoing_label']) != str(route['local_label']):\n result = False\n else:\n log.info(\n \"Interface {route[interface]} outgoing label is \"\n \"'{route[outgoing_label]}', exepected to have label \"\n \"'{expected}'\".format(route=route, \n expected=expected_label))\n if str(route['outgoing_label']) != str(expected_label):\n result = False\n else:\n log.error(\"Could not find any mpls route for {}\".format(ip))\n result = False\n\n if result is True:\n return result\n\n timeout.sleep()\n\n return result", "def is_valid_host(host):\n host = host.encode('idna').lower()\n if not hasattr(is_valid_host, '_re'):\n is_valid_host._re = re.compile(r'^([0-9a-z][-\\w]*[0-9a-z]\\.)+[a-z0-9\\-]{2,15}$')\n return bool(is_valid_host._re.match(host))", "def test_SELFUPDATE_TARGET(self):\n # network may be unavailable, but we are not interested anyway,\n # so we ignore the exitcode\n output = self.run_command(\"SELFUPDATE_TARGET=ywangd:dev selfupdate --check\", exitcode=None)\n self.assertIn(\"Target: ywangd:dev\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)", "def checkHost(host):\n if \"192.168.\" in host:\n return False\n elif \"169.254.\" in host: #APIPA (Automatic Private Internet Protocol Addressing)\n return False\n elif re.match(\"^(127\\.)\",host):\n return False\n 
elif re.match(\"^(10\\.)\",host):\n return False\n elif re.match(\"^(172\\.1[6-9]\\.)|(172\\.2[0-9]\\.)|(172\\.3[0-1]\\.)\",host):\n return False\n else:\n return True", "def validate(cls,corpus,target):\n printWarning(cls,inspect.stack()[0][3],\n \"Preparing data for xsd validation..\")\n xmlstring = corpus.writeToString()\n printWarning(cls,inspect.stack()[0][3],\n \"Prepared\")\n xsd = Validator.validateXSD(xmlstring,target)\n semantic = Validator.validateSemantic(corpus,target)\n valid = (xsd and semantic)\n if not valid:\n printError(cls,inspect.stack()[0][3],\n \"Data not valid\")\n return(valid)", "def is_target( self ):\n\n raise NotImplementedError(\"is_target\");", "def verify_as_target(self, message_handler):\n\n super().verify_as_target(message_handler)\n\n if self.msvc_target() != '64':\n raise UserException(\"MSVC is not configured for a 64-bit target\")", "def analyze_local_binary_get_target_addresses(target_binary, target_platform, target_architecture, target_type, target_port, target_prefix, target_offset):\n binaries = [target_binary]\n\n if target_platform == PLATFORM_WINDOWS:\n additional_binaries = prompt_base(\"are there any dlls associated with this binary? (separate with a space)\")\n binaries.extend([os.path.abspath(binary) for binary in additional_binaries.split(\" \")])\n\n log(\"locating targetable jump instructions\")\n\n all_targetable_jumps = []\n\n for binary in binaries:\n # todo: rewrite to be more graceful\n objdump = subprocess.Popen([\"objdump\", \"-D\", binary],stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n grepjmp = subprocess.Popen([\"grep\", \"jmp\"], stdin=objdump.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n grepesp = subprocess.Popen([\"grep\", \"esp\"], stdin=grepjmp.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n results = grepesp.stdout.readlines()\n\n start_address = get_binary_start_address(binary)\n binary_short_name = os.path.basename(binary)\n\n if results is not None:\n for line in results:\n instruction = line.decode().strip()\n all_targetable_jumps.append([instruction, binary_short_name, start_address])\n \n if len(all_targetable_jumps) > 1:\n target_instruction = prompt_table(\"select an instruction to target.\", all_targetable_jumps)\n elif len(all_targetable_jumps) == 1:\n target_instruction = all_targetable_jumps[0]\n else:\n log_error(\"no targetable addresses found\")\n\n\n target_instruction_address = target_instruction[0][:8]\n target_source_file = target_instruction[1]\n target_base_address = target_instruction[2][-8:]\n\n target_instruction_offset_distance = int(target_instruction_address, 16) - int(target_base_address, 16)\n\n log(f\"selected the instruction in {target_source_file} at 0x{target_instruction_address} (0x{target_base_address} + {target_instruction_offset_distance}\")\n \n return (target_source_file, target_base_address, target_instruction_address, target_instruction_offset_distance)", "def has_invalid_targets(self):\n return len(self._combined_invalid_versioned_targets.targets) > 0", "def validate_uuid(self, uuid_to_check):\r\n if re.fullmatch(BASE62_REGEX, uuid_to_check):\r\n return 20 <= len(uuid_to_check) <= 22\r\n else:\r\n return False", "def test_target_name(self, parse_input):\n bb = parse_input(\"name testname\\nversion 1.0\\ntarget example\")\n assert bb.target[\"name\"] == \"example\"", "def mnemonic_is_valid(mnemonic: str, wordlist=WORDLIST):\n try:\n mnemonic_to_bytes(mnemonic, wordlist=wordlist)\n return True\n except Exception as e:\n return False", "def 
test_get_ip_type_by_address(setup_config, get_mock_event):\n\n # !ARRANGE!\n bad_bots = BadBots(setup_config, get_mock_event)\n\n ipv4_address_1 = '1.1.1.1'\n ipv4_address_2 = '11.22.33.44'\n ipv4_address_3 = '123.123.123.123'\n\n ipv6_address_1 = '2a02:a445:6d36:1:1e3:a188:313c:1d31'\n ipv6_address_2 = '3731:54:65fe:2::a7'\n ipv6_address_3 = 'fd07:a47c:3742:823e:3b02:76:982b:463'\n\n # !ACT!\n\n # Detect the IP type of provided IP addresses\n ipv4_address_1_type = bad_bots.get_ip_type_by_address(ipv4_address_1)\n ipv4_address_2_type = bad_bots.get_ip_type_by_address(ipv4_address_2)\n ipv4_address_3_type = bad_bots.get_ip_type_by_address(ipv4_address_3)\n\n ipv6_address_1_type = bad_bots.get_ip_type_by_address(ipv6_address_1)\n ipv6_address_2_type = bad_bots.get_ip_type_by_address(ipv6_address_2)\n ipv6_address_3_type = bad_bots.get_ip_type_by_address(ipv6_address_3)\n\n # !ASSERT!\n\n # Assert IP addresses are of type IPv4\n assert ipv4_address_1_type.value == BadBots.SourceIPType.IPV4.value\n assert ipv4_address_2_type.value == BadBots.SourceIPType.IPV4.value\n assert ipv4_address_3_type.value == BadBots.SourceIPType.IPV4.value\n\n # Assert IP addresses are of type IPv6\n assert ipv6_address_1_type.value == BadBots.SourceIPType.IPV6.value\n assert ipv6_address_2_type.value == BadBots.SourceIPType.IPV6.value\n assert ipv6_address_3_type.value == BadBots.SourceIPType.IPV6.value", "def hmVerifyMsgCRCOK(destination, protocol, source, expectedFunction, expectedLength, datal) :\r\n badresponse = 0\r\n if protocol == constants.HMV3_ID:\r\n checksum = datal[len(datal)-2:]\r\n rxmsg = datal[:len(datal)-2]\r\n crc = crc16() # Initialises the CRC\r\n expectedchecksum = crc.run(rxmsg)\r\n if expectedchecksum == checksum:\r\n print(\"CRC is correct\")\r\n else:\r\n print(\"CRC is INCORRECT\")\r\n s = \"Incorrect CRC: %s Expected: %s \\n\" % (datal, expectedchecksum)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n # Check the response\r\n dest_addr = datal[0]\r\n frame_len_l = datal[1]\r\n frame_len_h = datal[2]\r\n frame_len = (frame_len_h << 8) | frame_len_l\r\n source_addr = datal[3]\r\n func_code = datal[4]\r\n\r\n\r\n\r\n if (dest_addr != 129 and dest_addr != 160):\r\n print(\"dest_addr is ILLEGAL\")\r\n s = \"%s : Controller %s : Illegal Dest Addr: %s\\n\" % (localtime, loop, dest_addr)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (dest_addr != destination):\r\n print(\"dest_addr is INCORRECT\")\r\n s = \"%s : Controller %s : Incorrect Dest Addr: %s\\n\" % (localtime, loop, dest_addr)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (source_addr < 1 or source_addr > 32):\r\n print(\"source_addr is ILLEGAL\")\r\n s = \"%s : Controller %s : Illegal Src Addr: %s\\n\" % (localtime, loop, source_addr)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (source_addr != source):\r\n print(\"source addr is INCORRECT\")\r\n s = \"%s : Controller %s : Incorrect Src Addr: %s\\n\" % (localtime, loop, source_addr)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (func_code != constants.FUNC_WRITE and func_code != constants.FUNC_READ):\r\n print(\"Func Code is UNKNWON\")\r\n s = \"%s : Controller %s : Unknown Func Code: %s\\n\" % (localtime, loop, func_code)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (func_code != expectedFunction):\r\n print(\"Func Code is UNEXPECTED\")\r\n s = \"%s : Controller %s : Unexpected Func Code: %s\\n\" % (localtime, loop, func_code)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (func_code == constants.FUNC_WRITE and 
frame_len != 7):\r\n # Reply to Write is always 7 long\r\n print(\"response length is INCORRECT\")\r\n s = \"%s : Controller %s : Incorrect length: %s\\n\" % (localtime, loop, frame_len)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (len(datal) != frame_len):\r\n print(\"response length MISMATCHES header\")\r\n s = \"%s : Controller %s : Mismatch length: %s %s\\n\" % (localtime, loop, len(datal), frame_len)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n \"\"\"if (func_code == constants.FUNC_READ and expectedLength !=len(datal) ):\r\n # Read response length is wrong\r\n print(\"response length not EXPECTED value\")\r\n print(len(datal))\r\n print(datal)\r\n s = \"%s : Controller %s : Incorrect length: %s\\n\" % (localtime, loop, frame_len)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\"\"\"\r\n if (badresponse == 0):\r\n return True\r\n else:\r\n return False\r\n\r\n else:\r\n assert 0, \"Un-supported protocol found %s\" % protocol", "def validate_device_id(device_id):\n regex = re.compile(r'^[0-9a-fA-F]{2,6}$')\n if regex.match(device_id) == None:\n raise ValidationError('Device ID must be 2-6 characters and must be hexadecimal (0-9,a-f,A-F).')", "def ValidateCheckOperationTest(op_type_name, is_last, allow_signature,\n allow_unhashed, fail_src_extents,\n fail_dst_extents,\n fail_mismatched_data_offset_length,\n fail_missing_dst_extents, fail_src_length,\n fail_dst_length, fail_data_hash,\n fail_prev_data_offset, fail_bad_minor_version):\n op_type = _OpTypeByName(op_type_name)\n\n # REPLACE/REPLACE_BZ/REPLACE_XZ operations don't read data from src\n # partition. They are compatible with all valid minor versions, so we don't\n # need to check that.\n if (op_type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ,\n common.OpType.REPLACE_XZ) and (fail_src_extents or\n fail_src_length or\n fail_bad_minor_version)):\n return False\n\n # MOVE and SOURCE_COPY operations don't carry data.\n if (op_type in (common.OpType.MOVE, common.OpType.SOURCE_COPY) and (\n fail_mismatched_data_offset_length or fail_data_hash or\n fail_prev_data_offset)):\n return False\n\n return True", "def validate_target(new_data_folder, target_name, proposal_ref):\n # Don't need\n del proposal_ref\n\n validate_dict = {'Location': [], 'Error': [], 'Line number': []}\n\n # Check if there is any data to process\n target_path = os.path.join(new_data_folder, target_name)\n\n # Assume success...\n validated = True\n\n # A target directory must exist\n if not os.path.isdir(target_path):\n validate_dict = add_tset_warning(validate_dict, 'Folder',\n 'Folder does not match target name.'\n f' Expected \"{target_name}\".'\n f' Is the upload called \"{target_name}.zip\"?', 0)\n # No point in checking anything else if this check fails\n validated = False\n\n if validated:\n # An 'aligned' directory must exist\n aligned_path = os.path.join(target_path, 'aligned')\n if not os.path.isdir(aligned_path):\n validate_dict = add_tset_warning(validate_dict, 'Folder',\n 'No aligned folder present.'\n f' Expected \"{target_name}/{aligned_path}\"', 0)\n # No point in checking anything else if this check fails\n ok_so_far = False\n\n if validated:\n # A metadata.csv file must exist\n metadata_file = os.path.join(aligned_path, 'metadata.csv')\n if os.path.isfile(metadata_file):\n validated, validate_dict = check_metadata(metadata_file, validate_dict)\n else:\n validate_dict = add_tset_warning(validate_dict, 'File',\n 'No metedata file present.'\n f' Expected \"{target_name}/{aligned_path}/{metadata_file}\"', 0)\n validated = 
False\n\n return validated, validate_dict", "def is_target(X, require_attrs=None):\n\n if require_attrs is None:\n require_attrs = (\n name for name in vars(Target) if not name.startswith(\"_\")\n )\n\n return all([hasattr(X, name) for name in require_attrs])", "def test_validate_hostname_port(self, input_string, expected_result):\n test_result = validate_hostname_port(input_string)\n\n # Assert expected result\n self.assertEqual(expected_result, test_result)", "def validate_syntax(self):\n self._validate_network_prefix()\n self._validate_zero_network()\n self._validate_families()\n self._validate_unicast_addresses()\n self._validate_addresses()\n self._validate_gateway()\n self._validate_metric()", "def _validate_expose_in_dhcp_and_mac(self):\n from ralph.networks.models import IPAddress\n try:\n if not self.mac and self.ipaddress.dhcp_expose:\n raise ValidationError(\n _('MAC cannot be empty if record is exposed in DHCP')\n )\n except IPAddress.DoesNotExist:\n pass", "def test_verify_list_of_devices_in_my_network():", "def test_target_arg(self, parse_input):\n with pytest.warns(SyntaxWarning, match=\"only accept keyword options\"):\n parse_input(\"name testname\\nversion 1.0\\ntarget example (6)\")", "def __VerifyTargetPath(self, t_path):\n if not t_path:\n return\n\n assert t_path and (t_path[0] == \"/\") and (t_path[-1] != \"/\")\n end_idx = t_path.find(\"/\", 1)\n sub_path = t_path[1:] if (end_idx == -1) else t_path[1:end_idx]\n if sub_path in RESERVED_WORD_SET:\n raise exceptions.PublishServeException(\n \"System reserved word %s is used in target path %s.\" % (sub_path,\n t_path))\n\n vh_path_list = self._publish_helper.GetVsUrlPathList()\n sub_path = t_path if end_idx == -1 else t_path[:end_idx]\n if sub_path in vh_path_list:\n raise exceptions.PublishServeException(\n \"System reserved word %s is used in target path %s.\" % (sub_path,\n t_path))", "def matches(self, target):\n if len(target) < len(self):\n return False\n for offset, value in self._mask:\n if target[offset:offset+len(value)] != value:\n return False\n return True" ]
[ "0.7149191", "0.64991635", "0.64354825", "0.63836634", "0.62301147", "0.62007254", "0.6052976", "0.6006137", "0.59964573", "0.5994269", "0.59239537", "0.58881646", "0.58818513", "0.5870957", "0.5835328", "0.57881117", "0.575523", "0.5737461", "0.57259214", "0.5655272", "0.5633665", "0.56186795", "0.5612421", "0.5601692", "0.5570505", "0.5554997", "0.5472994", "0.5472263", "0.54712343", "0.54681593", "0.5431116", "0.5389869", "0.5344747", "0.53421354", "0.5336022", "0.52609533", "0.52488714", "0.52472687", "0.524699", "0.5190649", "0.5186148", "0.51772976", "0.51645166", "0.51594937", "0.5136413", "0.5114838", "0.5113255", "0.5101507", "0.5097864", "0.50947434", "0.5073125", "0.50668776", "0.5036194", "0.5033411", "0.50251776", "0.5020263", "0.5015829", "0.5009872", "0.500563", "0.5000166", "0.4980995", "0.49724963", "0.49707687", "0.49691552", "0.4962988", "0.4958386", "0.4958386", "0.49561235", "0.4944776", "0.49431685", "0.4942976", "0.49423566", "0.49266103", "0.49234375", "0.49156013", "0.49136043", "0.49095136", "0.4906913", "0.4900988", "0.48989245", "0.48937437", "0.48810086", "0.48640272", "0.48639596", "0.48497066", "0.48479", "0.4843609", "0.48386323", "0.4819054", "0.48137563", "0.48014432", "0.47971466", "0.47951627", "0.47921076", "0.47638", "0.4760979", "0.47533947", "0.47500187", "0.47482705", "0.47475994" ]
0.76750195
0
mac_address checks that a given string is in MAC address format
def mac_address(addr): mac = addr.upper() if not _mac_address_pattern.fullmatch(mac): raise TypeError('{} does not match a MAC address pattern'.format(addr)) return mac
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_valid_mac(address):\n m = \"[0-9a-f]{2}([-:])[0-9a-f]{2}(\\\\1[0-9a-f]{2}){4}$\"\n if isinstance(address, six.string_types) and re.match(m, address.lower()):\n return True\n return False", "def isMAC(s):\n\n s = s.replace(':', '')\n if len(s) != 12: return 0\n for char in s:\n if re.compile('[a-zA-Z0-9]+').match(char) == None: return 0\n return 1", "def _mac_test(mac):\n\n\t\tif re.search(r'([0-9A-F]{2}[:]){5}([0-9A-F]){2}', mac.upper()) is not None:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def valid_mikettle_mac(mac, pat=re.compile(r\"[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}\")):\n if not pat.match(mac.upper()):\n raise argparse.ArgumentTypeError('The MAC address \"{}\" seems to be in the wrong format'.format(mac))\n return mac", "def isMacAddr(string):\n return (True)", "def isValidMACAddress(self, macAddress):\n if re.match(\"[0-9a-f]{2}([-:])[0-9a-f]{2}(\\\\1[0-9a-f]{2}){4}$\", macAddress.lower()):\n return True\n return False", "def validate_and_normalize_mac(address):\n if not is_valid_mac(address):\n if constants.CLONE_ISO_MAC in address:\n # get interface name from the label\n intf_name = address.rsplit('-', 1)[1][1:]\n raise exception.ClonedInterfaceNotFound(intf=intf_name)\n else:\n raise exception.InvalidMAC(mac=address)\n return address.lower()", "def mac_aton(str):\n macbytes = [int(i, 16) for i in str.split(':')]\n return struct.pack('6B', *macbytes)", "def valid_mitemp_mac(mac, pat=re.compile(r\"4C:65:A8:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}\")):\n if not pat.match(mac.upper()):\n raise argparse.ArgumentTypeError('The MAC address \"{}\" seems to be in the wrong format'.format(mac))\n return mac", "def checkMACAddress(MACAddress):\n \n MACPattern = re.compile('^[a-fA-F0-9]{2}:[a-fA-F0-9]{2}:[a-fA-F0-9]{2}:[a-fA-F0-9]{2}:[a-fA-F0-9]{2}:[a-fA-F0-9]{2}$')\n MACMatch = MACPattern.match(MACAddress)\n \n return MACPattern.match(MACAddress)", "def regmac(mac):\n return len(mac.split(\":\")[1]) == 12 and mac.split(\":\")[1] or None", "def check_eth_address_format(address):\n if len(address) != 42 or address[:2] != '0x':\n return False\n\n for ch in address[2:]:\n if ch not in \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890\":\n return False\n\n return True", "def _mac_addr(address):\n return ':'.join('%02x' % ord(b) for b in address)", "def check_address_format(address):\n if len(address) != 42 or address[:2] != '0x':\n return False\n\n for ch in address[2:]:\n if ch not in \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890\":\n return False\n\n return True", "def mac_addr(address):\n return ':'.join('%02x' % compat_ord(b) for b in address)", "def mac_addr(address):\n return ':'.join('%02x' % compat_ord(b) for b in address)", "def mac_addr(address):\n return ':'.join('%02x' % compat_ord(b) for b in address)", "def mac_addr(address):\n return ':'.join('%02x' % compat_ord(b) for b in address)", "def isMac(cls, mac):\n return mac.startswith(cls.MAC_PREFIX)", "def get_mac_addr(bytes):\n bytes_str = map('{:02x}'.format, bytes)\n mac_addr = ':'.join(bytes_str).upper()\n return mac_addr", "def chkformac(mac, srcdata):\n import re\n ans = re.compile(mac).search(srcdata)\n return ans and True or False", "def eth_addr(a):\n if isinstance(a, bytes):\n a = a.decode(\"latin\")\n string = \"%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\"\n mac = string % (ord(a[0]), ord(a[1]), ord(a[2]),\n ord(a[3]), ord(a[4]), ord(a[5]))\n return mac", "def good_mac(mac):\r\n return mac.upper().replace('-', ':')", "def 
mac_addr(address):\n\tprint(':'.join('%02x' % compat_ord(b) for b in address))\n\treturn ':'.join('%s' % format(compat_ord(b), '0>8b') for b in address)", "def mac_ntoa(mac):\n return '%.2x:%.2x:%.2x:%.2x:%.2x:%.2x' % tuple(map(ord, list(mac)))", "def get_mac_addr():\n suffix = None\n while suffix is None:\n orig_suffix = input('Enter the remaining 4 character MAC address suffix (e.g. fa34): ')\n # remove all character spacers\n strip_spacers = orig_suffix.maketrans({x: None for x in ':-.'})\n suffix = orig_suffix.translate(strip_spacers)\n\n # check if it's a valid hex string\n invalid_hex = False\n try:\n int(suffix, 16)\n except ValueError:\n invalid_hex = True\n\n if len(suffix) != 4 or invalid_hex:\n print('Invalid MAC address suffix: %s' % orig_suffix)\n suffix = None\n\n mac_addr = MAC_ADDR_OUI + suffix\n mac_addr = '%s%s:%s%s:%s%s:%s%s:%s%s:%s%s' % tuple(mac_addr.lower())\n return mac_addr", "def validate_address(address:str) -> bool:\r\n return True", "def mac_pton(s):\n return binascii.unhexlify(s.replace(\":\", \"\"))", "def check_afm(afm):\n \n if not isinstance(afm, str):\n raise TypeError( \"check_afm()\", \"You should feed to this function only strings to avoid exceptions and errors! Aborting.\" )\n if len(afm) == 11 and afm[:2].upper() == \"EL\":\n afm=afm[2:]\n if afm.isdigit() == True and len(afm) == 9:\n i, sums = 256, 0\n for digit in afm[:-1]:\n sums += int(digit) * i\n i /= 2\n checksum = sums % 11\n if int(afm[-1]) == int(checksum) or (checksum==10 and afm[-1]==\"0\"):\n return True\n return False", "def create_magic_packet(macaddress: str) -> bytes:\n if len(macaddress) == 17:\n sep = macaddress[2]\n macaddress = macaddress.replace(sep, \"\")\n elif len(macaddress) == 14:\n sep = macaddress[4]\n macaddress = macaddress.replace(sep, \"\")\n if len(macaddress) != 12:\n raise ValueError(\"Incorrect MAC address format\")\n return bytes.fromhex(\"F\" * 12 + macaddress * 16)", "def emulab_mac(mac):\n\n return \"\".join(mac.lower().split(':'))", "def leasemac(macstr):\n maclst = []\n while macstr:\n maclst.append(macstr[:2].lower())\n macstr = macstr[2:]\n newmac = \":\".join(\"%s\" % s for s in maclst)\n return newmac", "def validate_ethereum_address(address: str):\n\n if len(address) < 42:\n raise ValueError(\"Not an Ethereum address: {}\".format(address))\n\n try:\n if not is_hex_address(address):\n raise ValueError(\"Not an Ethereum address: {}\".format(address))\n except UnicodeEncodeError:\n raise ValueError(\"Could not decode: {}\".format(address))\n\n # Check if checksummed address if any of the letters is upper case\n if any([c.isupper() for c in address]):\n if not is_checksum_address(address):\n raise ValueError(\"Not a checksummed Ethereum address: {}\".format(address))", "def check_ont_address_format(address):\n if len(address) != 34:\n return False\n\n for ch in address:\n if ch not in '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz':\n return False\n\n return True", "def validate(self, value):\n super(MACAddressField, self).validate(value)\n if value:\n try:\n value = EUI(str(value), dialect=mac_bare)\n return\n except (ValueError, TypeError, ValidationError):\n raise ValidationError(self.error_messages[\"invalid\"] % {\"value\": value})", "def get_mac_address(self, result, host):\n if \"mac\" in result['scan'][host][\"addresses\"]:\n return result['scan'][host][\"addresses\"][\"mac\"]\n else:\n return \"\"", "def test_unit_mac_address_decode(self):\n octet0 = 0xFF\n octet1 = 0xFE\n octet2 = 0xFB\n octet3 = 0xFA\n octet4 = 0xF7\n octet5 = 0xF6\n 
decode = MidniteClassicModbusRegisters.UNIT_MAC_ADDRESS['decode']\n registers = []\n registers.append((octet1 << 8) | octet0)\n registers.append((octet3 << 8) | octet2)\n registers.append((octet5 << 8) | octet4)\n expected = {\n 'mac_address': [hex(octet5),\n hex(octet4),\n hex(octet3),\n hex(octet2),\n hex(octet1),\n hex(octet0)]\n }\n self.assertDictEqual(expected, decode(registers))\n registers = ['A', 'B', 'C']\n self.assertRaises(TypeError, decode, registers)\n registers = []\n self.assertRaises(IndexError, decode, registers)", "def is_valid_address(address) -> bool:\n if not address.startswith('one1'):\n return False\n hrp, _ = bech32_decode(address)\n if not hrp:\n return False\n return True", "def get_mac_address(ifname):\n try:\n return open('/sys/class/net/' + ifname + '/address') \\\n .readline().strip()\n except:\n SysTools.logger.error(\"Failed to get mac-address of %s\", ifname)\n return \"00:00:00:00:00:00\"", "def _arp(ip_address):\n cmd = ['arp', '-n', ip_address]\n arp = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n out, _ = arp.communicate()\n match = re.search(r'(([0-9A-Fa-f]{1,2}\\:){5}[0-9A-Fa-f]{1,2})', str(out))\n if match:\n return match.group(0)\n _LOGGER.info('No MAC address found for %s', ip_address)\n return None", "def test_add_macaddress(self):\n mac = '00:00:00:00:00:00'\n info = self.api.add_macaddress(mac, tags=['asd'])\n self.assertEqual(info['value'], mac)\n tags = [t['name'] for t in info['tags']]\n self.assertEqual(tags, ['asd'])", "def get_mac_address():\n eth0_interface = 'eth0'\n addresses = netifaces.ifaddresses(eth0_interface)[netifaces.AF_LINK][0]\n mac_address = addresses['addr']\n return mac_address", "def convert_multicast_ip_to_mac(ip_address):\n # Convert the IP String to a bit sequence string\n try:\n ip_binary = socket.inet_pton(socket.AF_INET, ip_address)\n ip_bit_string = ''.join(['{0:08b}'.format(ord(x)) for x in ip_binary])\n except socket.error:\n raise RuntimeError('Invalid IP Address to convert.')\n\n # The low order 23 bits of the IP multicast address are mapped directly to the low order\n # 23 bits in the MAC-layer multicast address\n lower_order_23 = ip_bit_string[-23:]\n\n # The high order 25 bits of the 48-bit MAC address are fixed\n high_order_25 = '0000000100000000010111100'\n\n mac_bit_string = high_order_25 + lower_order_23\n\n # Convert the bit string to the Typical MAC Address String\n final_string = '{0:012X}'.format(int(mac_bit_string, 2))\n mac_string = ':'.join(s.encode('hex') for s in final_string.decode('hex'))\n return mac_string.upper()", "def mac_address(self):\n if self._mac_address is None:\n expression = expressions.WPA_MAC\n name = expressions.MAC_ADDRESS_NAME\n command = self.status_command\n self._mac_address = self._match(expression,\n name,\n command)\n return self._mac_address", "def bytes_to_mac_str(buff):\n if len(buff) != DataDescription.B_SEQ_MAC_LEN:\n raise TypeError(\"Invalid input\")\n return \"%02X:%02X:%02X:%02X:%02X:%02X\" % buff", "def _get_mac_address():\n if not sys.platform.startswith('linux'):\n raise RuntimeError(\n 'Cannot get the MAC address on non-Linux platforms'\n )\n ifname = get_default_iface_name_linux()\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n info = fcntl.ioctl(s.fileno(), 0x8927,\n struct.pack('256s', bytes(ifname, 'utf-8')[:15]))\n return ''.join('%02x' % b for b in info[18:24])", "def get_mac_from_raw_query(request_raw_query: str):\n mac = \"\"\n raw_query_list = request_raw_query.split(\"&\")\n for param in raw_query_list:\n if \"mac=\" in param:\n mac = 
param.replace(\"mac=\", \"\")\n if not mac:\n raise AttributeError(\"%s is not parsable\" % request_raw_query)\n return mac.replace(\"-\", \":\")", "def isMACCommand(self):\n return self.payload.fport == 0", "def generate_mac():\n rand_str = generate_name(choices=\"0123456789abcdef\", length=12)\n return \":\".join(re.findall(\"..\", rand_str))", "def generate_mac():\n rand_str = generate_name(choices=\"0123456789abcdef\", length=12)\n return \":\".join(re.findall(\"..\", rand_str))", "def InputMacAddress(prompt):\n while 1:\n SetPatternColor()\n print prompt,\n SetPatternColor(1)\n val = raw_input().strip()\n SetPatternColor()\n try:\n #if len(val) == 12 and int(val,16):\n if (len(val) == 12 or len(val) == 10): \n return val.upper()\n except ValueError:\n pass\n #SetPattern(0)\n SetPatternColor(0)\n print \"ERROR %s!!\"%prompt", "def check_address(address):\n try:\n int(address, 16) # check if hex\n if (len(address) * 4) != 160: raise ValueError(\"Address is not 160 bits long.\")\n except ValueError as e:\n print(\"Invalid address '%s'.\" % address)\n print(e)\n sys.exit(1)", "async def test_dhcp_match_macaddress(hass: HomeAssistant) -> None:\n integration_matchers = [{\"domain\": \"mock-domain\", \"macaddress\": \"B8B7F1*\"}]\n\n packet = Ether(RAW_DHCP_REQUEST)\n\n async_handle_dhcp_packet = await _async_get_handle_dhcp_packet(\n hass, integration_matchers\n )\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init:\n await async_handle_dhcp_packet(packet)\n\n assert len(mock_init.mock_calls) == 1\n assert mock_init.mock_calls[0][1][0] == \"mock-domain\"\n assert mock_init.mock_calls[0][2][\"context\"] == {\n \"source\": config_entries.SOURCE_DHCP\n }\n assert mock_init.mock_calls[0][2][\"data\"] == dhcp.DhcpServiceInfo(\n ip=\"192.168.210.56\",\n hostname=\"connect\",\n macaddress=\"b8b7f16db533\",\n )", "def isHex(string, needHexPrefix):\n return (True)", "def check(interface, mac):\n\tifconfig = sp.check_output(['sudo','ifconfig',interface]).decode()\n\tregexMax = re.compile(r'(\\w\\w:){5}\\w\\w')\n\tresult = regexMax.search(ifconfig)\n\tif not result == None and result.group() == mac:\n\t\tprint('Mac changed')\n\t\tprint('[+] '+interface+' --> '+mac)\n\telse:\n\t\tprint('[[[[!]]]] Faliour',result.group())", "def __get_mac_address(self):\n str_hex_mac = uuid.UUID(int=uuid.getnode()).hex[-12:]\n return str_hex_mac", "def checkMac(self, mac):\n\t\tif mac in self.seenMacs:\n\t\t\treturn True\n\t\treturn False", "def address_checksum_and_decode(addr: str) -> typing.Address:\n if addr[:2] != '0x':\n raise InvalidAddress('Address must be 0x prefixed')\n\n if not is_checksum_address(addr):\n raise InvalidAddress('Address must be EIP55 checksummed')\n\n addr = unhexlify(addr[2:])\n assert len(addr) in (20, 0)\n return addr", "def is_valid_address(self, address):\n assert isinstance(address, tuple), type(address)\n assert len(address) == 2, len(address)\n assert isinstance(address[0], str), type(address[0])\n assert isinstance(address[1], int), type(address[1])\n\n if address[0] == \"\":\n return False\n\n if address[0] == \"0.0.0.0\":\n return False\n\n if address[1] <= 0:\n return False\n\n try:\n binary = inet_aton(address[0])\n except socket_error:\n return False\n\n # ending with .0\n#Niels: is now allowed, subnet mask magic call actually allow for this\n# if binary[3] == \"\\x00\":\n# return False\n\n # ending with .255\n if binary[3] == \"\\xff\":\n return False\n\n return True", "def random_mac():\n return '\"02:%02x:%02x:%02x:%02x:%02x\"' % 
(random.randint(0,255),\n random.randint(0,255),\n random.randint(0,255),\n random.randint(0,255),\n random.randint(0,255))", "def set_mac_address(self, iface):\n if os.path.exists(\"/sys/class/net/%s\" % iface):\n return open(\"/sys/class/net/%s/address\" % iface).read().strip()\n return \"none\"", "def set_mac_address(self, iface):\n if os.path.exists(\"/sys/class/net/%s\" % iface):\n return open(\"/sys/class/net/%s/address\" % iface).read().strip()\n return \"none\"", "def is_valid_ipv6_address(ip_str):\r\n # We need to have at least one ':'.\r\n if ':' not in ip_str:\r\n return False\r\n\r\n # We can only have one '::' shortener.\r\n if ip_str.count('::') > 1:\r\n return False\r\n\r\n # '::' should be encompassed by start, digits or end.\r\n if ':::' in ip_str:\r\n return False\r\n\r\n # A single colon can neither start nor end an address.\r\n if ((ip_str.startswith(':') and not ip_str.startswith('::')) or\r\n (ip_str.endswith(':') and not ip_str.endswith('::'))):\r\n return False\r\n\r\n # We can never have more than 7 ':' (1::2:3:4:5:6:7:8 is invalid)\r\n if ip_str.count(':') > 7:\r\n return False\r\n\r\n # If we have no concatenation, we need to have 8 fields with 7 ':'.\r\n if '::' not in ip_str and ip_str.count(':') != 7:\r\n # We might have an IPv4 mapped address.\r\n if ip_str.count('.') != 3:\r\n return False\r\n\r\n ip_str = _explode_shorthand_ip_string(ip_str)\r\n\r\n # Now that we have that all squared away, let's check that each of the\r\n # hextets are between 0x0 and 0xFFFF.\r\n for hextet in ip_str.split(':'):\r\n if hextet.count('.') == 3:\r\n # If we have an IPv4 mapped address, the IPv4 portion has to\r\n # be at the end of the IPv6 portion.\r\n if not ip_str.split(':')[-1] == hextet:\r\n return False\r\n if not is_valid_ipv4_address(hextet):\r\n return False\r\n else:\r\n try:\r\n # a value error here means that we got a bad hextet,\r\n # something like 0xzzzz\r\n if int(hextet, 16) < 0x0 or int(hextet, 16) > 0xFFFF:\r\n return False\r\n except ValueError:\r\n return False\r\n return True", "def check_hmac(mac, data):\n h_mac = hmac.new(args['m'], bytes(data), digestmod=hashlib.sha256).digest()\n print 'HMAC validation: \\n%s\\n' % str(h_mac == mac)", "def mac_from_vm(vm: libvirt.virDomain = None) -> str:\n doc = minidom.parseString(vm.XMLDesc())\n interfaces = doc.getElementsByTagName('mac')\n return interfaces[0].getAttribute('address')", "def _get_mac_address(self, mac_numbers):\n\n mac = \"\"\n for num in mac_numbers:\n num = self._convert_to_hex(num)\n mac = ':'.join((mac, num))\n mac = mac[1:]\n return mac", "def get_mac(self) -> str:\n self.sendline(\"iw {} info\".format(self.iface_dut))\n # We are looking for MAC definition of STA\n # wdev 0x1\n # addr 96:4e:c9:cc:7a:2c\n # type managed\n self.expect(\"addr (?P<mac>..:..:..:..:..:..)\\r\\n\\t(type|ssid)\")\n return self.match.group('mac')", "def _identify_mac(self, request):\n params = parse_authz_header(request, None)\n if params is None:\n return None\n if params.get(\"scheme\") != \"MAC\":\n return None\n # Check that various parameters are as expected.\n token = params.get(\"id\")\n if token is None:\n msg = \"missing MAC id\"\n return self._respond_unauthorized(request, msg)\n # Check the timestamp and nonce for freshness or reuse.\n # TODO: the spec requires us to adjust for per-client clock skew.\n try:\n timestamp = int(params[\"ts\"])\n except (KeyError, ValueError):\n msg = \"missing or malformed MAC timestamp\"\n return self._respond_unauthorized(request, msg)\n nonce = params.get(\"nonce\")\n if 
nonce is None:\n msg = \"missing MAC nonce\"\n return self._respond_unauthorized(request, msg)\n if not self.nonce_manager.is_fresh(token, timestamp, nonce):\n msg = \"MAC has stale token or nonce\"\n return self._respond_unauthorized(request, msg)\n # OK, they seem like sensible MAC paramters.\n return params", "def macaddr(index):\n hname = name.encode(\"utf-8\") if not isinstance(name, bytes) else name\n mac_ext = hashlib.md5(hname).hexdigest() # pylint: disable=E1101\n return \"52:54:00:{0}:{1}:{2:02x}\".format(mac_ext[0:2], mac_ext[2:4], int(mac_ext[4:6], 16) ^ index)", "def get_mac(ip):\n ans, _ = srp(Ether(dst='ff:ff:ff:ff:ff:ff')/ARP(pdst=ip), timeout=3, verbose=0)\n if ans:\n return ans[0][1].src\n else:\n return None", "def get_mac_string():\n mac_int = getnode()\n mac_str = ':'.join((\"%012x\" % mac_int)[i:i + 2] for i in range(0, 12, 2))\n return mac_str", "def is_valid_ipv6_address(address, allow_brackets = False):\n\n if allow_brackets:\n if address.startswith('[') and address.endswith(']'):\n address = address[1:-1]\n\n if address.count('.') == 3:\n # Likely an ipv4-mapped portion. Check that its vaild, then replace with a\n # filler.\n\n ipv4_start = address.rfind(':', 0, address.find('.')) + 1\n ipv4_end = address.find(':', ipv4_start + 1)\n\n if ipv4_end == -1:\n ipv4_end = None # don't crop the last character\n\n if not is_valid_ipv4_address(address[ipv4_start:ipv4_end]):\n return False\n\n addr_comp = [address[:ipv4_start - 1] if ipv4_start != 0 else None, 'ff:ff', address[ipv4_end + 1:] if ipv4_end else None]\n address = ':'.join(filter(None, addr_comp))\n\n # addresses are made up of eight colon separated groups of four hex digits\n # with leading zeros being optional\n # https://en.wikipedia.org/wiki/IPv6#Address_format\n\n colon_count = address.count(':')\n\n if colon_count > 7:\n return False # too many groups\n elif colon_count != 7 and '::' not in address:\n return False # not enough groups and none are collapsed\n elif address.count('::') > 1 or ':::' in address:\n return False # multiple groupings of zeros can't be collapsed\n\n for entry in address.split(':'):\n if not re.match('^[0-9a-fA-f]{0,4}$', entry):\n return False\n\n return True", "def test_is_valid(self, address):\n self.test_string(address)\n self.test_alnum(address)", "def test_unicodeAndBytes(self):\n self.assertTrue(isIPv6Address(b\"fe80::2%1\"))\n self.assertTrue(isIPv6Address(u\"fe80::2%1\"))\n self.assertFalse(isIPv6Address(u\"\\u4321\"))\n self.assertFalse(isIPv6Address(u\"hello%eth0\"))\n self.assertFalse(isIPv6Address(b\"hello%eth0\"))", "def from_mac(self):\n try:\n dt_obj = self.epoch_2001 + timedelta(seconds=int(mac))\n self.in_mac = dt_obj.strftime('%Y-%m-%d %H:%M:%S.%f')\n except Exception as e:\n if not args.log:\n pass\n else:\n logging.error(str(type(e)) + \",\" + str(e))\n self.in_mac = False\n return self.in_mac", "def ishex(data: str) -> bool:\n return bool(re.fullmatch(r\"^0[x|X][0-9a-fA-F]+\", data)) or bool(re.fullmatch(r\"^[0-9a-fA-F]+[h|H]$\", data))", "def random_mac(pretty=False):\n\t_mac = [\n\t\t0x00, 0x16, 0x3e, \n\t\trandom.randint(0x00, 0x7f),\n\t\trandom.randint(0x00, 0xff),\n\t\trandom.randint(0x00, 0xff)\n\t]\n\tif pretty:\n\t\t_address = ':'.join(map(lambda x: \"%02x\" % x, _mac))\n\telse:\n\t\t_address = ''.join(map(lambda x: \"%02x\" % x, _mac))\n\t\n\treturn _address", "def get_mac(self) -> str:\n hex_mac = hexlify(self.message)[160:172].decode().upper()\n return (\n hex_mac[0:2]\n + \":\"\n + hex_mac[2:4]\n + \":\"\n + hex_mac[4:6]\n + \":\"\n + hex_mac[6:8]\n 
+ \":\"\n + hex_mac[8:10]\n + \":\"\n + hex_mac[10:12]\n )", "def validate_target(target, arp_table):\n try:\n mac = mac_address(target)\n return mac\n except TypeError:\n pass\n \n try:\n ip = ip_address(target)\n if ip in arp_table.keys():\n return arp_table[ip].mac\n except TypeError:\n pass\n\n if target in arp_table:\n return arp_table[target].mac\n else:\n raise TypeError('{} is not a valid target'.format(target))", "def mac_address(self):\n mac = [\n self.random.randint(0x00, 0xff),\n self.random.randint(0x00, 0xff),\n self.random.randint(0x00, 0xff),\n self.random.randint(0x00, 0xff),\n self.random.randint(0x00, 0xff),\n self.random.randint(0x00, 0xff)\n ]\n return ':'.join(map(lambda x: f\"{x:02X}\", mac))", "def _parse_mac_addr_table(self, cmd_output, mac_regex):\n lines = ensure_string(cmd_output).split(\"\\n\")\n\n arp_table = defaultdict(list)\n for line in lines:\n match = mac_regex.match(line)\n\n if not match:\n continue\n\n groups = match.groups()\n ip_address = groups[0]\n mac_address = groups[1]\n arp_table[mac_address].append(ip_address)\n\n return arp_table", "def from_mac(self):\n reason = \"[!] Mac Absolute timestamps are 9 digits, commonly followed by a decimal and up to 6 digits for milliseconds\"\n ts_type = self.ts_types['mac']\n try:\n if \".\" not in self.mac or not ((len(mac.split(\".\")[0]) == 9) and (len(self.mac.split(\".\")[1]) in range(0, 7))) or not ''.join(self.mac.split(\".\")).isdigit():\n self.in_mac = indiv_output = combined_output = False\n pass\n else:\n dt_obj = self.epoch_2001 + timedelta(seconds=float(self.mac))\n self.in_mac = dt_obj.strftime('%Y-%m-%d %H:%M:%S.%f')\n indiv_output = str(\"{} {} UTC\".format(ts_type, self.in_mac))\n combined_output = str(\"{}{}\\t\\t{} UTC{}\".format(self.left_color, ts_type, self.in_mac, self.right_color))\n except Exception:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_mac = indiv_output = combined_output = False\n return self.in_mac, indiv_output, combined_output, reason", "def check_if_mask(address):\n bin_address = address_to_bin(address)\n bin_str = ''.join(bin_address.split('.'))\n i = 0\n while i < len(bin_str) and bin_str[i] == '1':\n i += 1\n\n if i == 0:\n return False\n\n for j in range(i, len(bin_str)):\n if bin_str[j] == '1':\n return False\n\n return True", "def check_mac_signature(request, secret, params=None):\n if params is None:\n params = parse_authz_header(request, {})\n # Any KeyError here indicates a missing parameter,\n # which implies an invalid signature.\n try:\n expected_sig = get_mac_signature(request, secret, params)\n return not strings_differ(params[\"mac\"], expected_sig)\n except KeyError:\n return False", "def get_mac():\n\n interface = [x for x in netifaces.interfaces() if 'wlan' in x or 'wlp' in x][0]\n return netifaces.ifaddresses(interface)[netifaces.AF_LINK][0]['addr']", "def __isHexString(self, text):\n return all(map(lambda c: c in \"0123456789abcdefABCDEF\", text))", "def mac_address(self, mac_address):\n self._mac_address = mac_address", "def is_valid_ipv6_address(ip_str):\n try:\n ipaddress.IPv6Address(ip_str)\n except ValueError:\n return False\n return True", "def address_decode(self, address : str) -> str:\n if (address[:4] == 'xrb_' or address[:5] == 'nano_' and not self.banano_mode) or (address[:4] == 'ban_' and self.banano_mode):\n account_map = \"13456789abcdefghijkmnopqrstuwxyz\" # each index = binary value, account_lookup[0] == '1'\n account_lookup = {}\n for i 
in range(0, 32): # populate lookup index with prebuilt bitarrays ready to append\n account_lookup[account_map[i]] = BitArray(uint=i, length=5)\n data = address.split('_')[1]\n acrop_key = data[:-8] # we want everything after 'xrb_' or 'nano_' but before the 8-char checksum\n acrop_check = data[-8:] # extract checksum\n\n # convert base-32 (5-bit) values to byte string by appending each 5-bit value to the bitstring,\n # essentially bitshifting << 5 and then adding the 5-bit value.\n number_l = BitArray()\n for x in range(0, len(acrop_key)):\n number_l.append(account_lookup[acrop_key[x]])\n\n number_l = number_l[4:] # reduce from 260 to 256 bit (upper 4 bits are never used as account is a uint256)\n check_l = BitArray()\n\n for x in range(0, len(acrop_check)):\n check_l.append(account_lookup[acrop_check[x]])\n check_l.byteswap() # reverse byte order to match hashing format\n result = number_l.hex.upper()\n return result\n\n return False", "def mac_str_to_int(mac_str):\n return int(mac_str.replace(':', ''), 16)", "def add_colons_to_mac(self, mac_addr):\n\t\ts = list()\n\t\tfor i in range(12/2): # mac_addr should always be 12 chars, we work in groups of 2 chars\n\t\t\ts.append(mac_addr[i*2:i*2+2])\n\t\tr = \":\".join(s)\n\t\treturn r", "def convert_mac_address(self, outside_address):\n outside_address = self._macfix__result.get(outside_address, outside_address)\n return super(MacFix, self).convert_mac_address(outside_address)", "def _make_addr_resolve(self, addr: 'str | bytes', htype: 'int') -> 'bytes':\n _addr = addr.encode() if isinstance(addr, str) else addr\n\n if htype == Enum_Hardware.Ethernet:\n if PAT_MAC_ADDR.fullmatch(_addr) is not None:\n return _addr.replace(b':', b'').replace(b'-', b'')\n raise ProtocolError(f'Invalid MAC address: {addr!r}')\n return _addr", "def validate(smartAddress):\n\n addressLen = len(smartAddress)\n\n if addressLen < 27 or addressLen > 35:\n return None\n\n try:\n decoded = decode_base58(smartAddress, 25)\n except ValueError:\n return None\n\n # Compare checksum\n checksum = HashKeccak(decoded[:-4])[:4]\n if decoded[-4:] != checksum:\n return None\n\n if smartAddress != encode_base58(decoded):\n return None\n\n return smartAddress", "def test_invalidWithScopeID(self):\n self.assertFalse(isIPv6Address(\"%eth0\"))\n self.assertFalse(isIPv6Address(\":%eth0\"))\n self.assertFalse(isIPv6Address(\"hello%eth0\"))", "def check_token(token):\n valid = re.compile(r\"^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-\"\n r\"[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$\")\n\n return valid.match(token)", "def isIpv4Addr(string):\n return (True)", "def is_valid_uuid_string(uuid_str):\n return isinstance(uuid_str, str) and VALID_UUID_REGEX.match(uuid_str)", "def mac(self):\n if not self.is_rule:\n raise NotRuleError(\"No 'ATTR{address}' field.\")\n\n if \"ATTR{address}\" not in self._fields:\n raise NotRule70Error(\"No 'ATTR{address}' field.\")\n\n return self._fields[\"ATTR{address}\"]", "def get_eth0_mac_address(hostname):\n\n command = \"ipmitool -H {} -U ADMIN -P ADMIN raw 0x30 0x21\".format(hostname)\n\n try:\n result = subprocess.check_output(command, shell=True)\n except subprocess.CalledProcessError as exc:\n LOGGER.exception(\"Failed to get eth0 mac for %s\", hostname)\n\n # Remove space and newline\n result = result.strip()\n mac = \":\".join(result.split()[4:]) # No verification :-(\n\n return mac" ]
[ "0.83441114", "0.7788352", "0.74661726", "0.7465587", "0.74038935", "0.73613924", "0.7340942", "0.73230916", "0.7296056", "0.7203996", "0.7073263", "0.6936628", "0.67489415", "0.6534532", "0.64768", "0.64768", "0.64768", "0.64768", "0.6468425", "0.6463173", "0.6461584", "0.6455928", "0.6378788", "0.6356732", "0.62717354", "0.6267242", "0.62486", "0.62443924", "0.6211559", "0.6174044", "0.61074495", "0.6089391", "0.60697156", "0.605126", "0.59549737", "0.5952438", "0.5940022", "0.58696276", "0.5856401", "0.58511484", "0.5832245", "0.57937086", "0.57624525", "0.5758946", "0.5752237", "0.57418233", "0.5738957", "0.5709782", "0.57057613", "0.57057613", "0.5702128", "0.569295", "0.5687862", "0.56844586", "0.56822795", "0.56600267", "0.5643443", "0.5633284", "0.5618398", "0.56101793", "0.56046987", "0.56046987", "0.5593765", "0.5569947", "0.5568025", "0.55571014", "0.555536", "0.55344665", "0.55302775", "0.5528832", "0.55272156", "0.54961187", "0.5482175", "0.54772943", "0.54749274", "0.54686445", "0.54651624", "0.5458896", "0.545736", "0.5453706", "0.54531956", "0.5442762", "0.5431738", "0.54219776", "0.5419396", "0.54100513", "0.5406647", "0.5393404", "0.5384355", "0.5381418", "0.53541267", "0.5352087", "0.5341692", "0.53386205", "0.5336391", "0.5326067", "0.5316695", "0.5312951", "0.53119767", "0.5293832" ]
0.7487672
2
ip_address checks that a given string is in IP address format
def ip_address(addr):
    parts = addr.split('.')
    if len(parts) != 4:
        raise TypeError('{} does not match an IP address pattern'.format(addr))
    for part in parts:
        try:
            num = int(part)
            if num < 0 or num > 255:
                raise TypeError('{} does not match an IP address pattern'.format(addr))
        except ValueError:
            raise TypeError('{} does not match an IP address pattern'.format(addr))
    return addr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_ip(string):\n return ipv4.is_ip(string) or ipv6.is_ip(string)", "def validate_ip_address(ip_addr):\n try:\n ip_object = ipaddress.ip_address(ip_addr)\n return True\n except ValueError:\n return False", "def is_valid_ipaddress(str_ip: str) -> bool:\n try:\n ipaddress.ip_address(str_ip)\n return True\n except ValueError:\n return False", "def check_if_ip(address):\n address_list = map(lambda x: int(x), address.split('.'))\n\n if len(address_list) != 4:\n return False\n for octet in address_list:\n if not 0 <= octet <= 255:\n return False\n if address_list[0] in [0, 10, 127, 255]:\n return False\n return True", "def is_ip_address(value: str) -> bool:\n with suppress(ValueError):\n ipaddress.ip_address(value)\n return True\n\n return False", "def is_ip_address(value):\r\n # IPv6 added with Django 1.4\r\n from django.core.validators import validate_ipv46_address as ip_validator\r\n\r\n try:\r\n ip_validator(value)\r\n except ValidationError:\r\n return False\r\n return True", "def is_valid_ip(address):\n return is_valid_ipv4_address(address) or is_valid_ipv6_address(address)", "def is_ip(value):\n try:\n IP(value)\n except ValueError:\n return False\n return True", "def check_ip_format(self, ip_address):\n # regex for validating an Ip-address \n ip_regex = \"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\/([0-9]|[1-2][0-9]|3[0-2]))?$\"\n\n # validate ip address\n r = re.compile(ip_regex)\n if(r.match(ip_address)):\n print(\"Valid IP address format\")\n self.target = ip_address\n return True\n else:\n print(R + \"{} is an invalid IP address format\".format(ip_address) + W)\n return False", "def is_valid_ip(ip):\n ...", "def is_ip_addr(addr: str, strict: bool = True) -> bool:\n\n try:\n ipaddress.ip_network(addr, strict=strict)\n return True\n except ValueError:\n return False", "def _is_valid_ip(self, address):\r\n try:\r\n # Is this an valid ip address?\r\n ipaddr.IPNetwork(address)\r\n except ValueError:\r\n return False\r\n return True", "def valid_ip(ip_addr):\n try:\n inet_aton(ip_addr)\n return True\n\n except error:\n return False", "def valid_ip(address):\n if len(address.split()) == 1 and (\n valid_ipv4(address) or valid_ipv6(address)\n ):\n return True\n return False", "def is_ip(self,inputs):\n format = '((?:(?:25[0-5]|2[0-4]\\\\d|[01]?\\\\d?\\\\d)\\\\.){3}(?:25[0-5]|2[0-4]\\\\d|[01]?\\\\d?\\\\d))'\n pattern = re.match(format, inputs)\n if pattern is not None:\n return True\n else:\n return False", "def is_ip_address(value, messages=None):\n if value is None:\n return\n _messages = {\n 'type-string': \"must be a string\",\n 'invalid': \"is invalid\",\n }\n if messages:\n _messages.update(messages)\n if not isinstance(value, basestring):\n raise Invalid(_messages['type-string'])\n if _ip_address_regex.match(value) is None:\n raise Invalid(_messages['invalid'])", "def is_ipv4_address(ip): \n octet_range = range(256) \n octets = ip.split('.') \n\n if len(octets) != 4: \n return False \n elif any(not octet.isdigit() for octet in octets): \n return False \n elif any(int(octet) not in octet_range for octet in octets): \n return False \n\n return True", "def valid_ip(ip_address, strict=True):\n import socket\n try:\n socket.inet_aton(ip_address)\n return True\n except socket.error:\n if strict:\n raise ValueError(\"Invalid IP address\")\n return False", "def is_valid_ip_address(address):\n return Convert.is_valid_ipv6_address(\n address) or Convert.is_valid_ipv4_address(address)", "def is_ip(address):\n try:\n 
socket.inet_pton(socket.AF_INET, address)\n except socket.error:\n try:\n socket.inet_pton(socket.AF_INET6, address)\n except socket.error:\n return False\n return True", "def is_valid_ip(addr):\n\n try:\n socket.inet_aton(addr)\n except socket.error:\n return False\n return True", "def validIPAddress(ip):\n try:\n socket.inet_aton(ip)\n return True\n except socket.error:\n return False", "def verify_ip_address(ip):\n try:\n ipaddress.ip_address(ip)\n return True\n except ValueError:\n return False", "def isIpv4Addr(string):\n return (True)", "def test_re_ip(self, ip_address: str, is_valid_ip: bool):\n self.assertEqual(bool(re_ip.search(ip_address)), is_valid_ip)", "def __is_valid_ipv4_address(self, *args, **kwargs):\n ip = kwargs.get(\"ip\", None)\n\n if ip is None:\n raise ValueError(\"An IP must be provided.\")\n if not isinstance(ip, str):\n raise TypeError(\"The IP address is expected as a string, not %s.\" % (type(ip)))\n\n try:\n inet_pton(AF_INET, ip)\n except AttributeError:\n try:\n inet_aton(ip)\n except error:\n return False\n return ip.count('.') == 3\n except error:\n return False\n return True", "def validate_ip(ip):\n valid_ip = ''\n try:\n valid_ip = str(ipaddress.ip_address(ip))\n except ValueError:\n logging.error('ip address \\'{}\\' is not valid: '.format(ip))\n \n return valid_ip", "def is_actual_ip(self, ip_addr):\n try:\n socket.inet_aton(ip_addr)\n return True\n except socket.error:\n return False", "def isIP(ipToTest):\n \n try:\n socket.inet_aton(ipToTest)\n return True\n except socket.error:\n return False", "def IsValidIP(ip):\n if ip != None:\n if ip.count('.') == 3:\n ipNumbers = ip.split('.')\n for number in ipNumbers:\n if not number.isdigit() or int(number) > 255:\n return False\n return ipNumbers\n return False", "def valid_ip(ip):\n return valid_ipv4(ip) or valid_ipv6(ip)", "def validateIP(ip):\n # type: (str)->None\n try:\n socket.inet_aton(ip)\n except socket.error:\n socket.inet_pton(socket.AF_INET6, ip)", "def validateIP():\n try:\n s = socket.inet_aton(args.target)\n except socket.error:\n print(\"\")\n print(f\"{bad_cmd} Bad IP address\")\n print(\"\")\n sys.exit()", "def is_ipv4_address(s):\n # split the string on dots\n s_split = s.split('.')\n \n return len(s_split) == 4 and all(num.isdigit() and 0 <= int(num) < 256 for num in s_split)", "def is_valid_ipv4_address(address):\n # inet_aton accepts also 2.2.2\n if address.count('.') != 3:\n return False\n # filter out addresses with unexpected characters, like 1.2x2.2.2\n if any(char not in '0123456789.' for char in address):\n return False\n # inet_pton is available only on some platforms, but\n # inet_aton is less restrictive (this is why we need checks above)\n try:\n socket.inet_aton(address)\n except (socket.error, TypeError):\n return False\n return True", "def validate_ip_address(data, valid_values=None):\n\n msg = None\n try:\n # netaddr.core.ZEROFILL is only applicable to IPv4.\n # it will remove leading zeros from IPv4 address octets.\n ip = netaddr.IPAddress(validate_no_whitespace(data),\n flags=netaddr.core.ZEROFILL)\n # The followings are quick checks for IPv6 (has ':') and\n # IPv4. (has 3 periods like 'xx.xx.xx.xx')\n # NOTE(yamamoto): netaddr uses libraries provided by the underlying\n # platform to convert addresses. For example, inet_aton(3).\n # Some platforms, including NetBSD and OS X, have inet_aton\n # implementation which accepts more varying forms of addresses than\n # we want to accept here. The following check is to reject such\n # addresses. 
For Example:\n # >>> netaddr.IPAddress('1' * 59)\n # IPAddress('199.28.113.199')\n # >>> netaddr.IPAddress(str(int('1' * 59) & 0xffffffff))\n # IPAddress('199.28.113.199')\n # >>>\n if ':' not in data and data.count('.') != 3:\n msg = \"'%s' is not a valid IP address\" % data\n # A leading '0' in IPv4 address may be interpreted as an octal number,\n # e.g. 011 octal is 9 decimal. Since there is no standard saying\n # whether IP address with leading '0's should be interpreted as octal\n # or decimal, hence we reject leading '0's to avoid ambiguity.\n elif ip.version == 4 and str(ip) != data:\n msg = (\"'%(data)s' is not an accepted IP address, \"\n \"'%(ip)s' is recommended\") % {\"data\": data, \"ip\": ip}\n except Exception:\n msg = \"'%s' is not a valid IP address\" % data\n if msg:\n raise exceptions.DiagnoseException(msg)", "def _is_shorthand_ip(ip_str):\r\n if ip_str.count('::') == 1:\r\n return True\r\n if filter(lambda x: len(x) < 4, ip_str.split(':')):\r\n return True\r\n return False", "def is_valid_ipv4_address(address):\n\n if not isinstance(address, (bytes, str_type)):\n return False\n\n # checks if theres four period separated values\n\n if address.count('.') != 3:\n return False\n\n # checks that each value in the octet are decimal values between 0-255\n for entry in address.split('.'):\n if not entry.isdigit() or int(entry) < 0 or int(entry) > 255:\n return False\n elif entry[0] == '0' and len(entry) > 1:\n return False # leading zeros, for instance in '1.2.3.001'\n\n return True", "def _is_valid_ip(ip):\n return _is_valid_ipv4(ip) or _is_valid_ipv6(ip)", "def test_ip(self):\n ##Todo: Improve this check\n ip = socket.gethostbyname(socket.gethostname())\n ip = [int(i) for i in ip.split('.')]\n assert len(ip) == 4\n assert ip[0] == 10\n assert ip[1] == 137\n assert ip[2] == 1\n assert ip[3] >= 1 and ip[3] <= 255", "def isIpv4AddrWithNetmask(string):\n return (True)", "def IP(address):\n for klass in (V4Address, V6Address):\n try:\n ip = klass(address)\n except ValueError, e:\n error = e\n else:\n return ip\n\n raise error", "def isValidIP(ip_add):\n if _check_ip(ip_add):\n return True\n return False", "def is_valid_ip(arg):\n try:\n nacaddr.IP(arg)\n except:\n raise argparse.ArgumentTypeError('%s is an invalid ip address' % arg)\n return arg", "def validate_ip(argname, param, safe, optional = False):\n _validate_one(argname, param, safe, _check_ip, optional)", "def validate_address(address:str) -> bool:\r\n return True", "def validate_ip(ip):\n try:\n ipobj = IPy.IP(ip)\n if ipobj.iptype() == 'PRIVATE':\n print(\"IP addresses {} will be ignored as it is in a private network range.\".format(ip))\n ip = None\n except ValueError as ve:\n print(\"Invalid IP: {}\".format(ve.args))\n ip = None\n finally:\n return ip", "def test_clean_ip(self):\n\n raw_ip = 'client=mail-ed1-f51.google.com[209.85.208.51]'\n result = clean_ip(raw_ip)\n self.assertEqual(result, '209.85.208.51')", "def safe_ip_format(ip):\r\n try:\r\n if netaddr.IPAddress(ip).version == 6:\r\n return '[%s]' % ip\r\n except (TypeError, netaddr.AddrFormatError): # hostname\r\n pass\r\n # it's IPv4 or hostname\r\n return ip", "def _check_ip(val: Any, input_format: str, clean: bool) -> Any:\n try:\n if val in NULL_VALUES:\n return (None, \"null\") if clean else False\n\n address = ip_address(val)\n vers = address.version\n\n if vers == 4 and input_format != \"ipv6\" or vers == 6 and input_format != \"ipv4\":\n return (address, \"success\") if clean else True\n return (None, \"unknown\") if clean else False\n\n except 
(TypeError, ValueError):\n return (None, \"unknown\") if clean else False", "def is_valid_ipv4_address(address):\n try:\n socket.inet_pton(socket.AF_INET, address)\n except AttributeError: # no inet_pton here, sorry\n try:\n socket.inet_aton(address)\n except socket.error:\n return False\n return address.count('.') == 3\n except socket.error: # not a valid address\n return False\n\n return True", "def is_safe_ip(ip):\n # should define global\n # white_ip_list = ('127.0.0.1', '192.168.1.1')\n try:\n socket.inet_pton(socket.AF_INET, ip)\n except AttributeError:\n try:\n socket.inet_aton(ip)\n except socket.error:\n return False\n except socket.error:\n return False\n\n if ip.count('.') == 3 and ip in current_app.config['API_WHITE_IP_LIST']:\n return True\n else:\n return False", "def check_network_contains_ip(network: IPv4Network, address: str) -> bool:\n ip = IPv4Address(address)\n if ip in network:\n return True\n else:\n return False", "def ip_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, ipaddress._IPAddressBase):\n name = type(var).__name__\n raise IPError(\n 'Function {} expected IP address, {} got instead.'.format(func, name))", "def is_valid_ipv6_address(ip_str):\n try:\n ipaddress.IPv6Address(ip_str)\n except ValueError:\n return False\n return True", "def test_validate_ip_ok():\n ip = '1.1.1.1'\n assert howisresolved.validate_ip(ip) is None", "def is_ipv4(ip):\n try:\n socket.inet_aton(ip)\n except socket.error:\n return False\n return True", "def check_IP_addr(self, iplist):\n\n if type(iplist) != list:\n print(\"Error: please provide a list of IPv4 addresses to check (as a list of strings).\")\n return False\n\n for ip_addr in iplist:\n # Converts ip_addr to string, in case of bad type being passed\n ip_addr = str(ip_addr)\n\n # Checks ip_addr format\n try: \n inet_aton(ip_addr)\n except:\n print(\"Error: '{}' is an invalid IPv4 address.\\n\"\\\n \"Please use a valid IPv4 address (e.g.: 192.168.0.1)\".format(ip_addr))\n return False\n return True", "def __checkIPAddr(self,ip):\n if not iplib.checkIPAddrWithoutMask(ip):\n raise GeneralException(errorText(\"GENERAL\",\"INVALID_IP_ADDRESS\")%ip)", "def extract_ipaddress(string):\n pattern = r\"((([01]?[0-9]?[0-9]|2[0-4][0-9]|25[0-5])[ (\\[]?(\\.|dot)[ )\\]]?){3}([01]?[0-9]?[0-9]|2[0-4][0-9]|25[0-5]))\"\n ips = list()\n h_map = list()\n for match in re.finditer(pattern, string):\n if match.group(0) not in h_map:\n ips.append(wrap_value_with_context(match.group(0),match.start(),match.end()))\n h_map.append(match.group(0))\n\n return ips", "def checkIPValid(self, ipAddr):\n try:\n socket.inet_aton(ipAddr)\n return True\n except socket.error:\n return False", "def valid_ip_address (ip_address):\n return valid_key(ip_address, ip_hash, ip_hash_threshold)", "def containsip(url):\r\n try:\r\n if ip.ip_address(url):\r\n return 1\r\n except:\r\n return 0", "def is_valid_ipv6_address(ip_str):\r\n # We need to have at least one ':'.\r\n if ':' not in ip_str:\r\n return False\r\n\r\n # We can only have one '::' shortener.\r\n if ip_str.count('::') > 1:\r\n return False\r\n\r\n # '::' should be encompassed by start, digits or end.\r\n if ':::' in ip_str:\r\n return False\r\n\r\n # A single colon can neither start nor end an address.\r\n if ((ip_str.startswith(':') and not ip_str.startswith('::')) or\r\n (ip_str.endswith(':') and not ip_str.endswith('::'))):\r\n return False\r\n\r\n # We can never have more than 7 ':' (1::2:3:4:5:6:7:8 is invalid)\r\n if ip_str.count(':') > 7:\r\n 
return False\r\n\r\n # If we have no concatenation, we need to have 8 fields with 7 ':'.\r\n if '::' not in ip_str and ip_str.count(':') != 7:\r\n # We might have an IPv4 mapped address.\r\n if ip_str.count('.') != 3:\r\n return False\r\n\r\n ip_str = _explode_shorthand_ip_string(ip_str)\r\n\r\n # Now that we have that all squared away, let's check that each of the\r\n # hextets are between 0x0 and 0xFFFF.\r\n for hextet in ip_str.split(':'):\r\n if hextet.count('.') == 3:\r\n # If we have an IPv4 mapped address, the IPv4 portion has to\r\n # be at the end of the IPv6 portion.\r\n if not ip_str.split(':')[-1] == hextet:\r\n return False\r\n if not is_valid_ipv4_address(hextet):\r\n return False\r\n else:\r\n try:\r\n # a value error here means that we got a bad hextet,\r\n # something like 0xzzzz\r\n if int(hextet, 16) < 0x0 or int(hextet, 16) > 0xFFFF:\r\n return False\r\n except ValueError:\r\n return False\r\n return True", "def _parse_addr(self, addr_str):\n addr = [int(i) for i in addr_str.split('.')]\n if len(addr) != 4 or any([i < 0 for i in addr]) or any([i > 255 for i in addr]):\n raise ValueError('Invalid IP address: %s' % addr_str)\n val = 0\n for i in addr:\n val *= 255\n val += i\n return val", "def is_valid_ip(ip):\n if not ip or '\\x00' in ip:\n # getaddrinfo resolves empty strings to localhost, and truncates\n # on zero bytes.\n return False\n try:\n res = socket.getaddrinfo(ip, 0, socket.AF_UNSPEC,\n socket.SOCK_STREAM,\n 0, socket.AI_NUMERICHOST)\n return bool(res)\n except socket.gaierror as e:\n if e.args[0] == socket.EAI_NONAME:\n return False\n raise", "def is_valid_address(self, address):\n assert isinstance(address, tuple), type(address)\n assert len(address) == 2, len(address)\n assert isinstance(address[0], str), type(address[0])\n assert isinstance(address[1], int), type(address[1])\n\n if address[0] == \"\":\n return False\n\n if address[0] == \"0.0.0.0\":\n return False\n\n if address[1] <= 0:\n return False\n\n try:\n binary = inet_aton(address[0])\n except socket_error:\n return False\n\n # ending with .0\n#Niels: is now allowed, subnet mask magic call actually allow for this\n# if binary[3] == \"\\x00\":\n# return False\n\n # ending with .255\n if binary[3] == \"\\xff\":\n return False\n\n return True", "def is_valid_ipv4_address(address):\n invalid_list = ['0.0.0.0','255.255.255.255']\n try:\n ip = ipaddress.IPv4Address(address)\n if (ip.is_reserved) or (ip.is_multicast) or (ip.is_loopback) or (address in invalid_list):\n return False\n except ipaddress.AddressValueError:\n return False\n\n return True", "def check_valid_ip_int(value):\n try:\n address = int(value)\n except ValueError:\n raise argparse.ArgumentTypeError('value is not a positive number: {}'.format(value))\n try:\n ipaddress.ip_address(address)\n except ValueError:\n raise argparse.ArgumentTypeError('is out of IPv4/IPv6 boundaries')\n return address", "def valid_ipv4(ip):\n try:\n socket.inet_pton(socket.AF_INET, ip)\n return True\n except AttributeError: # no inet_pton\n try:\n socket.inet_aton(ip)\n return True\n except socket.error:\n return False\n except (socket.error, ValueError) as e:\n return False", "def request_valid_ip():\n ip = input(\"Enter a valid IP address you would like to check: \")\n return validate_ip(ip)", "def ipinfo_ip_check(ip):\n if not is_IPv4Address(ip):\n return None\n\n response = requests.get('http://ipinfo.io/%s/json' % ip)\n return response.json()", "def is_reserved(ip):\n if ip_between(ip, \"0.0.0.0\", \"0.255.255.255\"):\n return True\n elif ip_between(ip, 
\"10.0.0.0\", \"10.255.255.255\"):\n return True\n elif ip_between(ip, \"100.64.0.0\", \"100.127.255.255\"):\n return True\n elif ip_between(ip, \"127.0.0.0\", \"127.255.255.255\"):\n return True\n elif ip_between(ip, \"169.254.0.0\", \"169.254.255.255\"):\n return True\n elif ip_between(ip, \"172.16.0.0\", \"172.31.255.255\"):\n return True\n elif ip_between(ip, \"192.0.0.0\", \"192.0.0.255\"):\n return True\n elif ip_between(ip, \"192.0.2.0\", \"192.0.2.255\"):\n return True\n elif ip_between(ip, \"192.88.99.0\", \"192.88.99.255\"):\n return True\n elif ip_between(ip, \"192.168.0.0\", \"192.168.255.255\"):\n return True\n elif ip_between(ip, \"198.18.0.0\", \"198.19.255.255\"):\n return True\n elif ip_between(ip, \"198.51.100.0\", \"198.51.100.255\"):\n return True\n elif ip_between(ip, \"203.0.113.0\", \"203.0.113.255\"):\n return True\n elif ip_between(ip, \"224.0.0.0\", \"255.255.255.255\"):\n return True\n else:\n return False", "def isUseableIP(ip_add, mask=None):\n if _check_ip(ip_add):\n ip_split = ip_add.split('.')\n # 如果IP地址以0开头,则不可用\n if ip_split[0] == '0':\n return False\n # 如果IP地址以255开头,则不可用\n if ip_split[0] == '255':\n return False\n # 如果IP地址以127开头,则不可用\n if ip_split[0] == '127':\n return False\n # 如果IP地址以169.254开头,则不可用\n if ip_split[0] == '169' and ip_split[1] == '254':\n return False\n\n ip_num = ip2int(ip_add)\n # 2进制字符串,左补零,共32位\n ip_bit = bin(ip_num)[2:].zfill(32)\n # 过滤全零地址\n if ip_num == 0:\n return False\n # 如果是A类地址,则掩码为255.0.0.0\n if ip_bit[0] == '0':\n mask = mask or \"255.0.0.0\"\n # 如果是B类地址,则掩码为255.255.0.0\n elif ip_bit[:2] == '10':\n mask = mask or \"255.255.0.0\"\n # 如果是C类地址,则掩码为255.255.255.0\n elif ip_bit[:3] == '110':\n mask = mask or \"255.255.255.0\"\n # 其余地址全部不可用\n else:\n return False\n\n # 掩码不合法则不可用\n if not isValidMask(mask):\n return False\n\n # 根据掩码计算子网地址,如果IP为子网地址,则不可用\n subnet = calcSubnet(ip_add, mask)\n if ip_add == subnet:\n return False\n # 根据子网以及掩码计算广播地址,如果IP为广播地址,则不可用\n if ip_add == calcBroadcastBySubnet(subnet, mask):\n return False\n\n return True\n else:\n return False", "def parse_ip(self, ip):\n if not ip in self.ip_list:\n try:\n ip_address = ipaddress.ip_address(ip)\n use = not (\n ip_address.is_multicast or ip_address.is_unspecified or ip_address.is_reserved or ip_address.is_loopback or ip_address.is_link_local)\n if use and (self.include_public or ip_address.is_private):\n self.new_ip(ip)\n network = ipaddress.IPv4Network(\"{}/{}\".format(ip,\n self.netmask), strict=False)\n self.new_range(str(network))\n except ValueError:\n pass", "def filter_ipnet_contains_ip(network_cidr, ip_address):\n try:\n network_cidr_str = unicode(network_cidr)\n ip_address_str = unicode(ip_address)\n except NameError as ex:\n network_cidr_str = str(network_cidr)\n ip_address_str = str(ip_address)\n try:\n return IPv4Address(ip_address_str) in IPv4Network(network_cidr_str)\n except ValueError as ex:\n logging.error(network_cidr_str + \" is not a valid network address\")\n raise", "def test_ip_addresses_exists():\n load_ips()\n validate_names()", "def check_input(data):\n if data.has_key('fqdn') and data.has_key('ip'):\n\n try:\n socket.inet_aton(data['ip'])\n return True\n except socket.error:\n return False", "def IsValidIPV4(ip):\n match = re.match(r'^(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})$', ip)\n if not match:\n return False\n\n octets = [int(x) for x in match.groups()]\n\n # first octet must not be 0\n if octets[0] == 0:\n return False\n\n for n in octets:\n if n < 0 or n > 255:\n return False\n\n return True", "def 
check_address_format(address):\n if len(address) != 42 or address[:2] != '0x':\n return False\n\n for ch in address[2:]:\n if ch not in \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890\":\n return False\n\n return True", "def is_rfc1918(ip):\n if ip_between(ip, \"10.0.0.0\", \"10.255.255.255\"):\n return True\n elif ip_between(ip, \"172.16.0.0\", \"172.31.255.255\"):\n return True\n elif ip_between(ip, \"192.168.0.0\", \"192.168.255.255\"):\n return True\n else:\n return False", "def handle_hexip(bot, ievent):\n if not ievent.args:\n return ievent.missing('<ip | hex ip>')\n is_a = None\n if _re_hexip.match(ievent.args[0]):\n is_a = 'hexip'\n else:\n try:\n socket.inet_pton(socket.AF_INET, ievent.args[0])\n is_a = 'defip'\n except socket.error:\n pass\n if not is_a:\n ievent.missing('<ip | hex ip>')\n return\n if is_a == 'hexip':\n ip = []\n for i in range(4):\n ip.append(str(int(ievent.args[0][i*2:i*2+2], 16)))\n ip = '.'.join(ip)\n nevent = copy.copy(ievent)\n nevent.args = [ip]\n handle_dns(bot, nevent)\n else:\n test = ievent.args[0].split('.')\n ip = 16777216 * int(test[0]) + 65536 * int(test[1]) + 256 * \\\nint(test[2]) + int(test[3])\n ievent.reply('ip %s = %08x' % (ievent.args[0], ip))", "def handle_ip(bot, ievent):\n try:\n item = ievent.args[0]\n except IndexError:\n ievent.missing('<hostname>')\n return\n try:\n ipnr = socket.gethostbyname(item)\n ievent.reply(ipnr)\n except:\n ievent.reply(\"can't match \" + str(item))", "def is_private_address(address):\n\n if not is_valid_ipv4_address(address):\n raise ValueError(\"'%s' isn't a valid IPv4 address\" % address)\n\n # checks for any of the simple wildcard ranges\n\n if address.startswith('10.') or address.startswith('192.168.') or address.startswith('127.'):\n return True\n\n # checks for the 172.16.* - 172.31.* range\n\n if address.startswith('172.'):\n second_octet = int(address.split('.')[1])\n\n if second_octet >= 16 and second_octet <= 31:\n return True\n\n return False", "def is_valid_ipv4(address):\n try:\n return netaddr.valid_ipv4(address)\n except Exception:\n return False", "def is_valid_ipv6_address(address, allow_brackets = False):\n\n if allow_brackets:\n if address.startswith('[') and address.endswith(']'):\n address = address[1:-1]\n\n if address.count('.') == 3:\n # Likely an ipv4-mapped portion. 
Check that its vaild, then replace with a\n # filler.\n\n ipv4_start = address.rfind(':', 0, address.find('.')) + 1\n ipv4_end = address.find(':', ipv4_start + 1)\n\n if ipv4_end == -1:\n ipv4_end = None # don't crop the last character\n\n if not is_valid_ipv4_address(address[ipv4_start:ipv4_end]):\n return False\n\n addr_comp = [address[:ipv4_start - 1] if ipv4_start != 0 else None, 'ff:ff', address[ipv4_end + 1:] if ipv4_end else None]\n address = ':'.join(filter(None, addr_comp))\n\n # addresses are made up of eight colon separated groups of four hex digits\n # with leading zeros being optional\n # https://en.wikipedia.org/wiki/IPv6#Address_format\n\n colon_count = address.count(':')\n\n if colon_count > 7:\n return False # too many groups\n elif colon_count != 7 and '::' not in address:\n return False # not enough groups and none are collapsed\n elif address.count('::') > 1 or ':::' in address:\n return False # multiple groupings of zeros can't be collapsed\n\n for entry in address.split(':'):\n if not re.match('^[0-9a-fA-f]{0,4}$', entry):\n return False\n\n return True", "def check_eth_address_format(address):\n if len(address) != 42 or address[:2] != '0x':\n return False\n\n for ch in address[2:]:\n if ch not in \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890\":\n return False\n\n return True", "def enter_ipv4_address():\n\n\t#Checking IP address validity\n\twhile True:\n\t\ttry:\n\t\t\tipv4_address = input(\"\\nEnter an IPv4 address: \")\n\t\texcept KeyboardInterrupt:\n\t\t\tprint('\\nGood bye!\\n')\n\t\t\tsys.exit()\n\n\n\t\t#Checking octets - split ipv4_address string into list a using \".\" as a delimiter \n\t\ta = ipv4_address.split('.')\n\n\t\t#Diagnostics\n\t\t# print('ipv4_address = ', ipv4_address) \n\t\t# print('type ipv4_address =')\n\t\t# type_add = type(ipv4_address) \n\t\t# print(type_add) \n\t\t# print('type a =')\n\t\t# type_a = type(a) \n\t\t# print(type_a) \n\t\t# print('a = ',a) \n\t\t# print('len(a) =', len(a)) \n\t\t# print('a[0] = ', a[0]) \n\t\t# print('int(a[0]) = ', int(a[0])) \n\t\t# print('int(a[1]) = ', int(a[1])) \n\t\t# print('int(a[2]) = ', int(a[2])) \n\t\t# print('int(a[3]) = ', int(a[3])) \n\n\t\t# ipv4_address = 200.44.33.1\n\t\t# type(ipv_address) = <class 'str'>\n\t\t# type(a) = <class 'int'>\n\t\t# a = ['200', '44', '33', '1']\n\t\t# len(a) = 4\n\t\t# a[0] = 200\n\t\t# int(a[0]) = 200\n\t\t# int(a[1]) = 44\n\t\t# int(a[2]) = 33\n\t\t# int(a[3]) = 1\n\n\n\n\t\tif (len(a) == 4) and (1 <= int(a[0]) <= 223) and (int(a[0]) != 127) and (int(a[0]) != 169 or int(a[1]) != 254) and (0 <= int(a[1]) <= 255 and 0 <= int(a[2]) <= 255 and 0 <= int(a[3]) <= 255):\n\t\t\tbreak\n\n\t\telse:\n\t\t\tprint (\"\\nThe IPv4 address is NOT a valid unicast address! 
Please try again!\\n\")\n\t\t\tcontinue\n\n\treturn ipv4_address", "def str_to_inet(ip: str) -> bytes:\n try:\n return socket.inet_pton(socket.AF_INET, ip)\n except OSError:\n return socket.inet_pton(socket.AF_INET6, ip)", "def parse_addr(addr):\n\ttry:\n\t\tnew_addr = socket.inet_aton(addr)\n\texcept:\n\t\taddr = socket.gethostbyname(addr)\n\t\ttry:\n\t\t\tnew_addr = socket.inet_aton(addr)\n\t\texcept ValueError:\n\t\t\tlogging.exception('Error:')\n\t\t\traise ValueError, 'Invalid address: %s' % addr\n\n\treturn new_addr", "def overlay_ip(ip):\n return \"192.168.{}.{}\".format( *ip.split(\".\")[2:])", "def isofy_ipv4(ip_string, prefix=\"\"):\n ipaddress.IPv4Address(ip_string) # fails for invalid IP\n\n if prefix != \"\":\n prefix_valid = bool(re.match(r\"^.{2}(\\..{4})*?$\", prefix))\n if not prefix_valid:\n raise ValueError(f\"{prefix} cannot be used as ISO prefix, please check formatting\")\n prefix += \".\"\n # IP: split and fill with 0s\n ip_parts = ip_string.split(\".\")\n padded = [p.zfill(3) for p in ip_parts]\n joined = \"\".join(padded)\n # IP: split to chunks à 4 chars\n chunksize = 4\n ip_chunks = [joined[i : i + chunksize] for i in range(0, len(joined), chunksize)]\n # combine\n iso_address = prefix + \".\".join(ip_chunks) + \".00\"\n return iso_address", "def safe_is_non_loopback_address(address: str):\n try:\n return not ipaddress.ip_address(address).is_loopback\n except ValueError:\n return False", "def checkHost(host):\n if \"192.168.\" in host:\n return False\n elif \"169.254.\" in host: #APIPA (Automatic Private Internet Protocol Addressing)\n return False\n elif re.match(\"^(127\\.)\",host):\n return False\n elif re.match(\"^(10\\.)\",host):\n return False\n elif re.match(\"^(172\\.1[6-9]\\.)|(172\\.2[0-9]\\.)|(172\\.3[0-1]\\.)\",host):\n return False\n else:\n return True", "def safe_addr(ip_addr):\n return '.'.join(ip_addr.split('.')[:2] + ['xxx', 'xxx'])", "def is_ip(self) -> bool:\n return self.typ == ETH_P_IP", "def validate_ip_address(self):\n\t\tip_list = webnotes.conn.get_value('Profile', self.user, 'restrict_ip', ignore=True)\n\t\t\n\t\tif not ip_list:\n\t\t\treturn\n\n\t\tip_list = ip_list.replace(\",\", \"\\n\").split('\\n')\n\t\tip_list = [i.strip() for i in ip_list]\n\n\t\tfor ip in ip_list:\n\t\t\tif webnotes.remote_ip.startswith(ip):\n\t\t\t\treturn\n\t\t\t\n\t\twebnotes.msgprint('Not allowed from this IP Address')\n\t\traise webnotes.AuthenticationError", "def format_ip(addr):\n return \\\n str(ord(addr[0])) + '.' + \\\n str(ord(addr[1])) + '.' + \\\n str(ord(addr[2])) + '.' + \\\n str(ord(addr[3]))", "def test_ipv4_validation_success():\n assert is_ipv4('8.8.8.8')", "def vt_ip_check(ip, vt_api):\n if not is_IPv4Address(ip):\n return None\n\n url = 'https://www.virustotal.com/vtapi/v2/ip-address/report'\n parameters = {'ip': ip, 'apikey': vt_api}\n response = requests.get(url, params=parameters)\n try:\n return response.json()\n except ValueError:\n return None" ]
[ "0.8181276", "0.8099573", "0.80747545", "0.80292475", "0.8022243", "0.7864434", "0.78313065", "0.78308326", "0.78138804", "0.7800783", "0.77667433", "0.7727927", "0.7700073", "0.76853204", "0.76780635", "0.76094407", "0.7527797", "0.74806803", "0.74570745", "0.7426013", "0.74125993", "0.73984927", "0.7384964", "0.73721385", "0.7369126", "0.7325939", "0.7315556", "0.7294699", "0.7289279", "0.72534037", "0.7174186", "0.7150624", "0.71069807", "0.7088509", "0.70435715", "0.7031045", "0.70075864", "0.69906664", "0.6988398", "0.69668984", "0.6942907", "0.6922718", "0.6920271", "0.69146115", "0.6892723", "0.688875", "0.68768996", "0.6862457", "0.6839558", "0.68302315", "0.6818913", "0.68181825", "0.67926484", "0.6727732", "0.6723264", "0.6723002", "0.671019", "0.66825867", "0.66729283", "0.66631997", "0.6642169", "0.66398025", "0.6635236", "0.66221845", "0.66149163", "0.6614146", "0.661185", "0.6591844", "0.6591685", "0.6579791", "0.6566305", "0.65533626", "0.65240306", "0.64936864", "0.6485635", "0.647573", "0.643981", "0.6421707", "0.6410754", "0.64066947", "0.63352096", "0.63317895", "0.6330533", "0.6322651", "0.63203526", "0.6300943", "0.62931716", "0.6279557", "0.62734914", "0.62650573", "0.62582964", "0.6253735", "0.62213063", "0.62099457", "0.6200104", "0.61895865", "0.61816156", "0.6179399", "0.6128123", "0.61257386" ]
0.7447207
19
Do not return anything, modify root inplace instead.
def flatten(self, root) -> None:
    if not root:
        return None
    node = root
    while node:
        if node.left:
            rightmost = node.left
            while rightmost.right:
                rightmost = rightmost.right
            rightmost.right = node.right
            node.right = node.left
            node.left = None
        node = node.right
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def uproot(self):\n self.__root__ = self\n return self", "def root_replace(self,node):\r\n self.feature_index = node.feature_index\r\n self.threshold = node.threshold\r\n self.label = node.label\r\n self.left = node.left\r\n self.right = node.right\r\n self.substitute = node.substitute\r\n if node.left is not None and node.right is not None:\r\n node.left.parents.remove(node) if node in node.left.parents else node.left.parents\r\n node.left.parents.append(self) if self not in node.left.parents else node.left.parents\r\n node.right.parents.remove(node) if node in node.right.parents else node.right.parents\r\n node.right.parents.append(self) if self not in node.right.parents else node.right.parents", "def _replace(self, x, y):\n y.parent = x.parent\n if x is self.root:\n self.root = y\n return\n elif x is x.parent.left:\n x.parent.left = y\n else:\n x.parent.right = y\n\n self.update(y, -1)", "def update_root(self, action: Action) -> \"MonteCarloSearchTree\":\n if action in self._root.children:\n new_root = self._root.children[action]\n else:\n new_root = self._root.add_child(action)\n self._root.remove_child(new_root)\n self._root = new_root\n return self", "def fix_root(self):\n # In the main bzrlib code, this forces the new tree to use the same\n # tree root as the old tree. But merge-into explicitly doesn't want\n # that. So the first portion is just a copy of the old code, and then\n # we change the rest.\n try:\n self.tt.final_kind(self.tt.root)\n except NoSuchFile:\n self.tt.cancel_deletion(self.tt.root)\n if self.tt.final_file_id(self.tt.root) is None:\n self.tt.version_file(self.tt.tree_file_id(self.tt.root),\n self.tt.root)\n # All we do is skip the step which used to sanitize the root id.", "def recoverTree(self, root: TreeNode) -> None:\n self.tmp, self.left, self.right = None, None, None\n self.helper(root)\n self.left.val, self.right.val = self.right.val, self.left.val", "def _fix_up_to_root(self, idx):\n combine_fn = self._combine_fn\n while idx >= 1:\n # self.data[idx] = combine_fn(self.data[self._left(idx)], self.data[self._right(idx)])\n self.data[idx] = combine_fn(self.data[2 * idx], self.data[2 * idx + 1])\n # idx = self._parent(idx)\n idx = idx >> 1", "def update_with_move(self, last_move):\n if last_move in self._root._children:\n self._root = self._root._children[last_move]\n self._root._parent = None\n else:\n self._root = TreeNode(None, 1.0)", "def update_with_move(self, last_move):\n if last_move in self._root._children:\n self._root = self._root._children[last_move]\n self._root._parent = None\n else:\n self._root = TreeNode(None, 1.0)", "def update_with_move(self, last_move):\n if last_move in self._root._children:\n self._root = self._root._children[last_move]\n self._root._parent = None\n else:\n self._root = TreeNode(None, 1.0)", "def update_with_move(self, last_move):\n if last_move in self._root._children:\n self._root = self._root._children[last_move]\n self._root._parent = None\n else:\n self._root = TreeNode(None, 1.0)", "def recoverTree(self, root: Optional[TreeNode]) -> None:\n self.inorder(root)\n self.first.val,self.second.val=self.second.val,self.first.val", "def edit_root(self) -> Generator[Root, None, None]:\n with self.edit(Root.type) as root:\n if not isinstance(root, Root):\n raise RuntimeError(\"Unexpected root type\")\n yield root", "def set_relative_root(self, root):\r\n self.root = root", "def set_relative_root(self, root):\r\n self.root = root", "def root_orig(self):\n if hasattr(self, \"orig\"):\n return self.orig.root_orig\n return 
self", "def set_root(self, x, root):\n\n while self.P[x] < x:\n\n j = self.P[x]\n self.P[x] = root\n x = j\n\n self.P[x] = root", "def updateTree(self):\n self.reset()\n self.resetTree() \n self.read()", "def clone_as_root(self) :\n clone = deepcopy(self)\n clone.parent = None\n clone.path_length = 0\n clone.previous_action = None\n return clone", "def recoverTree(self, root: TreeNode) -> None:\n self.inorder(root)\n self.first.val, self.second.val = self.second.val, self.first.val", "def update_with_move(self, point, last_move):\n if point == -1:\n # reset the tree\n self._root = TreeNode(None, 1.0)\n else:\n self._root = self._root._children[point][last_move]\n self._root._parent = None", "def set_relative_root(self, root):\n self.root = root", "def rebalance_root(self):\n split_dirs = [d.split('/') for d in self.directories]\n new_root = []\n for level in zip(*split_dirs):\n if not(all([d == level[0] for d in level])):\n break\n new_root.append(level[0])\n self.root = '/'.join(new_root)", "def set_root(self):\n try:\n _check_call(_LIB.TreeliteTreeBuilderSetRootNode(\n self.tree.handle,\n ctypes.c_int(self.node_key)))\n except AttributeError:\n raise TreeliteError('This node has never been inserted into a tree; '\\\n + 'a node must be inserted before it can be a root')", "def _on_root_finder_update(self, change):\n if hasattr(self, \"_root_finder\"):\n del self._root_finder", "def root(self, node):\n\n if self.set[node] == node:\n return node\n\n self.set[node] = self.root(self.set[node])\n return self.set[node]", "def recoverTree(self, root: TreeNode) -> None:\n if not root:\n return\n self.pre = None\n self.m1 = None\n self.m2 = None\n self.helper(root)\n self.m1.val,self.m2.val = self.m2.val, self.m1.val", "def updatetree(self):\n if self.node:\n self.node.update()\n self.draw()", "def recoverTree(self, root):\n it = self.isValidBST(root)\n a, b = next(it)\n c = next(it, None)\n if c:\n _, c = c\n a.val, c.val = c.val, a.val\n else:\n a.val, b.val = b.val, a.val\n return root", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value)\n self.update(leaf_value)", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value)\n self.update(leaf_value)", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value)\n self.update(leaf_value)", "def _add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)", "def _add_root(self, e):\n if self._root is not None:\n raise ValueError(\"root exists\")\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)", "def SyncRoot(self) -> object:", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value) # - leaf_value because the MCTS tree is a max-min tree\n self.update(leaf_value)", "def _add_root(self, data):\n if self._root is not None:\n raise ValueError(\"Root exists\")\n self._size = 1\n self._root = self._Node(data)\n return self._make_position(self._root)", "def _root(self, ind):\n while (ind != self._id[ind]):\n #make every other node in path to point to its 
grandparent\n self._id[ind] = self._id[self._id[ind]]\n ind = self._id[ind]\n return ind", "def _add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)", "def _add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)", "def __init__(self):\n self.root = self.get_new_node();", "def test_removing_root(item):\n item.root = None\n assert not item.has_root", "def clean():\n new_tree = None", "def _replace_node(self, nxt, node):\n nxt.left = node.left\n nxt.right = node.right\n nxt.parent = node.parent\n if node is self.root:\n self.root = nxt\n if nxt.left:\n nxt.left.parent = nxt\n if nxt.right:\n nxt.right.parent = nxt\n if nxt.parent:\n if nxt.parent.right is node:\n nxt.parent.right = nxt\n else:\n nxt.parent.left = nxt", "def _rotate_left(self, og_root):\r\n new_root = og_root.right\r\n og_root.right = new_root.left\r\n if new_root.left:\r\n new_root.left.parent = og_root\r\n new_root.parent = og_root.parent\r\n if og_root is self.root: # if our original root of the rotation is the tree root, replace tree root with new root\r\n self.root = new_root\r\n else:\r\n if og_root is og_root.parent.left:\r\n og_root.parent.left = new_root\r\n else:\r\n og_root.parent.right = new_root\r\n new_root.left = og_root\r\n og_root.parent = new_root", "def flatten(self, root: TreeNode) -> None:\n if not root: return\n self.flatten(root.right)\n self.flatten(root.left)\n root.right = self.last\n root.left = None\n self.last = root", "def flatten(self, root: TreeNode) -> None:\n if not root:\n return\n left = root.left\n right = root.right\n root.left = None\n self.flatten(left)\n self.flatten(right)\n root.right = left\n cur = root\n while cur.right:\n cur = cur.right\n cur.right = right", "def replace_by_etree(self, root_el, el_idx=0):\n el = self.get_element_by_name(root_el.tag, el_idx)\n el[:] = list(root_el)\n el.attrib = root_el.attrib", "def flatten(self, root: TreeNode) -> None:\n if not root:\n return None\n\n self.flatten(root.right)\n self.flatten(root.left)\n\n root.right = self.prev\n root.left = None\n self.prev = root", "def delete_root(self, node):\n current = node\n successor = self.find_successor(current) \n temp_height = current.height\n current.height = successor.height\n successor.height = temp_height\n\n if successor != None:\n self.root = successor\n parent = successor.parent\n\n if successor.parent != node:\n if parent.left == successor:\n parent.left = successor.left\n else:\n parent.right = successor.right\n if node.left != successor:\n successor.left = node.left\n else:\n successor.left = None\n if node.right != successor:\n successor.right = node.right \n else:\n successor.right = None\n\n else:\n ancestor = node.left\n ancestor.parent = None\n self.root = ancestor\n del self.nodes[node.key]", "def _rotate_right(self, og_root):\r\n new_root = og_root.left\r\n og_root.left = new_root.right\r\n if new_root.right:\r\n new_root.right.parent = og_root\r\n new_root.parent = og_root.parent\r\n if og_root.value == self.root.value: # og_root is tree root\r\n self.root = new_root\r\n else:\r\n if og_root is og_root.parent.right:\r\n og_root.parent.right = new_root\r\n else:\r\n og_root.parent.left = new_root\r\n new_root.right = og_root\r\n og_root.parent = new_root", "def patch(lines):\n if not get_root():\n set_root(os.getcwd())", "def set_root(self, root):\n 
self.root = root\n if self.root is not None:\n correct_type(root, Tag)", "def alter_tree(node):\n if not node.input:\n return _alter_node(node)\n\n converted_children = []\n for input_op in node.input:\n converted_children.append(alter_tree(input_op))\n node.input = converted_children\n return _alter_node(node)", "def set_root(self, root):\n self.root = root\n self.sites = [root]", "def flatten(self, root: TreeNode) -> None:\n if root is None :\n return\n if self.node is not None :\n self.node.left = None\n self.node.right = root\n self.node = root\n right = root.right\n self.flatten(root.left)\n self.flatten(right)", "def recoverTree(self, root: Optional[TreeNode]) -> None:\n stack = []\n\n vals = []\n node = root\n while stack or node:\n while node:\n stack.append(node)\n node = node.left\n node = stack.pop()\n vals.append(node.val)\n node = node.right\n\n vals.sort()\n node = root\n i = 0\n while stack or node:\n while node:\n stack.append(node)\n node = node.left\n node = stack.pop()\n node.val = vals[i]\n i += 1\n node = node.right", "def reverse(self):\n self.root.reverse()", "def update_subtree(self, old_subroot: 'GraphNode', new_subroot: 'GraphNode'):\n self.operator.update_subtree(old_subroot, new_subroot)", "def _insert(self, root: AVLTreeNode, key, val=None) -> AVLTreeNode:\n if not root:\n return AVLTreeNode(key, val, bf=0) # If empty root this is the root of new tree\n if key < root.key:\n left_sub_root = self._insert(root.left, key, val) # insert and update left subroot\n root.left = left_sub_root\n left_sub_root.parent = root # assign the parent\n elif key > root.key:\n right_sub_root = self._insert(root.right, key, val) # insert and update right subroot\n root.right = right_sub_root\n right_sub_root.parent = root\n else:\n return root # no duplicate keys allowed; no insertion, return current root as is\n # finally, update heights and bf's of current root after insertion completed (postorder processing)\n root.height = max(self._get_height(root.left), self._get_height(root.right)) + 1\n root.bf = self._get_height(root.left) - self._get_height(root.right)\n return self.rebalance(root) # RE-BALANCE CURRENT ROOT (if required)", "def flatten(self, root: TreeNode) -> None:\n # User must pass a node\n if root:\n root_flatten = TreeNode(root.val)\n leaf = inOrderTreeWalk(root, root_flatten)\n root.left = None\n root.right = root_flatten.right.right", "def replace_node(self, node,new_node):\n #Special Case: Replace the root.\n if node == self.root :\n self.root = new_node\n return\n parent = node.parent\n if parent.left and parent.left == node:\n parent.left = new_node\n elif parent.right and parent.right == node:\n parent.right = new_node\n else:\n print(\"Incorrect Parent-Child relation!\")\n raise RuntimeError", "def recoverTree(self, root):\n # 线性空间复杂度\n # 存储树节点的值\n treeVal = []\n # 存储树的节点\n treePointer = []\n # 中序遍历\n self.inorder(root, treeVal, treePointer)\n treeVal.sort()\n for i in range(len(treeVal)):\n treePointer[i].val = treeVal[i]", "def flatten(self, root: TreeNode) -> None:\n self.previous = TreeNode()\n self.traverse(root)\n return root", "def reset_tree(self):\n self.root = None\n self.action = None\n self.dist_probability = None", "def regenerate_tree(self, newpos):\n self.path = self.tree[newpos][2]\n self.tree = self.get_tree()\n self.pos = self.get_curpos()", "def __update(self, idx):\n parent = (idx - 1) // 2\n while parent >= 0:\n left, right = 2 * parent + 1, 2 * parent + 2\n self.__tree[parent] = self.__tree[left] + self.__tree[right]\n parent = (parent - 
1) // 2", "def recoverTree(self, root: TreeNode) -> None:\n self.firstNode = None\n self.secondNode = None\n self.preNode = TreeNode(float(\"-inf\"))\n\n def in_order(root):\n if not root:\n return\n in_order(root.left)\n if self.firstNode == None and self.preNode.val >= root.val:\n self.firstNode = self.preNode\n if self.firstNode and self.preNode.val >= root.val:\n self.secondNode = root\n self.preNode = root\n in_order(root.right)\n\n in_order(root)\n self.firstNode.val, self.secondNode.val = self.secondNode.val, self.firstNode.val", "def _refresh_tree_ref(self):\n self._tree_ref = RedBlackNodeRef(\n address=self._storage.get_root_address())", "def remove_first(self) -> bool:\n #tree isempty\n if self.root is None:\n return False\n\n #root== leaf\n if self.is_leaf(self.root):\n self.root = None\n return True\n\n #root has!= right tree\n if self.root.right is None:\n self.root = self.root.left\n return True\n\n #right tree\n #right tree\n replace_node = self.root.right\n replace_parent = self.root\n left_bool = False\n while replace_node.left is not None:\n replace_parent = replace_node\n replace_node = replace_node.left\n left_bool = True\n\n # remove left\n if left_bool:\n replace_parent.left = replace_node.right\n else:\n replace_parent.right = replace_node.right\n\n # insert left into root\n replace_node.left = self.root.left\n replace_node.right = self.root.right\n self.root = replace_node\n return True", "def _correct_tree(self, current_element: Node):\r\n while True:\r\n if current_element == None or current_element.parent() == None:\r\n return None\r\n current_element = current_element.parent()\r\n b1 = current_element.balance()\r\n\r\n try:\r\n b2 = current_element.right_son().balance()\r\n except AttributeError:\r\n b2 = 0\r\n try:\r\n b3 = current_element.right_son().left_son().balance()\r\n except AttributeError:\r\n b3 = 0\r\n\r\n if b1 in (-1, 0, 1):\r\n if current_element.parent() == None:\r\n break\r\n else:\r\n continue\r\n elif ((b1 == -2 and b2 == 1 and b3 == -1) or\r\n (b1 == -2 and b2 == 1 and b3 == 0 ) or\r\n (b1 == -2 and b2 == 1 and b3 == 1)):\r\n current_element.reset(*self._right_left(current_element))\r\n elif b1 == -2:\r\n current_element.reset(*self._right_right(current_element))\r\n break\r\n\r\n try:\r\n b2 = current_element.left_son().balance()\r\n except AttributeError:\r\n b2 = 0\r\n try:\r\n b3 = current_element.left_son().right_son().balance()\r\n except AttributeError:\r\n b3 = 0\r\n\r\n if ((b1 == 2 and b2 == 2 and b3 == 2) or\r\n (b1 == -1 and b2 == -1 and b3 == -1) or\r\n (b1 == -1 and b2 == 0 and b3 == 1) or\r\n (b1 == 2 and b2 == -1 and b3 == 0)):\r\n current_element.reset(*self._left_right(current_element))\r\n elif b1 == 2:\r\n current_element.reset(*self._left_left(current_element))\r\n break\r\n \r\n if current_element.parent() == None:\r\n break", "def clear(self):\n self.root = None", "def apply(self, tree):\n raise NotImplementedError()", "def put(self, key, value):\n if key is None:\n return\n self.root = put_in_subtree(self.root, key, value)\n self.root.colour = False # make sure that the root is black", "def _uproot(self):\n left, right = self.left, self.right\n if left is not None:\n left.parent = None\n if right is not None:\n right.parent = None\n return left, right", "def build():\n root = TreeNode(3)\n root.left = TreeNode(2)\n root.right = TreeNode(4)\n root.right.right = TreeNode(7)\n root.right.left = TreeNode(5)\n return root", "def write_root(self, root: Position) -> None:\n # Write only the body of the root.\n 
self.write_body(root)\n # Write all nodes of the tree, except ignored nodes.\n self.level_offset = self.compute_level_offset(root)\n self.root_level = root.level()\n p = root.threadNext() # Returns a copy.\n after = root.nodeAfterTree()\n while p and p != after:\n h = p.h.rstrip()\n if g.match_word(h, 0, '@ignore-tree'):\n p.moveToNodeAfterTree()\n continue\n if g.match_word(h, 0, '@ignore-node'):\n p.moveToThreadNext()\n continue\n if not g.match_word(h, 0, '@no-head'):\n self.write_headline(p)\n self.write_body(p)\n p.moveToThreadNext()", "def _restore_global_position(x, root_pos, root_idx=None):\n x = x + root_pos\n if root_idx is not None:\n x = np.insert(x, root_idx, root_pos.squeeze(1), axis=1)\n return x", "def root_nodes(self, node1, node2, distance):\n if node1 == node2.parent:\n upper_node = node1\n lower_node = node2\n upper_dist, lower_dist = distance, lower_node.branch - distance\n elif node2 == node1.parent:\n upper_node = node2\n lower_node = node1\n upper_dist, lower_dist = lower_node.branch - distance, distance\n else:\n raise PhyloValueError('root_nodes() requires that one of the given nodes is the parent of the other.')\n if len(self.root.children) <= 1:\n raise PhyloValueError('cannot re-root a tree where the existing root has one or no children.')\n elif len(self.root.children) == 2:\n if upper_node == self.root:\n # Just need to adjust branch lengths\n root_child = self.root.children[1] if self.root.children[0] == lower_node else self.root.children[0]\n root_child.branch += upper_dist\n lower_node.branch = lower_dist\n else:\n upper_path = self.find_path_to_root(upper_node)\n # Process the old root child after removing the root:\n root_child = self.root.children[1] if self.root.children[0] == upper_path[1] else self.root.children[0]\n root_child.branch += upper_path[1].branch\n root_child.parent = upper_path[1]\n upper_path[1].children.append(root_child)\n # Process nodes between root and upper_node:\n prev_node = upper_path[1]\n for next_node in upper_path[2:]:\n prev_node.children.remove(next_node)\n prev_node.parent = next_node\n next_node.children.append(prev_node)\n prev_node.branch = next_node.branch\n prev_node = next_node\n # Process upper_node, lower_node, and the new root\n upper_node.parent = lower_node.parent = self.root\n upper_node.children.remove(lower_node)\n self.root.children = [node1, node2] # Keeps the argument order\n upper_node.branch = upper_dist\n lower_node.branch = lower_dist\n else: # If the root has 3 children it means it's an unrooted tree\n new_root = self.new_tree_node()\n new_root.branch = self.root.branch # Transfers any existing root branch\n if upper_node != self.root:\n upper_path = self.find_path_to_root(upper_node)\n prev_node = self.root\n for next_node in upper_path[1:]:\n prev_node.children.remove(next_node)\n prev_node.parent = next_node\n next_node.children.append(prev_node)\n prev_node.branch = next_node.branch\n prev_node = next_node\n upper_node.children.remove(lower_node)\n upper_node.branch = upper_dist\n lower_node.branch = lower_dist\n new_root.children.append(upper_node)\n new_root.children.append(lower_node)\n upper_node.parent = lower_node.parent = new_root\n self.root = new_root\n self.process_tree_nodes()", "def update(self):\n diff = self._diff()\n if not diff:\n # Nothing to do!\n return\n self.parent.update_node(self, diff)", "def replace_subtree(self, tree, update_tree=True):\n if self.parent is None: # Changing the whole tree\n self.__dict__ = tree.__dict__\n else:\n if self is self.parent.left_subtree:\n 
self.parent.left_subtree = tree\n else:\n self.parent.right_subtree = tree\n if update_tree:\n self.update_tree()\n return self", "def _restore_root_target_weight(target_weight, root_weight, root_idx=None):\n if root_idx is not None:\n root_weight = np.full(target_weight.shape[0], root_weight, dtype=target_weight.dtype)\n target_weight = np.insert(target_weight, root_idx, root_weight[:, None], axis=1)\n return target_weight", "def update(self, tree_path, value):\n\t\traise NotImplementedError", "def __init__(self, root: Node = None):\n # this alllows us to initialize by copying an existing tree\n self.root = deepcopy(root)\n if self.root:\n self.root.parent = None\n self.size = 0 if not self.root else self.root.subtree_size()", "def set_root(self, xpath):\n if xpath[:2] is not '//':\n # Add the // to the front of the string if it isn't there\n self.root = self.tree.xpath('//{}'.format(xpath))\n self.base = self.root[0].base\n return self.root\n self.root = self.tree.xpath(xpath)\n self.base = self.root[0].base\n return self.root", "def _root():\n return 0", "def temporary(self, path):\r\n if path is None:\r\n raise ValueError('Can only temporarily establish a build root given a path.')\r\n prior = self._root_dir\r\n self._root_dir = path\r\n try:\r\n yield\r\n finally:\r\n self._root_dir = prior", "def delete(self, val):\n\n\t\tself.root = self.deleteHelper(self.root, val)\n\t\tself.numNodes = 0\n\t\tif self.root:\n\t\t\tQ = [self.root]\n\t\t\twhile Q:\n\t\t\t\tnode = Q.pop(0)\n\t\t\t\tif node.left:\n\t\t\t\t\tQ.append(node.left)\n\t\t\t\tif node.right:\n\t\t\t\t\tQ.append(node.right)\n\t\t\t\tself.numNodes += 1", "def prune_tree ( self ):\n tree = copy.deepcopy ( self.tree )\n change_made = True\n # As long as changes are made, recursively prune from the root node.\n while change_made:\n change_made = self.prune_node ( tree, tree.root )\n return tree\n # End prune_tree()", "def root(self):\n return self._make_position(self._root)", "def invert_binary_tree(root):\n if root is None:\n return None\n left = invert_binary_tree(root.left)\n right = invert_binary_tree(root.right)\n root.left = right\n root.right = left\n return root", "def mutate(self):\n num_leafs_before = self.num_leafs()\n non_leafs = [v for v, d in self.out_degree() if d > 0]\n box = non_leafs[np.random.choice(len(non_leafs))]\n children = list(self[box])\n for child in children:\n self.remove_subtree(child)\n num_leafs_after = self.num_leafs()\n num_removed = num_leafs_before - num_leafs_after\n self.generate(num_removed)", "def leaf_replace(self, node):\r\n if self.label is not None: # return if leaf node\r\n return\r\n left, right = self.left, self.right\r\n left.parents.remove(self) if self in left.parents else left.parents\r\n right.parents.remove(self) if self in right.parents else right.parents\r\n if node.label is None:\r\n internal = [node]\r\n else:\r\n internal = []\r\n while len(internal) > 0:\r\n l = internal.pop(0)\r\n if l.left.label is not None: # leaf\r\n if l.left.label == 0:\r\n l.left = left\r\n left.parents.append(l) if l not in left.parents else left.parents\r\n elif l.left.label == 1:\r\n l.left = right\r\n right.parents.append(l) if l not in right.parents else right.parents\r\n else:\r\n internal.append(l.left)\r\n\r\n if l.right.label is not None: # leaf\r\n if l.right.label == 0:\r\n l.right = left\r\n left.parents.append(l) if l not in left.parents else left.parents\r\n elif l.right.label == 1:\r\n l.right = right\r\n right.parents.append(l) if l not in right.parents else right.parents\r\n 
else:\r\n internal.append(l.right)", "def add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._root = self._Node(e)\n self._size = 1\n return self._root", "def test_on_copy_not_on_root():\n builder = TreeBuilder()\n builder.create_root(0)\n builder.add_child(5)\n builder.add_child(6, move=True)\n\n _ = builder.build()\n builder.add_child(7)\n\n t = builder.build()\n assert_tree_structure(t, {(): 0, (0, ): 5, (1, ): 6, (1, 0): 7})", "def _tree_update(self, new_tree: Tree, event: Event):\n raise NotImplementedError()", "def update(\n self, index: Union[int, np.ndarray], value: Union[float, np.ndarray]\n ):\n\n tree_index = self.capacity + index\n self._tree[tree_index] = value\n\n # Propagate up the tree.\n parent = tree_index // 2\n while np.any(parent > 0):\n left = self._tree[2 * parent] # Children/sibling.\n right = self._tree[2 * parent + 1]\n # Note: Due to possible floating point error in the sum-tree case,\n # it's safer to recompute the parent nodes directly rather than to\n # accumulate an \"update\" up the tree which could be faster.\n self._tree[parent] = self.operation(left, right)\n parent = parent // 2", "def reset(self):\r\n self._root_dir = None", "def __root(T: \"Graph\"):\n T_copy = T.copy()\n\n # Leaves are removed from the copy untill 1 or 2 vertices remain\n while len(T_copy.vertices) > 2:\n vertices_to_remove = []\n for v in T_copy.vertices:\n if v.degree == 1:\n vertices_to_remove.append(v)\n for v in vertices_to_remove:\n T_copy.del_vertex(v)\n\n root_labels = []\n for v in T_copy.vertices:\n root_labels.append(v.label)\n\n # From the original tree, the roots are returned\n T_root = []\n for v in T.vertices:\n if v.label in root_labels:\n T_root.append(v)\n\n return T_root", "def recoverTree(self, root: TreeNode) -> None:\n if not root:\n return\n if root.left and root.left.val > root.val:\n root.left.val, root.val = root.val, root.left.val\n return\n if root.right and root.right.val < root.val:\n root.right.val, root.val = root.val, root.right.val\n return\n self.recoverTree(root.left)\n self.recoverTree(root.right)", "def recoverTree(self, root: TreeNode) -> None:\n # base case\n if not root:\n return\n # a list to store node to be exchange\n change = []\n lst = self.inorder(root)\n for i in range(len(lst)-1):\n if lst[i+1].val < lst[i].val:\n # If we already found the first one i, the seconde one would be i+1\n # you can find that in the second example given by Leetcode\n if change:\n change.append(i+1)\n else:\n change.append(i)\n # exchange elements\n if len(change) == 1:\n lst[change[0]].val, lst[change[0]+1].val = lst[change[0]+1].val, lst[change[0]].val\n else:\n lst[change[0]].val, lst[change[1]].val = lst[change[1]].val, lst[change[0]].val" ]
[ "0.71672195", "0.70915145", "0.7055208", "0.69615245", "0.67885584", "0.66141015", "0.6551368", "0.6512747", "0.6512747", "0.6512747", "0.6512747", "0.6431704", "0.6411715", "0.6395968", "0.6395968", "0.6368781", "0.63604164", "0.6329711", "0.6324841", "0.6305654", "0.6276602", "0.62755746", "0.6252733", "0.6252057", "0.6237605", "0.62375796", "0.62320226", "0.62185884", "0.62125385", "0.61870265", "0.61870265", "0.61870265", "0.6182107", "0.61427754", "0.6142155", "0.6130873", "0.61242235", "0.6089576", "0.60815316", "0.60815316", "0.6071955", "0.6069354", "0.6068959", "0.60666305", "0.6065871", "0.6056988", "0.6038969", "0.60272694", "0.6019179", "0.60045314", "0.5997348", "0.59790534", "0.5971439", "0.59623253", "0.59509313", "0.59448934", "0.59348404", "0.5934561", "0.5918493", "0.59151924", "0.59056365", "0.590243", "0.58918774", "0.5881823", "0.5879029", "0.5852593", "0.5844548", "0.5844056", "0.5839235", "0.5837182", "0.5826454", "0.58195037", "0.5802826", "0.5798592", "0.57956165", "0.5795559", "0.57952935", "0.5788895", "0.5788525", "0.5779543", "0.5777743", "0.57711816", "0.5769816", "0.57642585", "0.57626456", "0.57606506", "0.57559395", "0.5754561", "0.5748984", "0.57412434", "0.5736784", "0.5735515", "0.5735303", "0.5734825", "0.57278997", "0.5727807", "0.57274973", "0.5716388", "0.57101053", "0.5705146", "0.56903195" ]
0.0
-1
Do not return anything, modify root inplace instead.
def flatten(self, root) -> None:
    node = root
    stack = []
    while node:
        if node.left:
            if node.right:
                stack.append(node.right)
            node.right = node.left
            node.left = None
        if not node.left and not node.right and stack:
            node.right = stack.pop()
        node = node.right
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def uproot(self):\n self.__root__ = self\n return self", "def root_replace(self,node):\r\n self.feature_index = node.feature_index\r\n self.threshold = node.threshold\r\n self.label = node.label\r\n self.left = node.left\r\n self.right = node.right\r\n self.substitute = node.substitute\r\n if node.left is not None and node.right is not None:\r\n node.left.parents.remove(node) if node in node.left.parents else node.left.parents\r\n node.left.parents.append(self) if self not in node.left.parents else node.left.parents\r\n node.right.parents.remove(node) if node in node.right.parents else node.right.parents\r\n node.right.parents.append(self) if self not in node.right.parents else node.right.parents", "def _replace(self, x, y):\n y.parent = x.parent\n if x is self.root:\n self.root = y\n return\n elif x is x.parent.left:\n x.parent.left = y\n else:\n x.parent.right = y\n\n self.update(y, -1)", "def update_root(self, action: Action) -> \"MonteCarloSearchTree\":\n if action in self._root.children:\n new_root = self._root.children[action]\n else:\n new_root = self._root.add_child(action)\n self._root.remove_child(new_root)\n self._root = new_root\n return self", "def fix_root(self):\n # In the main bzrlib code, this forces the new tree to use the same\n # tree root as the old tree. But merge-into explicitly doesn't want\n # that. So the first portion is just a copy of the old code, and then\n # we change the rest.\n try:\n self.tt.final_kind(self.tt.root)\n except NoSuchFile:\n self.tt.cancel_deletion(self.tt.root)\n if self.tt.final_file_id(self.tt.root) is None:\n self.tt.version_file(self.tt.tree_file_id(self.tt.root),\n self.tt.root)\n # All we do is skip the step which used to sanitize the root id.", "def recoverTree(self, root: TreeNode) -> None:\n self.tmp, self.left, self.right = None, None, None\n self.helper(root)\n self.left.val, self.right.val = self.right.val, self.left.val", "def _fix_up_to_root(self, idx):\n combine_fn = self._combine_fn\n while idx >= 1:\n # self.data[idx] = combine_fn(self.data[self._left(idx)], self.data[self._right(idx)])\n self.data[idx] = combine_fn(self.data[2 * idx], self.data[2 * idx + 1])\n # idx = self._parent(idx)\n idx = idx >> 1", "def update_with_move(self, last_move):\n if last_move in self._root._children:\n self._root = self._root._children[last_move]\n self._root._parent = None\n else:\n self._root = TreeNode(None, 1.0)", "def update_with_move(self, last_move):\n if last_move in self._root._children:\n self._root = self._root._children[last_move]\n self._root._parent = None\n else:\n self._root = TreeNode(None, 1.0)", "def update_with_move(self, last_move):\n if last_move in self._root._children:\n self._root = self._root._children[last_move]\n self._root._parent = None\n else:\n self._root = TreeNode(None, 1.0)", "def update_with_move(self, last_move):\n if last_move in self._root._children:\n self._root = self._root._children[last_move]\n self._root._parent = None\n else:\n self._root = TreeNode(None, 1.0)", "def recoverTree(self, root: Optional[TreeNode]) -> None:\n self.inorder(root)\n self.first.val,self.second.val=self.second.val,self.first.val", "def edit_root(self) -> Generator[Root, None, None]:\n with self.edit(Root.type) as root:\n if not isinstance(root, Root):\n raise RuntimeError(\"Unexpected root type\")\n yield root", "def set_relative_root(self, root):\r\n self.root = root", "def set_relative_root(self, root):\r\n self.root = root", "def root_orig(self):\n if hasattr(self, \"orig\"):\n return self.orig.root_orig\n return 
self", "def set_root(self, x, root):\n\n while self.P[x] < x:\n\n j = self.P[x]\n self.P[x] = root\n x = j\n\n self.P[x] = root", "def updateTree(self):\n self.reset()\n self.resetTree() \n self.read()", "def clone_as_root(self) :\n clone = deepcopy(self)\n clone.parent = None\n clone.path_length = 0\n clone.previous_action = None\n return clone", "def recoverTree(self, root: TreeNode) -> None:\n self.inorder(root)\n self.first.val, self.second.val = self.second.val, self.first.val", "def update_with_move(self, point, last_move):\n if point == -1:\n # reset the tree\n self._root = TreeNode(None, 1.0)\n else:\n self._root = self._root._children[point][last_move]\n self._root._parent = None", "def set_relative_root(self, root):\n self.root = root", "def rebalance_root(self):\n split_dirs = [d.split('/') for d in self.directories]\n new_root = []\n for level in zip(*split_dirs):\n if not(all([d == level[0] for d in level])):\n break\n new_root.append(level[0])\n self.root = '/'.join(new_root)", "def set_root(self):\n try:\n _check_call(_LIB.TreeliteTreeBuilderSetRootNode(\n self.tree.handle,\n ctypes.c_int(self.node_key)))\n except AttributeError:\n raise TreeliteError('This node has never been inserted into a tree; '\\\n + 'a node must be inserted before it can be a root')", "def _on_root_finder_update(self, change):\n if hasattr(self, \"_root_finder\"):\n del self._root_finder", "def root(self, node):\n\n if self.set[node] == node:\n return node\n\n self.set[node] = self.root(self.set[node])\n return self.set[node]", "def recoverTree(self, root: TreeNode) -> None:\n if not root:\n return\n self.pre = None\n self.m1 = None\n self.m2 = None\n self.helper(root)\n self.m1.val,self.m2.val = self.m2.val, self.m1.val", "def updatetree(self):\n if self.node:\n self.node.update()\n self.draw()", "def recoverTree(self, root):\n it = self.isValidBST(root)\n a, b = next(it)\n c = next(it, None)\n if c:\n _, c = c\n a.val, c.val = c.val, a.val\n else:\n a.val, b.val = b.val, a.val\n return root", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value)\n self.update(leaf_value)", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value)\n self.update(leaf_value)", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value)\n self.update(leaf_value)", "def _add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)", "def _add_root(self, e):\n if self._root is not None:\n raise ValueError(\"root exists\")\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)", "def SyncRoot(self) -> object:", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value) # - leaf_value because the MCTS tree is a max-min tree\n self.update(leaf_value)", "def _add_root(self, data):\n if self._root is not None:\n raise ValueError(\"Root exists\")\n self._size = 1\n self._root = self._Node(data)\n return self._make_position(self._root)", "def _root(self, ind):\n while (ind != self._id[ind]):\n #make every other node in path to point to its 
grandparent\n self._id[ind] = self._id[self._id[ind]]\n ind = self._id[ind]\n return ind", "def _add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)", "def _add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)", "def __init__(self):\n self.root = self.get_new_node();", "def test_removing_root(item):\n item.root = None\n assert not item.has_root", "def clean():\n new_tree = None", "def _replace_node(self, nxt, node):\n nxt.left = node.left\n nxt.right = node.right\n nxt.parent = node.parent\n if node is self.root:\n self.root = nxt\n if nxt.left:\n nxt.left.parent = nxt\n if nxt.right:\n nxt.right.parent = nxt\n if nxt.parent:\n if nxt.parent.right is node:\n nxt.parent.right = nxt\n else:\n nxt.parent.left = nxt", "def _rotate_left(self, og_root):\r\n new_root = og_root.right\r\n og_root.right = new_root.left\r\n if new_root.left:\r\n new_root.left.parent = og_root\r\n new_root.parent = og_root.parent\r\n if og_root is self.root: # if our original root of the rotation is the tree root, replace tree root with new root\r\n self.root = new_root\r\n else:\r\n if og_root is og_root.parent.left:\r\n og_root.parent.left = new_root\r\n else:\r\n og_root.parent.right = new_root\r\n new_root.left = og_root\r\n og_root.parent = new_root", "def flatten(self, root: TreeNode) -> None:\n if not root: return\n self.flatten(root.right)\n self.flatten(root.left)\n root.right = self.last\n root.left = None\n self.last = root", "def flatten(self, root: TreeNode) -> None:\n if not root:\n return\n left = root.left\n right = root.right\n root.left = None\n self.flatten(left)\n self.flatten(right)\n root.right = left\n cur = root\n while cur.right:\n cur = cur.right\n cur.right = right", "def replace_by_etree(self, root_el, el_idx=0):\n el = self.get_element_by_name(root_el.tag, el_idx)\n el[:] = list(root_el)\n el.attrib = root_el.attrib", "def flatten(self, root: TreeNode) -> None:\n if not root:\n return None\n\n self.flatten(root.right)\n self.flatten(root.left)\n\n root.right = self.prev\n root.left = None\n self.prev = root", "def delete_root(self, node):\n current = node\n successor = self.find_successor(current) \n temp_height = current.height\n current.height = successor.height\n successor.height = temp_height\n\n if successor != None:\n self.root = successor\n parent = successor.parent\n\n if successor.parent != node:\n if parent.left == successor:\n parent.left = successor.left\n else:\n parent.right = successor.right\n if node.left != successor:\n successor.left = node.left\n else:\n successor.left = None\n if node.right != successor:\n successor.right = node.right \n else:\n successor.right = None\n\n else:\n ancestor = node.left\n ancestor.parent = None\n self.root = ancestor\n del self.nodes[node.key]", "def _rotate_right(self, og_root):\r\n new_root = og_root.left\r\n og_root.left = new_root.right\r\n if new_root.right:\r\n new_root.right.parent = og_root\r\n new_root.parent = og_root.parent\r\n if og_root.value == self.root.value: # og_root is tree root\r\n self.root = new_root\r\n else:\r\n if og_root is og_root.parent.right:\r\n og_root.parent.right = new_root\r\n else:\r\n og_root.parent.left = new_root\r\n new_root.right = og_root\r\n og_root.parent = new_root", "def patch(lines):\n if not get_root():\n set_root(os.getcwd())", "def set_root(self, root):\n 
self.root = root\n if self.root is not None:\n correct_type(root, Tag)", "def alter_tree(node):\n if not node.input:\n return _alter_node(node)\n\n converted_children = []\n for input_op in node.input:\n converted_children.append(alter_tree(input_op))\n node.input = converted_children\n return _alter_node(node)", "def set_root(self, root):\n self.root = root\n self.sites = [root]", "def flatten(self, root: TreeNode) -> None:\n if root is None :\n return\n if self.node is not None :\n self.node.left = None\n self.node.right = root\n self.node = root\n right = root.right\n self.flatten(root.left)\n self.flatten(right)", "def recoverTree(self, root: Optional[TreeNode]) -> None:\n stack = []\n\n vals = []\n node = root\n while stack or node:\n while node:\n stack.append(node)\n node = node.left\n node = stack.pop()\n vals.append(node.val)\n node = node.right\n\n vals.sort()\n node = root\n i = 0\n while stack or node:\n while node:\n stack.append(node)\n node = node.left\n node = stack.pop()\n node.val = vals[i]\n i += 1\n node = node.right", "def reverse(self):\n self.root.reverse()", "def update_subtree(self, old_subroot: 'GraphNode', new_subroot: 'GraphNode'):\n self.operator.update_subtree(old_subroot, new_subroot)", "def _insert(self, root: AVLTreeNode, key, val=None) -> AVLTreeNode:\n if not root:\n return AVLTreeNode(key, val, bf=0) # If empty root this is the root of new tree\n if key < root.key:\n left_sub_root = self._insert(root.left, key, val) # insert and update left subroot\n root.left = left_sub_root\n left_sub_root.parent = root # assign the parent\n elif key > root.key:\n right_sub_root = self._insert(root.right, key, val) # insert and update right subroot\n root.right = right_sub_root\n right_sub_root.parent = root\n else:\n return root # no duplicate keys allowed; no insertion, return current root as is\n # finally, update heights and bf's of current root after insertion completed (postorder processing)\n root.height = max(self._get_height(root.left), self._get_height(root.right)) + 1\n root.bf = self._get_height(root.left) - self._get_height(root.right)\n return self.rebalance(root) # RE-BALANCE CURRENT ROOT (if required)", "def flatten(self, root: TreeNode) -> None:\n # User must pass a node\n if root:\n root_flatten = TreeNode(root.val)\n leaf = inOrderTreeWalk(root, root_flatten)\n root.left = None\n root.right = root_flatten.right.right", "def replace_node(self, node,new_node):\n #Special Case: Replace the root.\n if node == self.root :\n self.root = new_node\n return\n parent = node.parent\n if parent.left and parent.left == node:\n parent.left = new_node\n elif parent.right and parent.right == node:\n parent.right = new_node\n else:\n print(\"Incorrect Parent-Child relation!\")\n raise RuntimeError", "def recoverTree(self, root):\n # 线性空间复杂度\n # 存储树节点的值\n treeVal = []\n # 存储树的节点\n treePointer = []\n # 中序遍历\n self.inorder(root, treeVal, treePointer)\n treeVal.sort()\n for i in range(len(treeVal)):\n treePointer[i].val = treeVal[i]", "def flatten(self, root: TreeNode) -> None:\n self.previous = TreeNode()\n self.traverse(root)\n return root", "def reset_tree(self):\n self.root = None\n self.action = None\n self.dist_probability = None", "def regenerate_tree(self, newpos):\n self.path = self.tree[newpos][2]\n self.tree = self.get_tree()\n self.pos = self.get_curpos()", "def __update(self, idx):\n parent = (idx - 1) // 2\n while parent >= 0:\n left, right = 2 * parent + 1, 2 * parent + 2\n self.__tree[parent] = self.__tree[left] + self.__tree[right]\n parent = (parent - 
1) // 2", "def recoverTree(self, root: TreeNode) -> None:\n self.firstNode = None\n self.secondNode = None\n self.preNode = TreeNode(float(\"-inf\"))\n\n def in_order(root):\n if not root:\n return\n in_order(root.left)\n if self.firstNode == None and self.preNode.val >= root.val:\n self.firstNode = self.preNode\n if self.firstNode and self.preNode.val >= root.val:\n self.secondNode = root\n self.preNode = root\n in_order(root.right)\n\n in_order(root)\n self.firstNode.val, self.secondNode.val = self.secondNode.val, self.firstNode.val", "def _refresh_tree_ref(self):\n self._tree_ref = RedBlackNodeRef(\n address=self._storage.get_root_address())", "def remove_first(self) -> bool:\n #tree isempty\n if self.root is None:\n return False\n\n #root== leaf\n if self.is_leaf(self.root):\n self.root = None\n return True\n\n #root has!= right tree\n if self.root.right is None:\n self.root = self.root.left\n return True\n\n #right tree\n #right tree\n replace_node = self.root.right\n replace_parent = self.root\n left_bool = False\n while replace_node.left is not None:\n replace_parent = replace_node\n replace_node = replace_node.left\n left_bool = True\n\n # remove left\n if left_bool:\n replace_parent.left = replace_node.right\n else:\n replace_parent.right = replace_node.right\n\n # insert left into root\n replace_node.left = self.root.left\n replace_node.right = self.root.right\n self.root = replace_node\n return True", "def _correct_tree(self, current_element: Node):\r\n while True:\r\n if current_element == None or current_element.parent() == None:\r\n return None\r\n current_element = current_element.parent()\r\n b1 = current_element.balance()\r\n\r\n try:\r\n b2 = current_element.right_son().balance()\r\n except AttributeError:\r\n b2 = 0\r\n try:\r\n b3 = current_element.right_son().left_son().balance()\r\n except AttributeError:\r\n b3 = 0\r\n\r\n if b1 in (-1, 0, 1):\r\n if current_element.parent() == None:\r\n break\r\n else:\r\n continue\r\n elif ((b1 == -2 and b2 == 1 and b3 == -1) or\r\n (b1 == -2 and b2 == 1 and b3 == 0 ) or\r\n (b1 == -2 and b2 == 1 and b3 == 1)):\r\n current_element.reset(*self._right_left(current_element))\r\n elif b1 == -2:\r\n current_element.reset(*self._right_right(current_element))\r\n break\r\n\r\n try:\r\n b2 = current_element.left_son().balance()\r\n except AttributeError:\r\n b2 = 0\r\n try:\r\n b3 = current_element.left_son().right_son().balance()\r\n except AttributeError:\r\n b3 = 0\r\n\r\n if ((b1 == 2 and b2 == 2 and b3 == 2) or\r\n (b1 == -1 and b2 == -1 and b3 == -1) or\r\n (b1 == -1 and b2 == 0 and b3 == 1) or\r\n (b1 == 2 and b2 == -1 and b3 == 0)):\r\n current_element.reset(*self._left_right(current_element))\r\n elif b1 == 2:\r\n current_element.reset(*self._left_left(current_element))\r\n break\r\n \r\n if current_element.parent() == None:\r\n break", "def clear(self):\n self.root = None", "def apply(self, tree):\n raise NotImplementedError()", "def put(self, key, value):\n if key is None:\n return\n self.root = put_in_subtree(self.root, key, value)\n self.root.colour = False # make sure that the root is black", "def _uproot(self):\n left, right = self.left, self.right\n if left is not None:\n left.parent = None\n if right is not None:\n right.parent = None\n return left, right", "def build():\n root = TreeNode(3)\n root.left = TreeNode(2)\n root.right = TreeNode(4)\n root.right.right = TreeNode(7)\n root.right.left = TreeNode(5)\n return root", "def write_root(self, root: Position) -> None:\n # Write only the body of the root.\n 
self.write_body(root)\n # Write all nodes of the tree, except ignored nodes.\n self.level_offset = self.compute_level_offset(root)\n self.root_level = root.level()\n p = root.threadNext() # Returns a copy.\n after = root.nodeAfterTree()\n while p and p != after:\n h = p.h.rstrip()\n if g.match_word(h, 0, '@ignore-tree'):\n p.moveToNodeAfterTree()\n continue\n if g.match_word(h, 0, '@ignore-node'):\n p.moveToThreadNext()\n continue\n if not g.match_word(h, 0, '@no-head'):\n self.write_headline(p)\n self.write_body(p)\n p.moveToThreadNext()", "def _restore_global_position(x, root_pos, root_idx=None):\n x = x + root_pos\n if root_idx is not None:\n x = np.insert(x, root_idx, root_pos.squeeze(1), axis=1)\n return x", "def root_nodes(self, node1, node2, distance):\n if node1 == node2.parent:\n upper_node = node1\n lower_node = node2\n upper_dist, lower_dist = distance, lower_node.branch - distance\n elif node2 == node1.parent:\n upper_node = node2\n lower_node = node1\n upper_dist, lower_dist = lower_node.branch - distance, distance\n else:\n raise PhyloValueError('root_nodes() requires that one of the given nodes is the parent of the other.')\n if len(self.root.children) <= 1:\n raise PhyloValueError('cannot re-root a tree where the existing root has one or no children.')\n elif len(self.root.children) == 2:\n if upper_node == self.root:\n # Just need to adjust branch lengths\n root_child = self.root.children[1] if self.root.children[0] == lower_node else self.root.children[0]\n root_child.branch += upper_dist\n lower_node.branch = lower_dist\n else:\n upper_path = self.find_path_to_root(upper_node)\n # Process the old root child after removing the root:\n root_child = self.root.children[1] if self.root.children[0] == upper_path[1] else self.root.children[0]\n root_child.branch += upper_path[1].branch\n root_child.parent = upper_path[1]\n upper_path[1].children.append(root_child)\n # Process nodes between root and upper_node:\n prev_node = upper_path[1]\n for next_node in upper_path[2:]:\n prev_node.children.remove(next_node)\n prev_node.parent = next_node\n next_node.children.append(prev_node)\n prev_node.branch = next_node.branch\n prev_node = next_node\n # Process upper_node, lower_node, and the new root\n upper_node.parent = lower_node.parent = self.root\n upper_node.children.remove(lower_node)\n self.root.children = [node1, node2] # Keeps the argument order\n upper_node.branch = upper_dist\n lower_node.branch = lower_dist\n else: # If the root has 3 children it means it's an unrooted tree\n new_root = self.new_tree_node()\n new_root.branch = self.root.branch # Transfers any existing root branch\n if upper_node != self.root:\n upper_path = self.find_path_to_root(upper_node)\n prev_node = self.root\n for next_node in upper_path[1:]:\n prev_node.children.remove(next_node)\n prev_node.parent = next_node\n next_node.children.append(prev_node)\n prev_node.branch = next_node.branch\n prev_node = next_node\n upper_node.children.remove(lower_node)\n upper_node.branch = upper_dist\n lower_node.branch = lower_dist\n new_root.children.append(upper_node)\n new_root.children.append(lower_node)\n upper_node.parent = lower_node.parent = new_root\n self.root = new_root\n self.process_tree_nodes()", "def update(self):\n diff = self._diff()\n if not diff:\n # Nothing to do!\n return\n self.parent.update_node(self, diff)", "def replace_subtree(self, tree, update_tree=True):\n if self.parent is None: # Changing the whole tree\n self.__dict__ = tree.__dict__\n else:\n if self is self.parent.left_subtree:\n 
self.parent.left_subtree = tree\n else:\n self.parent.right_subtree = tree\n if update_tree:\n self.update_tree()\n return self", "def _restore_root_target_weight(target_weight, root_weight, root_idx=None):\n if root_idx is not None:\n root_weight = np.full(target_weight.shape[0], root_weight, dtype=target_weight.dtype)\n target_weight = np.insert(target_weight, root_idx, root_weight[:, None], axis=1)\n return target_weight", "def update(self, tree_path, value):\n\t\traise NotImplementedError", "def __init__(self, root: Node = None):\n # this alllows us to initialize by copying an existing tree\n self.root = deepcopy(root)\n if self.root:\n self.root.parent = None\n self.size = 0 if not self.root else self.root.subtree_size()", "def set_root(self, xpath):\n if xpath[:2] is not '//':\n # Add the // to the front of the string if it isn't there\n self.root = self.tree.xpath('//{}'.format(xpath))\n self.base = self.root[0].base\n return self.root\n self.root = self.tree.xpath(xpath)\n self.base = self.root[0].base\n return self.root", "def _root():\n return 0", "def temporary(self, path):\r\n if path is None:\r\n raise ValueError('Can only temporarily establish a build root given a path.')\r\n prior = self._root_dir\r\n self._root_dir = path\r\n try:\r\n yield\r\n finally:\r\n self._root_dir = prior", "def delete(self, val):\n\n\t\tself.root = self.deleteHelper(self.root, val)\n\t\tself.numNodes = 0\n\t\tif self.root:\n\t\t\tQ = [self.root]\n\t\t\twhile Q:\n\t\t\t\tnode = Q.pop(0)\n\t\t\t\tif node.left:\n\t\t\t\t\tQ.append(node.left)\n\t\t\t\tif node.right:\n\t\t\t\t\tQ.append(node.right)\n\t\t\t\tself.numNodes += 1", "def prune_tree ( self ):\n tree = copy.deepcopy ( self.tree )\n change_made = True\n # As long as changes are made, recursively prune from the root node.\n while change_made:\n change_made = self.prune_node ( tree, tree.root )\n return tree\n # End prune_tree()", "def root(self):\n return self._make_position(self._root)", "def invert_binary_tree(root):\n if root is None:\n return None\n left = invert_binary_tree(root.left)\n right = invert_binary_tree(root.right)\n root.left = right\n root.right = left\n return root", "def mutate(self):\n num_leafs_before = self.num_leafs()\n non_leafs = [v for v, d in self.out_degree() if d > 0]\n box = non_leafs[np.random.choice(len(non_leafs))]\n children = list(self[box])\n for child in children:\n self.remove_subtree(child)\n num_leafs_after = self.num_leafs()\n num_removed = num_leafs_before - num_leafs_after\n self.generate(num_removed)", "def leaf_replace(self, node):\r\n if self.label is not None: # return if leaf node\r\n return\r\n left, right = self.left, self.right\r\n left.parents.remove(self) if self in left.parents else left.parents\r\n right.parents.remove(self) if self in right.parents else right.parents\r\n if node.label is None:\r\n internal = [node]\r\n else:\r\n internal = []\r\n while len(internal) > 0:\r\n l = internal.pop(0)\r\n if l.left.label is not None: # leaf\r\n if l.left.label == 0:\r\n l.left = left\r\n left.parents.append(l) if l not in left.parents else left.parents\r\n elif l.left.label == 1:\r\n l.left = right\r\n right.parents.append(l) if l not in right.parents else right.parents\r\n else:\r\n internal.append(l.left)\r\n\r\n if l.right.label is not None: # leaf\r\n if l.right.label == 0:\r\n l.right = left\r\n left.parents.append(l) if l not in left.parents else left.parents\r\n elif l.right.label == 1:\r\n l.right = right\r\n right.parents.append(l) if l not in right.parents else right.parents\r\n 
else:\r\n internal.append(l.right)", "def add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._root = self._Node(e)\n self._size = 1\n return self._root", "def test_on_copy_not_on_root():\n builder = TreeBuilder()\n builder.create_root(0)\n builder.add_child(5)\n builder.add_child(6, move=True)\n\n _ = builder.build()\n builder.add_child(7)\n\n t = builder.build()\n assert_tree_structure(t, {(): 0, (0, ): 5, (1, ): 6, (1, 0): 7})", "def _tree_update(self, new_tree: Tree, event: Event):\n raise NotImplementedError()", "def update(\n self, index: Union[int, np.ndarray], value: Union[float, np.ndarray]\n ):\n\n tree_index = self.capacity + index\n self._tree[tree_index] = value\n\n # Propagate up the tree.\n parent = tree_index // 2\n while np.any(parent > 0):\n left = self._tree[2 * parent] # Children/sibling.\n right = self._tree[2 * parent + 1]\n # Note: Due to possible floating point error in the sum-tree case,\n # it's safer to recompute the parent nodes directly rather than to\n # accumulate an \"update\" up the tree which could be faster.\n self._tree[parent] = self.operation(left, right)\n parent = parent // 2", "def reset(self):\r\n self._root_dir = None", "def __root(T: \"Graph\"):\n T_copy = T.copy()\n\n # Leaves are removed from the copy untill 1 or 2 vertices remain\n while len(T_copy.vertices) > 2:\n vertices_to_remove = []\n for v in T_copy.vertices:\n if v.degree == 1:\n vertices_to_remove.append(v)\n for v in vertices_to_remove:\n T_copy.del_vertex(v)\n\n root_labels = []\n for v in T_copy.vertices:\n root_labels.append(v.label)\n\n # From the original tree, the roots are returned\n T_root = []\n for v in T.vertices:\n if v.label in root_labels:\n T_root.append(v)\n\n return T_root", "def recoverTree(self, root: TreeNode) -> None:\n if not root:\n return\n if root.left and root.left.val > root.val:\n root.left.val, root.val = root.val, root.left.val\n return\n if root.right and root.right.val < root.val:\n root.right.val, root.val = root.val, root.right.val\n return\n self.recoverTree(root.left)\n self.recoverTree(root.right)", "def recoverTree(self, root: TreeNode) -> None:\n # base case\n if not root:\n return\n # a list to store node to be exchange\n change = []\n lst = self.inorder(root)\n for i in range(len(lst)-1):\n if lst[i+1].val < lst[i].val:\n # If we already found the first one i, the seconde one would be i+1\n # you can find that in the second example given by Leetcode\n if change:\n change.append(i+1)\n else:\n change.append(i)\n # exchange elements\n if len(change) == 1:\n lst[change[0]].val, lst[change[0]+1].val = lst[change[0]+1].val, lst[change[0]].val\n else:\n lst[change[0]].val, lst[change[1]].val = lst[change[1]].val, lst[change[0]].val" ]
[ "0.71672195", "0.70915145", "0.7055208", "0.69615245", "0.67885584", "0.66141015", "0.6551368", "0.6512747", "0.6512747", "0.6512747", "0.6512747", "0.6431704", "0.6411715", "0.6395968", "0.6395968", "0.6368781", "0.63604164", "0.6329711", "0.6324841", "0.6305654", "0.6276602", "0.62755746", "0.6252733", "0.6252057", "0.6237605", "0.62375796", "0.62320226", "0.62185884", "0.62125385", "0.61870265", "0.61870265", "0.61870265", "0.6182107", "0.61427754", "0.6142155", "0.6130873", "0.61242235", "0.6089576", "0.60815316", "0.60815316", "0.6071955", "0.6069354", "0.6068959", "0.60666305", "0.6065871", "0.6056988", "0.6038969", "0.60272694", "0.6019179", "0.60045314", "0.5997348", "0.59790534", "0.5971439", "0.59623253", "0.59509313", "0.59448934", "0.59348404", "0.5934561", "0.5918493", "0.59151924", "0.59056365", "0.590243", "0.58918774", "0.5881823", "0.5879029", "0.5852593", "0.5844548", "0.5844056", "0.5839235", "0.5837182", "0.5826454", "0.58195037", "0.5802826", "0.5798592", "0.57956165", "0.5795559", "0.57952935", "0.5788895", "0.5788525", "0.5779543", "0.5777743", "0.57711816", "0.5769816", "0.57642585", "0.57626456", "0.57606506", "0.57559395", "0.5754561", "0.5748984", "0.57412434", "0.5736784", "0.5735515", "0.5735303", "0.5734825", "0.57278997", "0.5727807", "0.57274973", "0.5716388", "0.57101053", "0.5705146", "0.56903195" ]
0.0
-1
Do not return anything, modify root inplace instead.
def flatten(self, root) -> None:
    # the recursion's base case must reach a leaf node
    if not root:
        return None
    if not root.left and not root.right:
        return root
    lefttail = self.flatten(root.left)
    righttail = self.flatten(root.right)
    if lefttail:
        lefttail.right = root.right
        root.right = root.left
        root.left = None
    return righttail if righttail else None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def uproot(self):\n self.__root__ = self\n return self", "def root_replace(self,node):\r\n self.feature_index = node.feature_index\r\n self.threshold = node.threshold\r\n self.label = node.label\r\n self.left = node.left\r\n self.right = node.right\r\n self.substitute = node.substitute\r\n if node.left is not None and node.right is not None:\r\n node.left.parents.remove(node) if node in node.left.parents else node.left.parents\r\n node.left.parents.append(self) if self not in node.left.parents else node.left.parents\r\n node.right.parents.remove(node) if node in node.right.parents else node.right.parents\r\n node.right.parents.append(self) if self not in node.right.parents else node.right.parents", "def _replace(self, x, y):\n y.parent = x.parent\n if x is self.root:\n self.root = y\n return\n elif x is x.parent.left:\n x.parent.left = y\n else:\n x.parent.right = y\n\n self.update(y, -1)", "def update_root(self, action: Action) -> \"MonteCarloSearchTree\":\n if action in self._root.children:\n new_root = self._root.children[action]\n else:\n new_root = self._root.add_child(action)\n self._root.remove_child(new_root)\n self._root = new_root\n return self", "def fix_root(self):\n # In the main bzrlib code, this forces the new tree to use the same\n # tree root as the old tree. But merge-into explicitly doesn't want\n # that. So the first portion is just a copy of the old code, and then\n # we change the rest.\n try:\n self.tt.final_kind(self.tt.root)\n except NoSuchFile:\n self.tt.cancel_deletion(self.tt.root)\n if self.tt.final_file_id(self.tt.root) is None:\n self.tt.version_file(self.tt.tree_file_id(self.tt.root),\n self.tt.root)\n # All we do is skip the step which used to sanitize the root id.", "def recoverTree(self, root: TreeNode) -> None:\n self.tmp, self.left, self.right = None, None, None\n self.helper(root)\n self.left.val, self.right.val = self.right.val, self.left.val", "def _fix_up_to_root(self, idx):\n combine_fn = self._combine_fn\n while idx >= 1:\n # self.data[idx] = combine_fn(self.data[self._left(idx)], self.data[self._right(idx)])\n self.data[idx] = combine_fn(self.data[2 * idx], self.data[2 * idx + 1])\n # idx = self._parent(idx)\n idx = idx >> 1", "def update_with_move(self, last_move):\n if last_move in self._root._children:\n self._root = self._root._children[last_move]\n self._root._parent = None\n else:\n self._root = TreeNode(None, 1.0)", "def update_with_move(self, last_move):\n if last_move in self._root._children:\n self._root = self._root._children[last_move]\n self._root._parent = None\n else:\n self._root = TreeNode(None, 1.0)", "def update_with_move(self, last_move):\n if last_move in self._root._children:\n self._root = self._root._children[last_move]\n self._root._parent = None\n else:\n self._root = TreeNode(None, 1.0)", "def update_with_move(self, last_move):\n if last_move in self._root._children:\n self._root = self._root._children[last_move]\n self._root._parent = None\n else:\n self._root = TreeNode(None, 1.0)", "def recoverTree(self, root: Optional[TreeNode]) -> None:\n self.inorder(root)\n self.first.val,self.second.val=self.second.val,self.first.val", "def edit_root(self) -> Generator[Root, None, None]:\n with self.edit(Root.type) as root:\n if not isinstance(root, Root):\n raise RuntimeError(\"Unexpected root type\")\n yield root", "def set_relative_root(self, root):\r\n self.root = root", "def set_relative_root(self, root):\r\n self.root = root", "def root_orig(self):\n if hasattr(self, \"orig\"):\n return self.orig.root_orig\n return 
self", "def set_root(self, x, root):\n\n while self.P[x] < x:\n\n j = self.P[x]\n self.P[x] = root\n x = j\n\n self.P[x] = root", "def updateTree(self):\n self.reset()\n self.resetTree() \n self.read()", "def clone_as_root(self) :\n clone = deepcopy(self)\n clone.parent = None\n clone.path_length = 0\n clone.previous_action = None\n return clone", "def recoverTree(self, root: TreeNode) -> None:\n self.inorder(root)\n self.first.val, self.second.val = self.second.val, self.first.val", "def update_with_move(self, point, last_move):\n if point == -1:\n # reset the tree\n self._root = TreeNode(None, 1.0)\n else:\n self._root = self._root._children[point][last_move]\n self._root._parent = None", "def set_relative_root(self, root):\n self.root = root", "def rebalance_root(self):\n split_dirs = [d.split('/') for d in self.directories]\n new_root = []\n for level in zip(*split_dirs):\n if not(all([d == level[0] for d in level])):\n break\n new_root.append(level[0])\n self.root = '/'.join(new_root)", "def set_root(self):\n try:\n _check_call(_LIB.TreeliteTreeBuilderSetRootNode(\n self.tree.handle,\n ctypes.c_int(self.node_key)))\n except AttributeError:\n raise TreeliteError('This node has never been inserted into a tree; '\\\n + 'a node must be inserted before it can be a root')", "def _on_root_finder_update(self, change):\n if hasattr(self, \"_root_finder\"):\n del self._root_finder", "def root(self, node):\n\n if self.set[node] == node:\n return node\n\n self.set[node] = self.root(self.set[node])\n return self.set[node]", "def recoverTree(self, root: TreeNode) -> None:\n if not root:\n return\n self.pre = None\n self.m1 = None\n self.m2 = None\n self.helper(root)\n self.m1.val,self.m2.val = self.m2.val, self.m1.val", "def updatetree(self):\n if self.node:\n self.node.update()\n self.draw()", "def recoverTree(self, root):\n it = self.isValidBST(root)\n a, b = next(it)\n c = next(it, None)\n if c:\n _, c = c\n a.val, c.val = c.val, a.val\n else:\n a.val, b.val = b.val, a.val\n return root", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value)\n self.update(leaf_value)", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value)\n self.update(leaf_value)", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value)\n self.update(leaf_value)", "def _add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)", "def _add_root(self, e):\n if self._root is not None:\n raise ValueError(\"root exists\")\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)", "def SyncRoot(self) -> object:", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value) # - leaf_value because the MCTS tree is a max-min tree\n self.update(leaf_value)", "def _add_root(self, data):\n if self._root is not None:\n raise ValueError(\"Root exists\")\n self._size = 1\n self._root = self._Node(data)\n return self._make_position(self._root)", "def _root(self, ind):\n while (ind != self._id[ind]):\n #make every other node in path to point to its 
grandparent\n self._id[ind] = self._id[self._id[ind]]\n ind = self._id[ind]\n return ind", "def _add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)", "def _add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)", "def __init__(self):\n self.root = self.get_new_node();", "def test_removing_root(item):\n item.root = None\n assert not item.has_root", "def clean():\n new_tree = None", "def _replace_node(self, nxt, node):\n nxt.left = node.left\n nxt.right = node.right\n nxt.parent = node.parent\n if node is self.root:\n self.root = nxt\n if nxt.left:\n nxt.left.parent = nxt\n if nxt.right:\n nxt.right.parent = nxt\n if nxt.parent:\n if nxt.parent.right is node:\n nxt.parent.right = nxt\n else:\n nxt.parent.left = nxt", "def _rotate_left(self, og_root):\r\n new_root = og_root.right\r\n og_root.right = new_root.left\r\n if new_root.left:\r\n new_root.left.parent = og_root\r\n new_root.parent = og_root.parent\r\n if og_root is self.root: # if our original root of the rotation is the tree root, replace tree root with new root\r\n self.root = new_root\r\n else:\r\n if og_root is og_root.parent.left:\r\n og_root.parent.left = new_root\r\n else:\r\n og_root.parent.right = new_root\r\n new_root.left = og_root\r\n og_root.parent = new_root", "def flatten(self, root: TreeNode) -> None:\n if not root: return\n self.flatten(root.right)\n self.flatten(root.left)\n root.right = self.last\n root.left = None\n self.last = root", "def flatten(self, root: TreeNode) -> None:\n if not root:\n return\n left = root.left\n right = root.right\n root.left = None\n self.flatten(left)\n self.flatten(right)\n root.right = left\n cur = root\n while cur.right:\n cur = cur.right\n cur.right = right", "def replace_by_etree(self, root_el, el_idx=0):\n el = self.get_element_by_name(root_el.tag, el_idx)\n el[:] = list(root_el)\n el.attrib = root_el.attrib", "def flatten(self, root: TreeNode) -> None:\n if not root:\n return None\n\n self.flatten(root.right)\n self.flatten(root.left)\n\n root.right = self.prev\n root.left = None\n self.prev = root", "def delete_root(self, node):\n current = node\n successor = self.find_successor(current) \n temp_height = current.height\n current.height = successor.height\n successor.height = temp_height\n\n if successor != None:\n self.root = successor\n parent = successor.parent\n\n if successor.parent != node:\n if parent.left == successor:\n parent.left = successor.left\n else:\n parent.right = successor.right\n if node.left != successor:\n successor.left = node.left\n else:\n successor.left = None\n if node.right != successor:\n successor.right = node.right \n else:\n successor.right = None\n\n else:\n ancestor = node.left\n ancestor.parent = None\n self.root = ancestor\n del self.nodes[node.key]", "def _rotate_right(self, og_root):\r\n new_root = og_root.left\r\n og_root.left = new_root.right\r\n if new_root.right:\r\n new_root.right.parent = og_root\r\n new_root.parent = og_root.parent\r\n if og_root.value == self.root.value: # og_root is tree root\r\n self.root = new_root\r\n else:\r\n if og_root is og_root.parent.right:\r\n og_root.parent.right = new_root\r\n else:\r\n og_root.parent.left = new_root\r\n new_root.right = og_root\r\n og_root.parent = new_root", "def patch(lines):\n if not get_root():\n set_root(os.getcwd())", "def set_root(self, root):\n 
self.root = root\n if self.root is not None:\n correct_type(root, Tag)", "def alter_tree(node):\n if not node.input:\n return _alter_node(node)\n\n converted_children = []\n for input_op in node.input:\n converted_children.append(alter_tree(input_op))\n node.input = converted_children\n return _alter_node(node)", "def set_root(self, root):\n self.root = root\n self.sites = [root]", "def flatten(self, root: TreeNode) -> None:\n if root is None :\n return\n if self.node is not None :\n self.node.left = None\n self.node.right = root\n self.node = root\n right = root.right\n self.flatten(root.left)\n self.flatten(right)", "def recoverTree(self, root: Optional[TreeNode]) -> None:\n stack = []\n\n vals = []\n node = root\n while stack or node:\n while node:\n stack.append(node)\n node = node.left\n node = stack.pop()\n vals.append(node.val)\n node = node.right\n\n vals.sort()\n node = root\n i = 0\n while stack or node:\n while node:\n stack.append(node)\n node = node.left\n node = stack.pop()\n node.val = vals[i]\n i += 1\n node = node.right", "def reverse(self):\n self.root.reverse()", "def update_subtree(self, old_subroot: 'GraphNode', new_subroot: 'GraphNode'):\n self.operator.update_subtree(old_subroot, new_subroot)", "def _insert(self, root: AVLTreeNode, key, val=None) -> AVLTreeNode:\n if not root:\n return AVLTreeNode(key, val, bf=0) # If empty root this is the root of new tree\n if key < root.key:\n left_sub_root = self._insert(root.left, key, val) # insert and update left subroot\n root.left = left_sub_root\n left_sub_root.parent = root # assign the parent\n elif key > root.key:\n right_sub_root = self._insert(root.right, key, val) # insert and update right subroot\n root.right = right_sub_root\n right_sub_root.parent = root\n else:\n return root # no duplicate keys allowed; no insertion, return current root as is\n # finally, update heights and bf's of current root after insertion completed (postorder processing)\n root.height = max(self._get_height(root.left), self._get_height(root.right)) + 1\n root.bf = self._get_height(root.left) - self._get_height(root.right)\n return self.rebalance(root) # RE-BALANCE CURRENT ROOT (if required)", "def flatten(self, root: TreeNode) -> None:\n # User must pass a node\n if root:\n root_flatten = TreeNode(root.val)\n leaf = inOrderTreeWalk(root, root_flatten)\n root.left = None\n root.right = root_flatten.right.right", "def replace_node(self, node,new_node):\n #Special Case: Replace the root.\n if node == self.root :\n self.root = new_node\n return\n parent = node.parent\n if parent.left and parent.left == node:\n parent.left = new_node\n elif parent.right and parent.right == node:\n parent.right = new_node\n else:\n print(\"Incorrect Parent-Child relation!\")\n raise RuntimeError", "def recoverTree(self, root):\n # 线性空间复杂度\n # 存储树节点的值\n treeVal = []\n # 存储树的节点\n treePointer = []\n # 中序遍历\n self.inorder(root, treeVal, treePointer)\n treeVal.sort()\n for i in range(len(treeVal)):\n treePointer[i].val = treeVal[i]", "def flatten(self, root: TreeNode) -> None:\n self.previous = TreeNode()\n self.traverse(root)\n return root", "def reset_tree(self):\n self.root = None\n self.action = None\n self.dist_probability = None", "def regenerate_tree(self, newpos):\n self.path = self.tree[newpos][2]\n self.tree = self.get_tree()\n self.pos = self.get_curpos()", "def __update(self, idx):\n parent = (idx - 1) // 2\n while parent >= 0:\n left, right = 2 * parent + 1, 2 * parent + 2\n self.__tree[parent] = self.__tree[left] + self.__tree[right]\n parent = (parent - 
1) // 2", "def recoverTree(self, root: TreeNode) -> None:\n self.firstNode = None\n self.secondNode = None\n self.preNode = TreeNode(float(\"-inf\"))\n\n def in_order(root):\n if not root:\n return\n in_order(root.left)\n if self.firstNode == None and self.preNode.val >= root.val:\n self.firstNode = self.preNode\n if self.firstNode and self.preNode.val >= root.val:\n self.secondNode = root\n self.preNode = root\n in_order(root.right)\n\n in_order(root)\n self.firstNode.val, self.secondNode.val = self.secondNode.val, self.firstNode.val", "def _refresh_tree_ref(self):\n self._tree_ref = RedBlackNodeRef(\n address=self._storage.get_root_address())", "def remove_first(self) -> bool:\n #tree isempty\n if self.root is None:\n return False\n\n #root== leaf\n if self.is_leaf(self.root):\n self.root = None\n return True\n\n #root has!= right tree\n if self.root.right is None:\n self.root = self.root.left\n return True\n\n #right tree\n #right tree\n replace_node = self.root.right\n replace_parent = self.root\n left_bool = False\n while replace_node.left is not None:\n replace_parent = replace_node\n replace_node = replace_node.left\n left_bool = True\n\n # remove left\n if left_bool:\n replace_parent.left = replace_node.right\n else:\n replace_parent.right = replace_node.right\n\n # insert left into root\n replace_node.left = self.root.left\n replace_node.right = self.root.right\n self.root = replace_node\n return True", "def _correct_tree(self, current_element: Node):\r\n while True:\r\n if current_element == None or current_element.parent() == None:\r\n return None\r\n current_element = current_element.parent()\r\n b1 = current_element.balance()\r\n\r\n try:\r\n b2 = current_element.right_son().balance()\r\n except AttributeError:\r\n b2 = 0\r\n try:\r\n b3 = current_element.right_son().left_son().balance()\r\n except AttributeError:\r\n b3 = 0\r\n\r\n if b1 in (-1, 0, 1):\r\n if current_element.parent() == None:\r\n break\r\n else:\r\n continue\r\n elif ((b1 == -2 and b2 == 1 and b3 == -1) or\r\n (b1 == -2 and b2 == 1 and b3 == 0 ) or\r\n (b1 == -2 and b2 == 1 and b3 == 1)):\r\n current_element.reset(*self._right_left(current_element))\r\n elif b1 == -2:\r\n current_element.reset(*self._right_right(current_element))\r\n break\r\n\r\n try:\r\n b2 = current_element.left_son().balance()\r\n except AttributeError:\r\n b2 = 0\r\n try:\r\n b3 = current_element.left_son().right_son().balance()\r\n except AttributeError:\r\n b3 = 0\r\n\r\n if ((b1 == 2 and b2 == 2 and b3 == 2) or\r\n (b1 == -1 and b2 == -1 and b3 == -1) or\r\n (b1 == -1 and b2 == 0 and b3 == 1) or\r\n (b1 == 2 and b2 == -1 and b3 == 0)):\r\n current_element.reset(*self._left_right(current_element))\r\n elif b1 == 2:\r\n current_element.reset(*self._left_left(current_element))\r\n break\r\n \r\n if current_element.parent() == None:\r\n break", "def clear(self):\n self.root = None", "def apply(self, tree):\n raise NotImplementedError()", "def put(self, key, value):\n if key is None:\n return\n self.root = put_in_subtree(self.root, key, value)\n self.root.colour = False # make sure that the root is black", "def _uproot(self):\n left, right = self.left, self.right\n if left is not None:\n left.parent = None\n if right is not None:\n right.parent = None\n return left, right", "def build():\n root = TreeNode(3)\n root.left = TreeNode(2)\n root.right = TreeNode(4)\n root.right.right = TreeNode(7)\n root.right.left = TreeNode(5)\n return root", "def write_root(self, root: Position) -> None:\n # Write only the body of the root.\n 
self.write_body(root)\n # Write all nodes of the tree, except ignored nodes.\n self.level_offset = self.compute_level_offset(root)\n self.root_level = root.level()\n p = root.threadNext() # Returns a copy.\n after = root.nodeAfterTree()\n while p and p != after:\n h = p.h.rstrip()\n if g.match_word(h, 0, '@ignore-tree'):\n p.moveToNodeAfterTree()\n continue\n if g.match_word(h, 0, '@ignore-node'):\n p.moveToThreadNext()\n continue\n if not g.match_word(h, 0, '@no-head'):\n self.write_headline(p)\n self.write_body(p)\n p.moveToThreadNext()", "def _restore_global_position(x, root_pos, root_idx=None):\n x = x + root_pos\n if root_idx is not None:\n x = np.insert(x, root_idx, root_pos.squeeze(1), axis=1)\n return x", "def root_nodes(self, node1, node2, distance):\n if node1 == node2.parent:\n upper_node = node1\n lower_node = node2\n upper_dist, lower_dist = distance, lower_node.branch - distance\n elif node2 == node1.parent:\n upper_node = node2\n lower_node = node1\n upper_dist, lower_dist = lower_node.branch - distance, distance\n else:\n raise PhyloValueError('root_nodes() requires that one of the given nodes is the parent of the other.')\n if len(self.root.children) <= 1:\n raise PhyloValueError('cannot re-root a tree where the existing root has one or no children.')\n elif len(self.root.children) == 2:\n if upper_node == self.root:\n # Just need to adjust branch lengths\n root_child = self.root.children[1] if self.root.children[0] == lower_node else self.root.children[0]\n root_child.branch += upper_dist\n lower_node.branch = lower_dist\n else:\n upper_path = self.find_path_to_root(upper_node)\n # Process the old root child after removing the root:\n root_child = self.root.children[1] if self.root.children[0] == upper_path[1] else self.root.children[0]\n root_child.branch += upper_path[1].branch\n root_child.parent = upper_path[1]\n upper_path[1].children.append(root_child)\n # Process nodes between root and upper_node:\n prev_node = upper_path[1]\n for next_node in upper_path[2:]:\n prev_node.children.remove(next_node)\n prev_node.parent = next_node\n next_node.children.append(prev_node)\n prev_node.branch = next_node.branch\n prev_node = next_node\n # Process upper_node, lower_node, and the new root\n upper_node.parent = lower_node.parent = self.root\n upper_node.children.remove(lower_node)\n self.root.children = [node1, node2] # Keeps the argument order\n upper_node.branch = upper_dist\n lower_node.branch = lower_dist\n else: # If the root has 3 children it means it's an unrooted tree\n new_root = self.new_tree_node()\n new_root.branch = self.root.branch # Transfers any existing root branch\n if upper_node != self.root:\n upper_path = self.find_path_to_root(upper_node)\n prev_node = self.root\n for next_node in upper_path[1:]:\n prev_node.children.remove(next_node)\n prev_node.parent = next_node\n next_node.children.append(prev_node)\n prev_node.branch = next_node.branch\n prev_node = next_node\n upper_node.children.remove(lower_node)\n upper_node.branch = upper_dist\n lower_node.branch = lower_dist\n new_root.children.append(upper_node)\n new_root.children.append(lower_node)\n upper_node.parent = lower_node.parent = new_root\n self.root = new_root\n self.process_tree_nodes()", "def update(self):\n diff = self._diff()\n if not diff:\n # Nothing to do!\n return\n self.parent.update_node(self, diff)", "def replace_subtree(self, tree, update_tree=True):\n if self.parent is None: # Changing the whole tree\n self.__dict__ = tree.__dict__\n else:\n if self is self.parent.left_subtree:\n 
self.parent.left_subtree = tree\n else:\n self.parent.right_subtree = tree\n if update_tree:\n self.update_tree()\n return self", "def _restore_root_target_weight(target_weight, root_weight, root_idx=None):\n if root_idx is not None:\n root_weight = np.full(target_weight.shape[0], root_weight, dtype=target_weight.dtype)\n target_weight = np.insert(target_weight, root_idx, root_weight[:, None], axis=1)\n return target_weight", "def update(self, tree_path, value):\n\t\traise NotImplementedError", "def __init__(self, root: Node = None):\n # this alllows us to initialize by copying an existing tree\n self.root = deepcopy(root)\n if self.root:\n self.root.parent = None\n self.size = 0 if not self.root else self.root.subtree_size()", "def set_root(self, xpath):\n if xpath[:2] is not '//':\n # Add the // to the front of the string if it isn't there\n self.root = self.tree.xpath('//{}'.format(xpath))\n self.base = self.root[0].base\n return self.root\n self.root = self.tree.xpath(xpath)\n self.base = self.root[0].base\n return self.root", "def _root():\n return 0", "def temporary(self, path):\r\n if path is None:\r\n raise ValueError('Can only temporarily establish a build root given a path.')\r\n prior = self._root_dir\r\n self._root_dir = path\r\n try:\r\n yield\r\n finally:\r\n self._root_dir = prior", "def delete(self, val):\n\n\t\tself.root = self.deleteHelper(self.root, val)\n\t\tself.numNodes = 0\n\t\tif self.root:\n\t\t\tQ = [self.root]\n\t\t\twhile Q:\n\t\t\t\tnode = Q.pop(0)\n\t\t\t\tif node.left:\n\t\t\t\t\tQ.append(node.left)\n\t\t\t\tif node.right:\n\t\t\t\t\tQ.append(node.right)\n\t\t\t\tself.numNodes += 1", "def prune_tree ( self ):\n tree = copy.deepcopy ( self.tree )\n change_made = True\n # As long as changes are made, recursively prune from the root node.\n while change_made:\n change_made = self.prune_node ( tree, tree.root )\n return tree\n # End prune_tree()", "def root(self):\n return self._make_position(self._root)", "def invert_binary_tree(root):\n if root is None:\n return None\n left = invert_binary_tree(root.left)\n right = invert_binary_tree(root.right)\n root.left = right\n root.right = left\n return root", "def mutate(self):\n num_leafs_before = self.num_leafs()\n non_leafs = [v for v, d in self.out_degree() if d > 0]\n box = non_leafs[np.random.choice(len(non_leafs))]\n children = list(self[box])\n for child in children:\n self.remove_subtree(child)\n num_leafs_after = self.num_leafs()\n num_removed = num_leafs_before - num_leafs_after\n self.generate(num_removed)", "def leaf_replace(self, node):\r\n if self.label is not None: # return if leaf node\r\n return\r\n left, right = self.left, self.right\r\n left.parents.remove(self) if self in left.parents else left.parents\r\n right.parents.remove(self) if self in right.parents else right.parents\r\n if node.label is None:\r\n internal = [node]\r\n else:\r\n internal = []\r\n while len(internal) > 0:\r\n l = internal.pop(0)\r\n if l.left.label is not None: # leaf\r\n if l.left.label == 0:\r\n l.left = left\r\n left.parents.append(l) if l not in left.parents else left.parents\r\n elif l.left.label == 1:\r\n l.left = right\r\n right.parents.append(l) if l not in right.parents else right.parents\r\n else:\r\n internal.append(l.left)\r\n\r\n if l.right.label is not None: # leaf\r\n if l.right.label == 0:\r\n l.right = left\r\n left.parents.append(l) if l not in left.parents else left.parents\r\n elif l.right.label == 1:\r\n l.right = right\r\n right.parents.append(l) if l not in right.parents else right.parents\r\n 
else:\r\n internal.append(l.right)", "def add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._root = self._Node(e)\n self._size = 1\n return self._root", "def test_on_copy_not_on_root():\n builder = TreeBuilder()\n builder.create_root(0)\n builder.add_child(5)\n builder.add_child(6, move=True)\n\n _ = builder.build()\n builder.add_child(7)\n\n t = builder.build()\n assert_tree_structure(t, {(): 0, (0, ): 5, (1, ): 6, (1, 0): 7})", "def _tree_update(self, new_tree: Tree, event: Event):\n raise NotImplementedError()", "def update(\n self, index: Union[int, np.ndarray], value: Union[float, np.ndarray]\n ):\n\n tree_index = self.capacity + index\n self._tree[tree_index] = value\n\n # Propagate up the tree.\n parent = tree_index // 2\n while np.any(parent > 0):\n left = self._tree[2 * parent] # Children/sibling.\n right = self._tree[2 * parent + 1]\n # Note: Due to possible floating point error in the sum-tree case,\n # it's safer to recompute the parent nodes directly rather than to\n # accumulate an \"update\" up the tree which could be faster.\n self._tree[parent] = self.operation(left, right)\n parent = parent // 2", "def reset(self):\r\n self._root_dir = None", "def __root(T: \"Graph\"):\n T_copy = T.copy()\n\n # Leaves are removed from the copy untill 1 or 2 vertices remain\n while len(T_copy.vertices) > 2:\n vertices_to_remove = []\n for v in T_copy.vertices:\n if v.degree == 1:\n vertices_to_remove.append(v)\n for v in vertices_to_remove:\n T_copy.del_vertex(v)\n\n root_labels = []\n for v in T_copy.vertices:\n root_labels.append(v.label)\n\n # From the original tree, the roots are returned\n T_root = []\n for v in T.vertices:\n if v.label in root_labels:\n T_root.append(v)\n\n return T_root", "def recoverTree(self, root: TreeNode) -> None:\n if not root:\n return\n if root.left and root.left.val > root.val:\n root.left.val, root.val = root.val, root.left.val\n return\n if root.right and root.right.val < root.val:\n root.right.val, root.val = root.val, root.right.val\n return\n self.recoverTree(root.left)\n self.recoverTree(root.right)", "def recoverTree(self, root: TreeNode) -> None:\n # base case\n if not root:\n return\n # a list to store node to be exchange\n change = []\n lst = self.inorder(root)\n for i in range(len(lst)-1):\n if lst[i+1].val < lst[i].val:\n # If we already found the first one i, the seconde one would be i+1\n # you can find that in the second example given by Leetcode\n if change:\n change.append(i+1)\n else:\n change.append(i)\n # exchange elements\n if len(change) == 1:\n lst[change[0]].val, lst[change[0]+1].val = lst[change[0]+1].val, lst[change[0]].val\n else:\n lst[change[0]].val, lst[change[1]].val = lst[change[1]].val, lst[change[0]].val" ]
[ "0.71672195", "0.70915145", "0.7055208", "0.69615245", "0.67885584", "0.66141015", "0.6551368", "0.6512747", "0.6512747", "0.6512747", "0.6512747", "0.6431704", "0.6411715", "0.6395968", "0.6395968", "0.6368781", "0.63604164", "0.6329711", "0.6324841", "0.6305654", "0.6276602", "0.62755746", "0.6252733", "0.6252057", "0.6237605", "0.62375796", "0.62320226", "0.62185884", "0.62125385", "0.61870265", "0.61870265", "0.61870265", "0.6182107", "0.61427754", "0.6142155", "0.6130873", "0.61242235", "0.6089576", "0.60815316", "0.60815316", "0.6071955", "0.6069354", "0.6068959", "0.60666305", "0.6065871", "0.6056988", "0.6038969", "0.60272694", "0.6019179", "0.60045314", "0.5997348", "0.59790534", "0.5971439", "0.59623253", "0.59509313", "0.59448934", "0.59348404", "0.5934561", "0.5918493", "0.59151924", "0.59056365", "0.590243", "0.58918774", "0.5881823", "0.5879029", "0.5852593", "0.5844548", "0.5844056", "0.5839235", "0.5837182", "0.5826454", "0.58195037", "0.5802826", "0.5798592", "0.57956165", "0.5795559", "0.57952935", "0.5788895", "0.5788525", "0.5779543", "0.5777743", "0.57711816", "0.5769816", "0.57642585", "0.57626456", "0.57606506", "0.57559395", "0.5754561", "0.5748984", "0.57412434", "0.5736784", "0.5735515", "0.5735303", "0.5734825", "0.57278997", "0.5727807", "0.57274973", "0.5716388", "0.57101053", "0.5705146", "0.56903195" ]
0.0
-1
[initalize spotify class to be used to manage playlists]
def __init__(self): self.sp, self.user = self.init_auth_client() self.logger = logging.getLogger(__name__)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, API, playlist_uri):\n\n self.API = API\n self.playlist_uri = playlist_uri\n self.metadata = None", "def __init__(self, username):\n self.spotify = spotipy.Spotify(simple_auth_token(username))", "def __init__(self, **kwargs):\n self.songs = SongList()\n self.song = Song()\n self.songs.load_songs(FILE_NAME)\n super(SongsToLearnApp, self).__init__(**kwargs)", "def __init__(self, console, numeric=False):\n\n self.spotify = Spotify()\n self.console = console\n self.watson = WatsonIntegrator(self.console)\n self.running = True\n self.numeric = numeric #Boolean stating whether the user is supposed to answer the requests with options via numbers\n\n self.functionCodes = ['login', 'NewQueue', 'AddArtists', 'AddFromPlaylist', 'ShowUpcoming', 'Shuffle']\n # self.listIDs = {'mood:party': '2zJS01uA6baDkuyd3bpD8J', 'mood:motivation': '09S8u5CfsqNykVe4PS7y5x', 'mood:chill': '2gSm5ak3xfip096FV2MutF',\n # 'top:dea': '2pnMZd3r7IrqQVRBxe9CCj', 'top:ignacio': '1J7sfsybA99F8w2UOpQJlM', 'top:alejandro': '5iN04uNssYaPDtgrHCaYUY',\n # 'top:steffen': '1M4nNxSs4748wpBiufTan8', 'playlist:paolo': '1YGHkKQfOpEQHIO06j71Dy',\n # 'playlist:alvaro': '1FTlyHI9BQfiPgINi1zR7a', 'playlist:professor': '78sVdD9qWLWGwJZnioJ6xX'}\n self.listIDs = {'playlist:Alejandro:Christmas': '1hwDrMP1y3fn6QgDUmFysl', \"playlist:Paolo's Playlists\": '3Oev8yETOHlczbqhmURedk',\n 'artist:Mariah Carey': '5VfX5baCsv3QV5y3Z9W2s9', 'playlist:Dea:Traffic': '2pnMZd3r7IrqQVRBxe9CCj',\n \"playlist:Alvaro's Top\": '7lfLPKDPICPC3kffI2A69B'}", "def init_user(self) -> Any:\n return \\\n spotipy.Spotify(auth_manager=spotipy.oauth2.SpotifyOAuth(scope=\"playlist-modify-public\",\n client_id=self._public_id, client_secret=self._secret_id,\n redirect_uri=self._redirect_uri))", "def __init__(self, SONG):\n self.track_name = SONG['name']\n self.artist_name = SONG['artist']\n self.provider = 'lastfm'\n self.track_number = \"1\"\n self.collection_name = \"\"\n self.release_date = \"\"\n self.artwork_url_100 = SONG[\"image\"][-1][\"#text\"]\n self.track_time = \"\"\n self.primary_genre_name = \"N/A\"", "def __init__(self, volumio, uid, name, info):\n self._volumio = volumio\n self._uid = uid\n self._name = name\n self._info = info\n self._state = {}\n self._playlists = []\n self._currentplaylist = None\n self.thumbnail_cache = {}", "def __init__(self):\r\n self.apiroot = 'http://ws.spotify.com/'\r\n self.add_filter(self.use_json)", "def __init__(self, kodi_helper, netflix_session):\n self.kodi_helper = kodi_helper\n self.netflix_session = netflix_session\n self.credentials = self.kodi_helper.get_credentials()\n self.profiles = []\n self.video_list_cache = {}\n self.prefetch_login()", "def __init__(self, settings):\n super().__init__(settings, self.player_info_url, Player)", "def __init__(self, store=None):\n\n if not store:\n store = sys.argv[0]\n\n self.current_page = None\n self.channel_id = ''\n self.playlist_id = ''\n\n self.current_results_file = os.path.join(store, \"old_results.json\")\n if os.path.exists(self.current_results_file):\n with open(self.current_results_file, \"r\") as f:\n results = json.load(f)\n self.current_page = results[\"current_page\"]\n self.playlist_id = results[\"playlist_id\"]\n\n flow = client.flow_from_clientsecrets(CLIENT_SECRETS_FILE, message=MISSING_MESSAGE,\n scope=SCOPE)\n\n storage = Storage(os.path.join(store, \"-oauth2.json\"))\n\n credentials = storage.get()\n\n if not credentials or credentials.invalid:\n flags = argparser.parse_args()\n credentials = run_flow(flow, storage, flags)\n\n self.youtube = 
build(API_SERVICE_NAME, API_VERSION, http=credentials.authorize(httplib2.Http()))\n self.get_ids_request = None", "def __init__(self, _format):\n path = _format + os.sep + \"playlists\"\n self.__playlists = []\n for root, directory, files in os.walk(path):\n for file in files:\n if file.endswith(\".txt\"):\n self.__playlists.append(file[:-4])", "def __init__(self, apikey, session=None):\n super().__init__('https://www.googleapis.com/youtube/v3/videos',\n apikey, session=session)", "def __init__(self, playlists_path=None):\n self.playlists = {}\n self.playlists_path = playlists_path\n\n # If a playlists path is given we should read them in.\n if self.playlists_path is not None:\n # First ensure we have a directory to work from!\n if not os.path.isdir(self.playlists_path):\n os.mkdir(self.playlists_path)\n\n # Secondly read in the files themselves.\n for filename in os.listdir(self.playlists_path):\n filepath = os.path.join(self.playlists_path, filename)\n with open(filepath) as pl_file:\n contents = json.load(pl_file)\n self.playlists[filename] = Playlist(contents)", "def __init__(self):\n self.api = TodoistAPI(API_KEY)\n self.api.sync()", "def __init__(self, client_access_token, artist_name):\n self.client_access_token = client_access_token\n self.artist_name = artist_name\n self.base_url = 'https://api.genius.com/'\n self.headers = {'Authorization': 'Bearer ' + self.client_access_token}\n self.artist_songs = None", "async def async_setup(hass, config):\n conf = config[DOMAIN]\n\n username = conf[CONF_USERNAME]\n password = conf[CONF_PASSWORD]\n accounts = conf.get(CONF_ACCOUNTS)\n\n @callback\n def websocket_handle_playlists(hass, connection, msg):\n \"\"\"Handle get playlist\"\"\"\n import spotipy\n access_token, expires = get_spotify_token(username=username, password=password)\n client = spotipy.Spotify(auth=access_token)\n resp = client._get('views/made-for-x?content_limit=10&locale=en&platform=web&types=album%2Cplaylist%2Cartist%2Cshow%2Cstation', limit=10,\n offset=0)\n connection.send_message(\n websocket_api.result_message(msg[\"id\"], resp)\n )\n\n def get_spotify_token(username, password):\n import spotify_token as st\n data = st.start_session(username, password)\n access_token = data[0]\n # token_expires = data[1]\n expires = data[1] - int(time.time())\n return access_token, expires\n\n def play(client, spotify_device_id, uri, random_song, repeat):\n # import spotipy\n # import http.client as http_client\n # spotipy.trace = True\n # spotipy.trace_out = True\n # http_client.HTTPConnection.debuglevel = 1\n\n _LOGGER.debug('Version: %s, playing URI: %s on device-id: %s', _VERSION, uri, spotify_device_id)\n if uri.find('track') > 0:\n _LOGGER.debug('Playing track using uris= for uri: %s', uri)\n client.start_playback(device_id=spotify_device_id, uris=[uri])\n else:\n if uri == 'random':\n _LOGGER.debug('Cool, you found the easter egg with playing a random playlist')\n playlists = client.user_playlists('me', 50)\n no_playlists = len(playlists['items'])\n uri = playlists['items'][random.randint(0, no_playlists - 1)]['uri']\n kwargs = {'device_id': spotify_device_id, 'context_uri': uri}\n if random_song:\n results = client.user_playlist_tracks(\"me\", uri)\n position = random.randint(0, results['total'] - 1)\n _LOGGER.debug('Start playback at random position: %s', position)\n kwargs['offset'] = {'position': position}\n\n _LOGGER.debug('Playing context uri using context_uri for uri: \"%s\" (random_song: %s)', uri, random_song)\n client.start_playback(**kwargs)\n if repeat:\n 
_LOGGER.debug('Turning repeat on')\n time.sleep(5)\n client.repeat(state=repeat, device_id=spotify_device_id)\n\n def get_account_credentials(call):\n \"\"\" Get credentials for account \"\"\"\n account = call.data.get(CONF_SPOTIFY_ACCOUNT)\n user = username\n pwd = password\n if account is not None:\n _LOGGER.debug('setting up with different account than default %s', account)\n user = accounts.get(account).get(CONF_USERNAME)\n pwd = accounts.get(account).get(CONF_PASSWORD)\n return user, pwd\n\n def shouldTransferPlayback(call, client):\n \"\"\" Check if something is playing \"\"\"\n uri = call.data.get(CONF_SPOTIFY_URI)\n if uri is None or uri.strip() == '' or call.data.get(CONF_TRANSFER_PLAYBACK):\n current_playback = client.current_playback()\n if current_playback is not None:\n _LOGGER.debug('current_playback from spotipy: %s', current_playback)\n return True\n return False\n\n async def start_casting(call):\n \"\"\"service called.\"\"\"\n import spotipy\n\n uri = call.data.get(CONF_SPOTIFY_URI)\n random_song = call.data.get(CONF_RANDOM, False)\n repeat = call.data.get(CONF_REPEAT)\n\n # Account\n user, pwd = get_account_credentials(call)\n\n # login as real browser to get powerful token\n access_token, expires = get_spotify_token(username=user, password=pwd)\n\n # get the spotify web api client\n client = spotipy.Spotify(auth=access_token)\n\n # launch the app on chromecast\n spotify_cast_device = SpotifyCastDevice(hass, call.data.get(CONF_DEVICE_NAME), call.data.get(CONF_ENTITY_ID))\n spotify_cast_device.startSpotifyController(access_token, expires)\n spotify_device_id = spotify_cast_device.getSpotifyDeviceId(client)\n\n transfer_playback = shouldTransferPlayback(call, client)\n if transfer_playback == True:\n _LOGGER.debug('Transfering playback')\n client.transfer_playback(\n device_id=spotify_device_id, force_play=True)\n else:\n play(client, spotify_device_id, uri, random_song, repeat)\n\n # Register websocket and service\n hass.components.websocket_api.async_register_command(\n WS_TYPE_SPOTCAST_PLAYLISTS, websocket_handle_playlists, SCHEMA_PLAYLISTS\n )\n\n hass.services.async_register(DOMAIN, 'start', start_casting,\n schema=SERVICE_START_COMMAND_SCHEMA)\n\n return True", "def __init__(self, pname, pmax, plist):\n\n #the player has to have... 
\n self.name = pname\n self.max_items = pmax\n self.items = plist", "def add_song_to_playlist(self):\n #populate our songs dictionary\n self.get_liked_videos()\n\n #collect all of uri\n uris = []\n for song,info in self.all_song_info.items():\n uris.append(info[\"spotify_uri\"])\n\n #create a new playlist\n playlist_id = self.create_playlist()\n\n #add all songs into new playlist\n\n #Spotipy can only add 100 songs at a time to a playlist that is why this method is taken\n g = len(uris)\n if g > 100:\n s = 0\n e = 99\n while g > 100:\n self.sp.user_playlist_add_tracks(user=self.username, playlist_id=playlist_id,\n tracks=uris[s:e])\n g -= 100\n s = e + 1\n e += 100\n self.sp.user_playlist_add_tracks(user=self.username, playlist_id=playlist_id,\n tracks=uris[s:])\n else:\n self.sp.user_playlist_add_tracks(user=self.username, playlist_id=playlist_id,\n tracks=uris)", "def __init__(self, **kwargs):\n self.paused = self._get('paused', **kwargs)\n self.title = self._get('title', **kwargs)\n self.artist = self._get('artist', **kwargs)\n self.album = self._get('album', **kwargs)\n self.total_time = self._get('total_time', **kwargs)\n self.position = self._get('position', **kwargs)\n self.mediakind = self._get('mediakind', **kwargs)\n self.playstatus = self._get('playstatus', **kwargs)", "def __init__(self):\n self.tiempos = ListaEnlazada() # Marcas de tiempo\n self.tracks = [] # Lista de tracks", "async def initialize(self):\r\n self.access_token = await async_get_value(SPOTIFY_ACCESS_TOKEN)\r\n self.refresh_token = await async_get_value(SPOTIFY_REFRESH_TOKEN)\r\n self.should_poll = await async_get_value(SPOTIFY_SHOULD_POLL)\r\n request_code = self.get_currently_playing().status_code\r\n if request_code == requests.codes.ok or request_code == requests.codes.no_content:\r\n self.start_polling_and_refresh()\r\n return\r\n\r\n # Go through the oauth flow.\r\n self.auth_thread = StoppableThread(target=self.check_and_test_auth)\r\n self.auth_thread.start()\r\n return", "def init_container(self):\n self.current_playlist = []", "async def set_playlists(self):\n _LOGGER.debug(\"[Foobar2k] Getting playlists\")\n if (self._power == POWER_ON):\n playlists = {}\n response = await self.prep_fetch(HTTP_GET, GET_PLAYLISTS, data=None)\n data = json.loads(response)\n _LOGGER.debug(f\"[Foobar2k] Have playlists [{data}]\")\n for pl in data[\"playlists\"]:\n playlists[pl[\"title\"]] = pl[\"id\"]\n if (pl[\"isCurrent\"]):\n self._current_playlist_id = pl[\"id\"]\n self._playlists = playlists", "def __init__(self, api_key):\n SteamAPI.__init__(self, \"\", api_key)\n self.tf2_items = None\n self.dota2_items = None", "def __init__(self):\n self.client = soundcloud.Client(client_id=\"e54975908f6d3073657a1a66b654f79a\")\n self.client_str = \"?client_id=e54975908f6d3073657a1a66b654f79a\"\n self.past_songs_db = open(\"past_songs.db\", 'r+')\n self.past_songs_db_data = [(line.strip(), \"\") for line in self.past_songs_db.readlines()]", "def __init__(\n self,\n data,\n on_repeat,\n datatype=None,\n playlisttype=None,\n show_lyrics=False,\n dont_cache_search=False,\n no_cache=False,\n no_related=False,\n disable_kw=False,\n ):\n URLPlayer.__init__(\n self,\n show_lyrics=show_lyrics,\n dont_cache_search=dont_cache_search,\n no_cache=no_cache,\n )\n NamePlayer.__init__(\n self,\n show_lyrics=show_lyrics,\n dont_cache_search=dont_cache_search,\n no_cache=no_cache,\n disable_kw=disable_kw,\n )\n self._iterable_list = []\n self.data = data\n self.datatype = datatype\n self.playlisttype = playlisttype\n self.no_related = 
no_related\n self.on_repeat = on_repeat\n self._playlist_names = [\n \"spotify\",\n \"youtube\",\n \"soundcloud\",\n \"billboard\",\n \"jiosaavn\",\n \"gaana\",\n \"cached\",\n \"youtubemusic\",\n ]\n self._datatypes = [\"playlist\", \"song\", \"URL\"]\n self.show_lyrics = show_lyrics\n self.dont_cache_search = dont_cache_search\n self.no_cache = no_cache", "def __init__(self):\n self._urls = []", "def __init__(self, **kwargs):\n self.identifier = kwargs.get(\"identifier\")\n self.playback_state = kwargs.get(\"playback_state\")\n self.title = kwargs.get(\"title\")\n self.series_name = kwargs.get(\"series_name\")\n self.artist = kwargs.get(\"artist\")\n self.album = kwargs.get(\"album\")\n self.genre = kwargs.get(\"genre\")\n self.total_time = kwargs.get(\"total_time\")\n self.position = kwargs.get(\"position\")\n self.season_number = kwargs.get(\"season_number\")\n self.episode_number = kwargs.get(\"episode_number\")\n self.repeat = kwargs.get(\"repeat\")\n self.shuffle = kwargs.get(\"shuffle\")\n self.media_type = kwargs.get(\"media_type\")\n self.playback_rate = kwargs.get(\"playback_rate\")\n self.supported_commands = kwargs.get(\"supported_commands\")\n self.artwork = kwargs.get(\"artwork\")\n self.artwork_identifier = kwargs.get(\"artwork_identifier\")\n self.artwork_mimetype = kwargs.get(\"artwork_mimetype\")\n self.artwork_width = kwargs.get(\"artwork_width\")\n self.artwork_height = kwargs.get(\"artwork_height\")\n self.skip_time = kwargs.get(\"skip_time\")\n self.app_name = kwargs.get(\"app_name\")\n self.content_identifier = kwargs.get(\"content_identifier\")", "def create_playlist(self, playlist_name):\n print(\"create_playlist needs implementation\")", "def import_spotify(info: dict) -> (str, int):\n url = info[\"playlist_url\"]\n # Validate URL\n matches = (\n re.match(r\"^https?://open\\.spotify\\.com/playlist/([a-zA-Z\\d]*)/?\", url)\n if isinstance(url, str)\n else None\n )\n if not matches:\n return \"Invalid URL\", 400\n playlist_id = matches.group(1)\n query_url = \"https://api.spotify.com/v1/playlists/\" + playlist_id\n query_headers = {\"Authorization\": \"Bearer {}\".format(info[\"access_token\"])}\n # Get/create playlist\n playlist_json = requests.get(query_url, headers=query_headers).json()\n if \"error\" in playlist_json:\n status = playlist_json[\"error\"].get(\"status\")\n message = playlist_json[\"error\"].get(\"message\")\n return (\n message if message else \"Error retrieving playlist\",\n status if status else 500,\n )\n playlist = Playlist(\n name=playlist_json[\"name\"],\n last_sync_spotify=timezone.now(),\n spotify_id=playlist_id,\n )\n if \"user\" in info:\n playlist.owner = PlaylstrUser.objects.filter(id=info[\"user\"]).first()\n if \"owner\" in playlist_json:\n playlist.spotify_creator_id = playlist_json[\"owner\"][\"id\"]\n playlist.spotify_creator_name = playlist_json[\"owner\"][\"display_name\"]\n playlist.save()\n # Get playlist tracks\n tracks_response = requests.get(query_url + \"/tracks\", headers=query_headers)\n if tracks_response.status_code != 200:\n return tracks_response.reason, 500\n tracks_json = tracks_response.json()\n if \"error_description\" in tracks_json:\n return tracks_json[\"error_description\"], 500\n # Get list of tracks\n index = -1\n while \"next\" in tracks_json and tracks_json[\"next\"] is not None:\n for j in tracks_json[\"items\"]:\n index += 1\n track = track_from_spotify_json(j[\"track\"])\n try:\n PlaylistTrack.objects.create(\n playlist=playlist, track=track, index=index\n )\n except IntegrityError as e:\n 
print(\"Error adding track {}: {}\".format(str(track), str(e)))\n continue\n tracks_json = requests.get(tracks_json[\"next\"], headers=query_headers).json()\n return str(playlist.playlist_id), 200", "def create_playlist(self, data):\n pass", "def __init__(self, url, params=None):\n super(LivestreamVideo, self).__init__(url, params)\n self.video_id = self.get_video_id()\n self.livestream_user = self.get_username()", "def __init__(self):\n\n self.storage: list = Storage()\n\n # Start for get data in API and set in storage\n self._set_proxies_in_storage()", "def __init__(self, servicename):\n bus = pydbus.SessionBus()\n self.name = servicename\n self._proxy = bus.get(self.name, '/org/mpris/MediaPlayer2')\n self.player = self._proxy[self.player_interface]\n self.properties = self._proxy[self.properties_interface]\n # tracklist is an optional interface\n try:\n self.tracklist = self._proxy[self.tracklist_interface]\n except KeyError:\n self.tracklist = None\n # playlists is an optional interface\n try:\n self.playlists = self._proxy[self.playlists_interface]\n except KeyError:\n self.playlists = None", "def __init__(\n self,\n vector_url,\n vector_shape,\n metadata_url,\n sprite_url=None,\n image_size=None,\n title=\"Comet Embedding\",\n ):\n self.vector_url = vector_url\n self.vector_shape = vector_shape\n self.metadata_url = metadata_url\n self.sprite_url = sprite_url\n self.image_size = image_size\n self.title = title", "def __init__(self, id_: str, bio: str) -> None:\n\n # YOUR CODE HERE\n self.userid = id_\n self.bio = bio\n self.tweets = []", "def __init__(self):\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n self.api = tweepy.API(auth)", "def __init__(self):\n List.__init__(self)", "def __init__(self, title, storyline, poster_image_url, trailer_youtube_url):\n self.title = title\n self.storyline = storyline\n self.poster_image_url = poster_image_url\n self.trailer_youtube_url = trailer_youtube_url", "def __init__(self):\n self.api = Api(consumer_key=credentials[\"consumer_key\"],\n consumer_secret=credentials[\"consumer_secret\"],\n access_token_key=credentials[\"access_token_key\"],\n access_token_secret=credentials[\"access_token_secret\"])", "def __init__(self,\n title, trailer_youtube_url, poster_image_url, trailer_youtube_id):\n # Set the title of the class to the one passed into the function\n self.title = title\n # Set the youtube trailer url of the class to the one passed into the function\n self.trailer_youtube_url = trailer_youtube_url\n # Set the poster image url of the class to the one passed into the function\n self.poster_image_url = poster_image_url\n # Set the youtube trailer id of the class to the one passed into the function\n self.trailer_youtube_id = trailer_youtube_id", "def createspotifyplaylist(accesstoken, name, playlists, tracklist, userid):\n\n # find a unique name for the playlist\n playlistname = \"{} - flowed\".format(name)\n if playlistname in playlists:\n num = 1\n playlistname = \"{} - flowed ({})\".format(name, num)\n while playlistname in playlists:\n num = num + 1\n playlistname = \"{} - flowed ({})\".format(name, num)\n\n # create playlist\n headers = {}\n headers[\"Authorization\"] = \"Bearer {}\".format(accesstoken)\n headers[\"Content-Type\"] = \"application/json\"\n\n payload = {}\n payload[\"name\"] = playlistname\n\n url = \"https://api.spotify.com/v1/users/{}/playlists\".format(userid)\n\n r = requests.post(url, headers=headers, json=payload)\n\n response = r.json()\n\n\n 
if \"collaborative\" not in response:\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n retry = True\n while retry:\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n r = requests.post(url, headers=headers, json=payload)\n response = r.json()\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n continue\n else:\n print(\"error: problem creating spotify playlist\")\n print(response[\"error\"])\n return(False)\n elif \"collaborative\" in response:\n break\n else:\n print(\"error: problem creating spotify playlist\")\n print('no error response')\n return(False)\n else: \n print(\"error: problem creating spotify playlist\")\n print(response[\"error\"])\n return(False)\n else:\n print(\"error: problem creating spotify playlist\")\n print('no error response')\n return(False)\n\n playlistid = response[\"id\"]\n playlisturl = response[\"external_urls\"][\"spotify\"]\n\n # add tracks to playlist\n while len(tracklist) > 100:\n\n # add first 100\n headers = {}\n headers[\"Authorization\"] = \"Bearer {}\".format(accesstoken)\n headers[\"Content-Type\"] = \"application/json\"\n\n payload = {}\n payload[\"uris\"] = tracklist[:100]\n\n r = requests.post(\"https://api.spotify.com/v1/users/{}/playlists/{}/tracks\"\n .format(userid, playlistid),\n headers=headers,\n json=payload)\n\n response = r.json()\n if \"snapshot_id\" not in response:\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n continue\n else:\n print(\"error: problem adding songs to playlist\")\n print(response[\"error\"])\n return(False)\n else:\n print(\"error: problem adding songs to playlist\")\n print(\"no error response\")\n return(False)\n\n tracklist = tracklist[100:]\n\n if tracklist:\n\n # add the remainder of the tracks\n headers = {}\n headers[\"Authorization\"] = \"Bearer {}\".format(accesstoken)\n headers[\"Content-Type\"] = \"application/json\"\n\n payload = {}\n payload[\"uris\"] = tracklist\n\n r = requests.post(\"https://api.spotify.com/v1/users/{}/playlists/{}/tracks\"\n .format(userid, playlistid),\n headers=headers,\n json=payload)\n\n response = r.json()\n if \"snapshot_id\" not in response:\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n retry = True\n while retry:\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n r = requests.post(\"https://api.spotify.com/v1/users/{}/playlists/{}/tracks\"\n .format(userid, playlistid),\n headers=headers,\n json=payload)\n response = r.json()\n if \"snapshot_id\" in response:\n break\n elif response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n continue\n else:\n print(\"error: createspotifyplaylist request failed\")\n print(response[\"error\"])\n return(False)\n else:\n print(\"error: createspotifyplaylist request failed\")\n print(\"no error response\")\n return(False)\n else:\n print(\"error: createspotifyplaylist request failed\")\n print(response[\"error\"])\n return(False)\n else:\n print(\"error: createspotifyplaylist request failed\")\n print(\"no error response\")\n return(False)\n\n return(playlistname, playlisturl)", "def __init__(self, api_url):\n self.api_url = api_url", "def __init__(self, player):\n self.player = player", "def __init__(self, name, storyline, trailer, poster):\n # Assigning the values of the instances to the class variables\n self.title = name\n self.mov_story = storyline\n self.trailer_youtube_url = trailer\n self.poster_image_url = poster", "def __init__(self):\n self.item_list = []", "def 
__init__(self):\n\n # TODO: Add login and data grab logic", "def create_playlist(access_token):\n request_body = json.dumps({\n \"name\": \"SpotiAdd\",\n \"description\": \"All Liked Youtube Videos\",\n \"public\": True\n })\n userId = getUserId(access_token)\n query = \"https://api.spotify.com/v1/users/{}/playlists\".format(\n userId)\n response = requests.post(\n query,\n data=request_body,\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {}\".format(access_token)\n }\n )\n response_json = response.json()\n # print(\"create_playlist_id : {}\".format(response_json),file = sys.stdout)\n return response_json[\"id\"]", "def __init__(self, **kwargs):\r\n\t\tself.storage = MySQLStorage(**kwargs)\r\n\r\n\t\tself.nlp = spacy.load('en_pineapple')\r\n\r\n\t\tself.stopWords = set(stopwords.words('english'))", "def __init__(self, player):\n\t\tself.player = player", "def initialize(self):\n self.voteskips = []\n self.response = {}\n self.route = {}\n self.userlist = []\n self.poll = []\n self.media = []\n self.init = False\n self.question = None\n self.jumble = None\n self.imgur = None", "def __init__(self, movie_title, poster_image_url, trailer_youtube_url):", "def __init__(self):\n self.List_store = []", "def __init__(self, helper=None):\n self.helper = helper\n self.sp_manager = SPManager(helper.handle, helper.service_profile)", "def __init__(self, username, email, password):\n self.username = username\n self.email = email\n self.password = password\n self.shopping_lists = []\n\n self.details = {\n 'username': self.username,\n 'email': self.email,\n 'password': self.password,\n 'shopping_lists': self.shopping_lists\n }", "def __init__(self):\n self.storage = None\n self.user = None\n self.master_collection = None\n self.current_collection = None", "async def playlist(self, ctx, *, query):\n # Setup the headers with the token that should be here\n headers = {\"Authorization\": \"Bearer {}\".format(self._token)}\n opts = {\"q\": query, \"type\": \"playlist\"}\n url = \"https://api.spotify.com/v1/search\"\n response = await utils.request(url, headers=headers, payload=opts)\n try:\n await ctx.send(\n response.get(\"playlists\")\n .get(\"items\")[0]\n .get(\"external_urls\")\n .get(\"spotify\")\n )\n except (KeyError, AttributeError, IndexError):\n await ctx.send(\"Couldn't find a song for:\\n{}\".format(query))", "def __init__(self, **kwargs):\n config = getattr(settings, 'ROCKET', None)\n if config is None:\n config = {}\n\n self.mode = config.get('mode') or 'editor'\n self.files = config.get('files') or './tracks'\n self.project = config.get('project') or 'project.xml'\n self.rps = config.get('rps', 24)\n self.start_paused = False\n\n self.controller = TimeController(self.rps)\n if self.mode == 'editor':\n self.rocket = Rocket.from_socket(self.controller, track_path=self.files)\n self.start_paused = True\n elif self.mode == 'project':\n self.rocket = Rocket.from_project_file(self.controller, self.project)\n elif self.mode == 'files':\n self.rocket = Rocket.from_files(self.controller, self.files)\n else:\n raise ValueError(\"Unknown rocket mode: '{}'\".format(self.mode))\n\n # Register tracks in the editor\n # Ninja in pre-created track objects\n for track in tracks.tacks:\n self.rocket.tracks.add(track)\n\n # Tell the editor about these tracks\n for track in tracks.tacks:\n self.rocket.track(track.name)\n\n self.rocket.update()\n super().__init__(**kwargs)", "def __init__(self, players):\n\n self._players = players\n self._current_player = players.get()", "def 
set_playlist(self, playlist):\n self._playlist = playlist", "def __init__(self):\n this = _libsbml.new_ListWrapperSBase()\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n self.emotions_list = EmotionsList('NRC-Emotion-Intensity-Lexicon-v1.txt')\n self.tweets_list = None\n self.nickname = None", "def __init__(self, api_use=False):\n self.api_use = api_use", "def __init__(self, **attrs):\n \n self.list_id = None\n self.name = None\n self.pieces = None\n \n super().__init__(**attrs)", "def __init__(self):\n this = _libsbml.new_ListWrapperModelCreator()\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, title, image_url, trailer_url):\n self.title = title\n self.trailer_youtube_url = trailer_url\n self.poster_image_url = image_url", "def __init__(self, **attrs):\n \n self.list_id = None\n self.name = None\n self.items = None\n \n super().__init__(**attrs)", "def __init__(self, movie_list, handler):\n self = self\n self.movie_list = movie_list\n self.handler = handler", "def setup_lists(self):\n pass", "def __init__(self, title, storyline, poster_image, trailer_youtube):\n\n self.title = title\n self.storyine = storyline\n self.poster_image_url = poster_image\n self.trailer_youtube_url = trailer_youtube", "def __init__(self, url, params=None):\n super(YoutubeVideo, self).__init__(url, params)\n self.video_id = self.get_video_id()", "def main():\n\tdescription = \"Utility to search for spotify by song, artist or song ID and to create playlists based off of song ID's\"\n\tusage = \"search.py [-h] [-s SONG | -a ARTIST | -i ID] [-p PLAYLIST & -u USERNAME & -i ID & -d DESCRIPTION]\"\n\tparser = argparse.ArgumentParser(description=description, usage=usage)\n\tgroup = parser.add_mutually_exclusive_group()\n\tgroup.add_argument(\"-s\", \"--song\", nargs=1, required='--argument' in sys.argv, help=\"Search for a song by name\")\n\tgroup.add_argument(\"-a\", \"--artist\", nargs=1, required='--argument' in sys.argv,\n\t\t\t\t\t help=\"Search for songs from an Artist\\n\")\n\tgroup.add_argument(\"-i\", \"--id\", nargs=1, required='--argument' in sys.argv,\n\t\t\t\t\t help=\"Search for song based on ID or create playlist based off of song ID\")\n\tparser.add_argument(\"-p\", \"--playlist\", nargs=1, required='--id' in sys.argv,\n\t\t\t\t\t\thelp=\"Name of the playlist to be created. MUST be used with -i/--id\")\n\tparser.add_argument(\"-d\", \"--description\", nargs=1, required='--argument' in sys.argv,\n\t\t\t\t\t\thelp=\"Playlist Description. Must be used with -p,-i and -u\")\n\tparser.add_argument(\"-u\", \"--username\", nargs=1, required='--argumnet' in sys.argv,\n\t\t\t\t\t\thelp=\"Spotify Username. Must be used with -p, -i and -d\")\n\targs = parser.parse_args()\n\t# print(args)\n\n\tsolr = Solr_Query()\n\n\tresponse = None\n\n\tif args.song:\n\t\tprint(\"Searching for song:\", args.song[0].strip())\n\t\tsong_name = args.song[0].strip()\n\t\tsolr.set_search_type(\"songs\")\n\t\tquery = solr.set_query(song_name)\n\t\tresponse = solr.exec_query(query)\n\t\tsolr.print_search_results(response)\n\n\tif args.artist:\n\t\tprint(\"Searching for songs by artist: \", args.artist[0].strip())\n\t\tartist = args.artist[0].strip()\n\t\tsolr.set_search_type(\"artists\")\n\t\tquery = solr.set_query(artist)\n\t\tresponse = solr.exec_query(query)\n\t\tsolr.print_search_results(response)\n\n\t# Still trying to figure this one out. 
The getmorelike this funcionality is harder than we thought\n\tif args.playlist and args.id and args.description and args.username:\n\t\tprint(\"Creating a playlist based off of song ID:\", args.id[0].strip())\n\t\tid = args.id[0].strip()\n\t\tdescription = args.description[0].strip()\n\t\tplaylist = args.playlist[0].strip()\n\t\tusername = args.username[0].strip()\n\n\t\tsolr.set_search_type(\"id\")\n\t\tquery = solr.set_query(id)\n\t\tresponse = solr.exec_query(query)\n\n\t\t# Create a playlist create object to find similar songs and create the playlist\n\t\tcreator = Playlist_Create(username, playlist, description)\n\t\tcreator.authenticate() # authenticate using the username passed in\n\t\tresponse = creator.get_similar_songs(response)\n\t\tsongs = creator.get_song_ids(response)\n\t\tplaylist_id = creator.create_playlist()\n\t\tcreator.add_songs(playlist_id, songs)\n\n\n\n\telif args.playlist and not args.id:\n\t\tparser.error(\"Must input a song ID to create a playlist with!\")\n\telif args.playlist and not args.description:\n\t\tparser.error(\"Must input a playlist description\")\n\telif args.playlist and not args.username:\n\t\tparser.error(\"Need your username to create the playlist\")\n\n\tif args.id:\n\t\tprint(\"Searching for song with ID:\", args.id[0].strip())\n\t\tid = args.id[0].strip()\n\t\tsolr.set_search_type(\"id\")\n\t\tquery = solr.set_query(id)\n\t\tresponse = solr.exec_query(query)\n\t\tsolr.print_search_results(response)\n\n\tprint(\"\\nDone!\")", "def obj_create(self, bundle, request=None, **kwargs):\n return super(PlaylistResource, self).obj_create(bundle, request, user=request.user)", "def __init__(self):\n load_dotenv()\n self.api_url_base = os.getenv('API_URL_BASE')\n self.token = os.getenv('API_TOKEN')\n self.title_map = defaultdict(list)", "def __init__(self, name, remote):\n # Save a reference to the imported class\n self._name = name\n # Assume that the TV is not muted\n self._muted = False\n # Assume that the TV is in Play mode\n self._playing = True\n self._state = STATE_UNKNOWN\n self._remote = remote\n self._volume = 0", "def __init__(self):\n # keys and tokens from the Twitter Dev Console\n key = provide_keys('males')\n\n consumer_key = key['consumer_key']\n consumer_secret = key['consumer_secret']\n access_token = key['access_token_key']\n access_token_secret = key['access_token_secret']\n\n # attempt authentication\n\n # create OAuthHandler object\n self.auth = OAuthHandler(consumer_key, consumer_secret)\n\n # set access token and secret\n self.auth.set_access_token(access_token, access_token_secret)\n\n try:\n # create tweepy API object to fetch tweets\n self.api = tweepy.API(self.auth)\n\n except:\n print(\"Error: Authentication Failed\")\n sys.exit(-1)", "def __init__(self):\r\n self.playerPreparers = [WhitePlayerPreparer(), BlackPlayerPreparer()]", "async def spotify(self, ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send('Invalid command passed. 
Use the `?help spotify` command to learn more.')", "def __init__(self, sitename, popularity, content, latency):\n self.sitename = sitename\n self.content = content\n self.latency = latency\n self.popularity = popularity", "def __init__(self, api_token):\r\n self.apiroot = 'https://api.pipedrive.com/v1'\r\n self.api_token = api_token\r\n self.add_filter(self.add_auth)", "def add_tracks():\n sp = credentials()\n tracks = spotify_tracklist()\n playlist_id = grab_playlist()\n sp.user_playlist_add_tracks('truetiming', playlist_id, tracks)", "def __init__(self, title):\n\n self.__recent = []\n self.__lasturl = ''", "def __init__(self, title, poster_image_url, trailer_youtube_id):\n\t\tself.title = title\n\t\tself.poster_image_url = poster_image_url\n\t\tself.trailer_youtube_url = trailer_youtube_id", "def __init__(self, reddit):\n self._reddit = reddit", "def _create_user_object(self) -> None:\n\n token = util.prompt_for_user_token(self._USERNAME, self.scope, self._CLIENT_ID, self._CLIENT_SECRET, self.redirect_uri)\n self.spotipyObject = spotipy.Spotify(auth=token)", "def __init__(self, twitter_consumer_key, twitter_consumer_secret,\n twitter_access_key, twitter_access_secret,\n search_terms, search_on='news',\n bitly_access_token='',\n news_api_key=''):\n\n # Access Keys and Secrets for Twitter API obtained at: https://developer.twitter.com/\n auth = tweepy.OAuthHandler(twitter_consumer_key, twitter_consumer_secret)\n auth.set_access_token(twitter_access_key, twitter_access_secret)\n\n # Store API object for access to Twitter REST API\n self.__api = tweepy.API(auth)\n\n # Term(s) to search news feeds or Twitter on\n self.search_terms = search_terms\n\n # Method TwitterBot will use to search on. Current options are 'news' or 'twitter'\n self.search_on = search_on\n\n # Access token for optional Bitly API: https://dev.bitly.com/\n self.__bitly_access_token = bitly_access_token\n\n # Access token for optional News API: https://newsapi.org/\n self.__news_api_key = news_api_key\n\n # Will store list of items scraped from news or Twitter\n self.list = []", "def __init__(self, api_key):\n self.api_key = api_key\n self.base_url = 'https://studio.spotflock.com/api/v1'", "def __init__(self, verbose):\n\n self._sites = []\n self._verbose = verbose", "def create_playlist():\n sp = credentials()\n sp.user_playlist_create('truetiming', name='Billboard Hot 100')", "def __init__(\n self,\n clientID,\n secretID,\n redirctURI,\n username\n ):\n\n print('SpotifClient starts...')\n \n self.client_id = clientID\n self.secret_id = secretID\n self.redirect_uri = redirctURI\n self.username = username\n self._isConnected = False\n\n #self.Connect()", "def __init__(self, lyrics_url, artist=None, album_title=None, folder_path=None, song_order=None, cover_size='600'):\n self.album = Album(title=album_title, artist=artist)\n self.artist = artist\n self.album_title = album_title\n self.lyrics_url = lyrics_url\n self.song_order = song_order\n self.folder_path = Path(folder_path) if folder_path else None\n self.cover_file_name = 'cover.jpg'\n self.cover_size = f'{cover_size}x{cover_size}'\n self.track_urls = []\n self.cover_downloaded = False\n \n # self.r = requests.get(lyrics_url).text\n # self.soup = BeautifulSoup(self.r, 'html.parser')\n self.r = requests.get(lyrics_url)\n self.soup = BeautifulSoup(self.r.content, 'html.parser')", "def setup_class(cls):\n initialize(api_key=API_KEY, app_key=APP_KEY, api_host=API_HOST)", "def init_auth_client(self):\n with open(\"config.yml\", 'r') as ymlfile:\n cfg = yaml.load(ymlfile)\n 
token = util.prompt_for_user_token(\n cfg['username'],\n scope=cfg['scope'],\n client_id=cfg['spotipy_client_id'],\n client_secret=cfg['spotipy_client_secret'],\n redirect_uri=cfg['spotipy_redirect_uri'])\n sp = spotipy.Spotify(auth=token)\n return sp, cfg['username']", "def playlists(self):\r\n return v3.Playlists(self)", "def __init__(self):\n #print (\"Object created\")\n self.apikey='acc_4fc1a435b3188b5'\n self.secret = 'f49c4be14a048d5de7e7f6c564b52022'\n self.fileToIdMap = {}", "def __init__(self, connector):\n self.con = connector\n self.words = self.con.getWordsAPI()\n self.serverSettings = {}\n self.custom = {}\n self.ignored = {}\n self.whitelist = {}\n self.requestLog = []\n self.retrieveGuildsInfo()\n self.retrieveCustomWords()\n self.retrieveIgnoredWords()\n self.retrieveWhitelist()", "def __init__(self, auth):\n super(Socrata, self).__init__(auth)\n self.views = Views(auth)\n self.sources = Sources(auth)\n self.configs = Configs(auth)", "def __init__(self, api_url, credentials):\n self.api_url = api_url\n while self.api_url.endswith(\"/\"):\n self.api_url = self.api_url[:-1]\n self.credentials = credentials\n self.username = credentials.username\n self.token = credentials.token", "def __init__(self):\n\n self.lastcid=0\n self.calls = { }\n\n SessionList.__init__(self)", "def __init__(self, url, **kwargs):\n super(Play, self).__init__(**kwargs)\n self.value = url" ]
[ "0.72976667", "0.7259575", "0.6638557", "0.6578827", "0.6547392", "0.6536785", "0.6459842", "0.6452237", "0.6372635", "0.6340433", "0.6314363", "0.6273033", "0.62022495", "0.61825216", "0.61784863", "0.61702627", "0.6157572", "0.6012808", "0.59975034", "0.59931904", "0.5950478", "0.5914496", "0.591195", "0.5911734", "0.59014046", "0.58883506", "0.583911", "0.57777", "0.5772134", "0.5763367", "0.57528657", "0.57437867", "0.57216597", "0.5720842", "0.57204175", "0.5714322", "0.56952834", "0.5675343", "0.5660685", "0.56596947", "0.5658006", "0.56577253", "0.5647892", "0.5646859", "0.5644365", "0.5641865", "0.5624771", "0.56186545", "0.5612486", "0.5611178", "0.5607552", "0.55954087", "0.5594316", "0.5592577", "0.55876976", "0.5586795", "0.5585409", "0.5581099", "0.5576308", "0.5571661", "0.5568029", "0.5567192", "0.55668944", "0.5558996", "0.55558103", "0.5550226", "0.5546728", "0.55445486", "0.5542963", "0.55358195", "0.553344", "0.5531389", "0.5528607", "0.5526398", "0.5525392", "0.55208904", "0.5519746", "0.5519604", "0.5513007", "0.55096763", "0.55071205", "0.55032855", "0.5502431", "0.5501998", "0.5500118", "0.5499054", "0.5493819", "0.5488356", "0.5483896", "0.54835564", "0.5479911", "0.54788214", "0.54763687", "0.54737604", "0.54646826", "0.54594475", "0.54534596", "0.54525113", "0.5450078", "0.54464656", "0.5445982" ]
0.0
-1
[authorize and initialize spotify client]
def init_auth_client(self): with open("config.yml", 'r') as ymlfile: cfg = yaml.load(ymlfile) token = util.prompt_for_user_token( cfg['username'], scope=cfg['scope'], client_id=cfg['spotipy_client_id'], client_secret=cfg['spotipy_client_secret'], redirect_uri=cfg['spotipy_redirect_uri']) sp = spotipy.Spotify(auth=token) return sp, cfg['username']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_user(self) -> Any:\n return \\\n spotipy.Spotify(auth_manager=spotipy.oauth2.SpotifyOAuth(scope=\"playlist-modify-public\",\n client_id=self._public_id, client_secret=self._secret_id,\n redirect_uri=self._redirect_uri))", "def authenticate_spotify_api(SPOTIPY_CLIENT_ID, SPOTIPY_CLIENT_SECRET):\r\n auth_manager = SpotifyClientCredentials(client_id = SPOTIPY_CLIENT_ID, \r\n client_secret=SPOTIPY_CLIENT_SECRET)\r\n \r\n return spotipy.Spotify(auth_manager=auth_manager)", "def auth(self):\n token = spotipy.util.prompt_for_user_token(self.username,\n self.scope,\n client_id = self.client_id,\n client_secret = self.client_secret,\n redirect_uri= self.redirect_uri)\n if token:\n self.spotify = spotipy.Spotify(auth=token)\n else:\n print(colored.stylize(\"\"\"\\n[*] \"\"\", colored.fg(\"light_red\")) + 'Cant get token for: %s\\n' % (self.username))\n exit()", "def __init__(self, username):\n self.spotify = spotipy.Spotify(simple_auth_token(username))", "def __init__(\n self,\n clientID,\n secretID,\n redirctURI,\n username\n ):\n\n print('SpotifClient starts...')\n \n self.client_id = clientID\n self.secret_id = secretID\n self.redirect_uri = redirctURI\n self.username = username\n self._isConnected = False\n\n #self.Connect()", "def Connect(self,scope):\n\n \"\"\"\n Calling util.prompt_for_user_token will open Spotify’s application authorization\n page in your browser (and require you to log in if you are not already logged in\n to spotify.com), unless a locally cached access token exist from a previous authorization/authentication.\n \"\"\"\n try:\n token = util.prompt_for_user_token(\n self.username,\n scope,\n self.client_id,\n self.secret_id,\n self.redirect_uri)\n except ImportError:\n self._isConnected = False\n print(\" onnecting to Spotify failed\") \n\n\n if token:\n sp = spotipy.Spotify(auth=token)\n self._isConnected = True\n return sp\n else:\n print(\"Can't get token for\", self.username)\n self._isConnected = False", "async def initialize(self):\r\n self.access_token = await async_get_value(SPOTIFY_ACCESS_TOKEN)\r\n self.refresh_token = await async_get_value(SPOTIFY_REFRESH_TOKEN)\r\n self.should_poll = await async_get_value(SPOTIFY_SHOULD_POLL)\r\n request_code = self.get_currently_playing().status_code\r\n if request_code == requests.codes.ok or request_code == requests.codes.no_content:\r\n self.start_polling_and_refresh()\r\n return\r\n\r\n # Go through the oauth flow.\r\n self.auth_thread = StoppableThread(target=self.check_and_test_auth)\r\n self.auth_thread.start()\r\n return", "def authorize():\n scopes = 'playlist-modify-public playlist-modify-private playlist-read-private playlist-read-collaborative user-read-email user-read-private'\n\n spotify_authorize_url = 'https://accounts.spotify.com/authorize?'\n params = {\n 'response_type': 'code', \n 'client_id': SPOTIFY_CLIENT_ID,\n 'redirect_uri': 'http://0.0.0.0:5000/callback',\n 'scope': scopes, \n 'show_dialog': True\n }\n\n query_params = urllib.parse.urlencode(params)\n response = make_response(redirect(spotify_authorize_url + query_params))\n return response", "def authenticate(redirect_uri, client_cred_manager, username, scope,client_id,client_secret):\r\n\r\n sp = spotipy.Spotify(client_credentials_manager = client_cred_manager)\r\n token = util.prompt_for_user_token(username, scope, client_id, client_secret, redirect_uri)\r\n if token:\r\n sp = spotipy.Spotify(auth=token)\r\n else:\r\n print(\"Can't get token for\", username)\r\n return sp", "def authorize():\n encoded_auth = base64.b64encode(\n 
(os.environ[\"SPOTIFY_CLIENT_ID\"] + ':' + os.environ[\"SPOTIFY_CLIENT_SECRET\"]).encode())\n headers = {\n 'Authorization': 'Basic {}'.format(encoded_auth.decode(\"utf-8\"))\n }\n\n response = requests.post(os.environ['SPOTIFY_AUTH_URL'], data={'grant_type': 'client_credentials'},\n headers=headers).text\n return json.loads(response)", "def async_setup(hass, config):\n import spotipy.oauth2\n import json\n global AIS_SPOTIFY_TOKEN\n\n try:\n ws_resp = aisCloud.key(\"spotify_oauth\")\n json_ws_resp = ws_resp.json()\n spotify_redirect_url = json_ws_resp[\"SPOTIFY_REDIRECT_URL\"]\n spotify_client_id = json_ws_resp[\"SPOTIFY_CLIENT_ID\"]\n spotify_client_secret = json_ws_resp[\"SPOTIFY_CLIENT_SECRET\"]\n spotify_scope = json_ws_resp[\"SPOTIFY_SCOPE\"]\n try:\n ws_resp = aisCloud.key(\"spotify_token\")\n key = ws_resp.json()[\"key\"]\n AIS_SPOTIFY_TOKEN = json.loads(key)\n except:\n AIS_SPOTIFY_TOKEN = None\n _LOGGER.info(\"No AIS_SPOTIFY_TOKEN\")\n except Exception as e:\n _LOGGER.error(\"No spotify oauth info: \" + str(e))\n return False\n\n cache = hass.config.path(DEFAULT_CACHE_PATH)\n gate_id = ais_global.get_sercure_android_id_dom()\n oauth = spotipy.oauth2.SpotifyOAuth(spotify_client_id, spotify_client_secret, spotify_redirect_url,\n scope=spotify_scope, cache_path=cache, state=gate_id)\n token_info = oauth.get_cached_token()\n if not token_info:\n _LOGGER.info(\"no spotify token in cache;\")\n if AIS_SPOTIFY_TOKEN is not None:\n with open(cache, 'w') as outfile:\n json.dump(AIS_SPOTIFY_TOKEN, outfile)\n token_info = oauth.get_cached_token()\n if not token_info:\n _LOGGER.info(\"no spotify token; run configurator\")\n async_request_configuration(hass, config, oauth)\n return True\n\n if hass.data.get(DOMAIN):\n configurator = hass.components.configurator\n configurator.request_done(hass.data.get(DOMAIN))\n del hass.data[DOMAIN]\n\n # register services\n data = hass.data[DOMAIN] = SpotifyData(hass, oauth)\n\n # service = configured_service(hass)\n\n @asyncio.coroutine\n def search(call):\n _LOGGER.info(\"search \" + str(call))\n yield from data.process_search_async(call)\n\n def select_track_name(call):\n _LOGGER.info(\"select_track_name\")\n data.process_select_track_name(call)\n\n def change_serive(call):\n _LOGGER.info(\"change_serive\")\n data.change_serive(call)\n\n hass.services.async_register(DOMAIN, 'search', search)\n hass.services.async_register(DOMAIN, 'select_track_name', select_track_name)\n hass.services.async_register(DOMAIN, 'change_serive', change_serive)\n\n return True", "def authorize(self):\n\t\ttry:\n\t\t\tauth_url = 'https://accounts.spotify.com/api/token'\n\t\t\theaders={}\n\t\t\tdata={}\n\n\t\t\tdata_string = f\"{self.client_id}:{self.client_secret}\"\n\n\t\t\tdata_bytes = data_string.encode(\"ascii\")\n\t\t\tbase_bytes = base64.b64encode(data_bytes)\n\t\t\tbase_message = base_bytes.decode(\"ascii\")\n\n\t\t\theaders['Authorization'] = f\"Basic {base_message}\"\n\n\t\t\tdata = parse.urlencode({\"grant_type\": \"client_credentials\"})\n\t\t\tdata = data.encode('ascii')\n\n\t\t\treq = request.Request(auth_url,data=data, headers=headers)\n\t\t\tlogging.info(\"Successfully called Spotify token API!\")\n\t\texcept:\n\t\t\tlogging.error(\"Failed to create authorization request!\")\n\t\t\treturn False\n\t\t\t\n\t\tif req is not None:\n\t\t\ttry:\n\t\t\t\tresponse = request.urlopen(req).read().decode()\n\t\t\texcept error.URLError as e:\n\t\t\t\tresponse = e.read().decode(\"utf8\", 'ignore')\n\t\t\t\tlogging.error(response)\n\t\t\t\treturn 
False\n\t\t\n\t\ttry:\n\t\t\t_json = json.loads(response)\n\t\t\tself.token = _json[\"access_token\"]\n\t\t\tlogging.info(\"Successfully received token from Spotify!\")\n\t\texcept:\n\t\t\tlogging.error(\"Could not fetch token from response!\")\n\t\t\treturn False\n\t\t\t\n\t\treturn True", "def __init__(self):\n self.authurl = Config().auth\n self.baseurl = Config().api\n self.s = Session()\n self.s.headers = {'Accept': 'application/json'}\n data = {\"grant_type\": \"client_credentials\", \"scope\": \"/read-public\", \"client_id\": Config().client_id,\n \"client_secret\": Config().client_secret}\n r = self.s.request(method=\"post\", url=self.authurl, data=data)\n self.s.headers = {'Accept': 'application/json', \"Access token\": r.json()[\"access_token\"]}", "def init_api():\n global soundcloud\n import json\n \n SECRETS_VERSION = 1\n \n # Load secrets file\n if os.path.exists(config.token_cache):\n with open(config.token_cache, 'r', encoding='utf-8') as f:\n secrets = json.load(f)\n else:\n secrets = {}\n \n # Try to reuse the cached access token\n if secrets\\\n and secrets['version'] == SECRETS_VERSION\\\n and secrets['access_token_acquired_at'] + secrets['access_token_expires_in'] > time() - 5 * 60\\\n and secrets['username'] == config.username:\n \n soundcloud = Soundcloud(\n client_id=config.client_id,\n client_secret=config.client_secret,\n access_token=secrets['access_token']\n )\n return\n \n # Get a new access token\n logging.info('Getting a new access token') \n try:\n soundcloud = Soundcloud(\n client_id=config.client_id,\n client_secret=config.client_secret,\n username=config.username,\n password=config.password\n )\n except HTTPError as e:\n if e.response.status_code == 401:\n logging.critical('Incorrect API key, login or password. Please, edit config.py.')\n sys.exit(1)\n else:\n raise\n \n # Save the token\n secrets = {\n 'version': SECRETS_VERSION,\n 'username': config.username,\n 'access_token': soundcloud.access_token,\n 'access_token_acquired_at': time(),\n 'access_token_expires_in': soundcloud.token.expires_in,\n }\n \n with open(config.token_cache, 'w', encoding='utf-8') as f:\n secrets = json.dump(secrets, f, indent='\\t', ensure_ascii=False)", "def __init__(self):\n self.api = Api(consumer_key=credentials[\"consumer_key\"],\n consumer_secret=credentials[\"consumer_secret\"],\n access_token_key=credentials[\"access_token_key\"],\n access_token_secret=credentials[\"access_token_secret\"])", "def __init__(self, client_auth_type, client_id, client_secret=None):\n self.client_auth_type = client_auth_type\n self.client_id = client_id\n self.client_secret = client_secret", "def __init__(self, client_access_token, artist_name):\n self.client_access_token = client_access_token\n self.artist_name = artist_name\n self.base_url = 'https://api.genius.com/'\n self.headers = {'Authorization': 'Bearer ' + self.client_access_token}\n self.artist_songs = None", "def __init__(self,\n client_id,\n client_secret):\n self.__client_id = client_id\n self.__client_secret = client_secret", "def create_token():\n def token_helper():\n token = util.prompt_for_user_token(username=\"robbo1992\", scope='user-library-read playlist-modify-private playlist-modify',\n client_id=config[\"spotify\"][\"client_id\"], client_secret=config[\"spotify\"][\"secret_id\"],\n redirect_uri='http://localhost:8080', cache_path=spotify_cache)\n return token\n if token_helper():\n log.debug(\"Succesfully generated a spotify token for authentication\")\n return spotipy.Spotify(auth=token_helper())\n else:\n if 
motley.internet:\n if token_helper():\n log.debug(\"Succesfully generated a spotify token for authentication\")\n return spotipy.Spotify(auth=token_helper())\n else:\n log.error(\"Authentication error in create_token method.\")", "def _authorize(self):\n auth = tweepy.OAuthHandler(self.keys[\"consumer_key\"], self.keys[\"consumer_secret\"])\n auth.set_access_token(self.keys[\"access_token\"], self.keys[\"access_token_secret\"])\n return tweepy.API(auth)", "def __init__(self):\n self.sp, self.user = self.init_auth_client()\n self.logger = logging.getLogger(__name__)", "def __init__(self, credentials):\n self.credentials = credentials\n http = httplib2.Http()\n http = self.credentials.authorize(http)\n self.service = build(\"drive\", \"v2\", http=http)", "def do_setup(self, context):\n self.restclient = rest_client.RestClient(self.configuration)\n return self.restclient.login()", "def init_api(self):\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists(self.gdrive_config.TOKEN_PICK_PATH):\n with open(self.gdrive_config.TOKEN_PICK_PATH, 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n self.gdrive_config.CREDENTIAL_PATH, self.gdrive_config.SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open(self.gdrive_config.TOKEN_PICK_PATH, 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('drive', 'v3', credentials=creds)\n return service", "def __init__(__self__, *,\n client_id: Optional[pulumi.Input[str]] = None,\n client_secret: Optional[pulumi.Input[str]] = None,\n metadata_url: Optional[pulumi.Input[str]] = None,\n scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):\n if client_id is not None:\n pulumi.set(__self__, \"client_id\", client_id)\n if client_secret is not None:\n pulumi.set(__self__, \"client_secret\", client_secret)\n if metadata_url is not None:\n pulumi.set(__self__, \"metadata_url\", metadata_url)\n if scopes is not None:\n pulumi.set(__self__, \"scopes\", scopes)", "def __init__(self, client_id, token, scope=[\"activity\", \"heartrate\", \"location\", \"nutrition\", \"profile\", \"settings\", \"sleep\", \"social\", \"weight\"]):\n\n\t\tif token['access_token'] == \"\":\n\t\t\t# We need to fetch a token for the user.\n\t\t\tprint(\"Note: looks like we don't have an access token yet. 
Let's fetch one.\")\n\n\t\t\tself.client = MobileApplicationClient(client_id)\n\t\t\tself.fitbit = OAuth2Session(client_id, client=self.client, scope=scope)\n\n\t\t\tauthorization_base_url = \"https://www.fitbit.com/oauth2/authorize\"\n\n\t\t\tauthorization_url, state = self.fitbit.authorization_url(authorization_base_url)\n\n\t\t\tprint(\"Please go to the following authorization URL: {}\".format(authorization_url))\n\n\t\t\traw_callback_url = input(\"Paste callback URL you get back here: \")\n\n\t\t\tself.fitbit.token_from_fragment(raw_callback_url)\n\t\t\tself.token = self.fitbit.token['access_token']\n\n\t\t\tprint(self.fitbit.token)\n\n\t\telse:\n\t\t\t# We've got an access token, and we'll use it.\n\t\t\tself.client = MobileApplicationClient(client_id)\n\t\t\tself.fitbit = OAuth2Session(client_id, client=self.client, scope=scope, token=token)\n\t\t\tself.token = token['access_token']", "def _create_user_object(self) -> None:\n\n token = util.prompt_for_user_token(self._USERNAME, self.scope, self._CLIENT_ID, self._CLIENT_SECRET, self.redirect_uri)\n self.spotipyObject = spotipy.Spotify(auth=token)", "async def async_setup(hass, config):\n conf = config[DOMAIN]\n\n username = conf[CONF_USERNAME]\n password = conf[CONF_PASSWORD]\n accounts = conf.get(CONF_ACCOUNTS)\n\n @callback\n def websocket_handle_playlists(hass, connection, msg):\n \"\"\"Handle get playlist\"\"\"\n import spotipy\n access_token, expires = get_spotify_token(username=username, password=password)\n client = spotipy.Spotify(auth=access_token)\n resp = client._get('views/made-for-x?content_limit=10&locale=en&platform=web&types=album%2Cplaylist%2Cartist%2Cshow%2Cstation', limit=10,\n offset=0)\n connection.send_message(\n websocket_api.result_message(msg[\"id\"], resp)\n )\n\n def get_spotify_token(username, password):\n import spotify_token as st\n data = st.start_session(username, password)\n access_token = data[0]\n # token_expires = data[1]\n expires = data[1] - int(time.time())\n return access_token, expires\n\n def play(client, spotify_device_id, uri, random_song, repeat):\n # import spotipy\n # import http.client as http_client\n # spotipy.trace = True\n # spotipy.trace_out = True\n # http_client.HTTPConnection.debuglevel = 1\n\n _LOGGER.debug('Version: %s, playing URI: %s on device-id: %s', _VERSION, uri, spotify_device_id)\n if uri.find('track') > 0:\n _LOGGER.debug('Playing track using uris= for uri: %s', uri)\n client.start_playback(device_id=spotify_device_id, uris=[uri])\n else:\n if uri == 'random':\n _LOGGER.debug('Cool, you found the easter egg with playing a random playlist')\n playlists = client.user_playlists('me', 50)\n no_playlists = len(playlists['items'])\n uri = playlists['items'][random.randint(0, no_playlists - 1)]['uri']\n kwargs = {'device_id': spotify_device_id, 'context_uri': uri}\n if random_song:\n results = client.user_playlist_tracks(\"me\", uri)\n position = random.randint(0, results['total'] - 1)\n _LOGGER.debug('Start playback at random position: %s', position)\n kwargs['offset'] = {'position': position}\n\n _LOGGER.debug('Playing context uri using context_uri for uri: \"%s\" (random_song: %s)', uri, random_song)\n client.start_playback(**kwargs)\n if repeat:\n _LOGGER.debug('Turning repeat on')\n time.sleep(5)\n client.repeat(state=repeat, device_id=spotify_device_id)\n\n def get_account_credentials(call):\n \"\"\" Get credentials for account \"\"\"\n account = call.data.get(CONF_SPOTIFY_ACCOUNT)\n user = username\n pwd = password\n if account is not None:\n _LOGGER.debug('setting up 
with different account than default %s', account)\n user = accounts.get(account).get(CONF_USERNAME)\n pwd = accounts.get(account).get(CONF_PASSWORD)\n return user, pwd\n\n def shouldTransferPlayback(call, client):\n \"\"\" Check if something is playing \"\"\"\n uri = call.data.get(CONF_SPOTIFY_URI)\n if uri is None or uri.strip() == '' or call.data.get(CONF_TRANSFER_PLAYBACK):\n current_playback = client.current_playback()\n if current_playback is not None:\n _LOGGER.debug('current_playback from spotipy: %s', current_playback)\n return True\n return False\n\n async def start_casting(call):\n \"\"\"service called.\"\"\"\n import spotipy\n\n uri = call.data.get(CONF_SPOTIFY_URI)\n random_song = call.data.get(CONF_RANDOM, False)\n repeat = call.data.get(CONF_REPEAT)\n\n # Account\n user, pwd = get_account_credentials(call)\n\n # login as real browser to get powerful token\n access_token, expires = get_spotify_token(username=user, password=pwd)\n\n # get the spotify web api client\n client = spotipy.Spotify(auth=access_token)\n\n # launch the app on chromecast\n spotify_cast_device = SpotifyCastDevice(hass, call.data.get(CONF_DEVICE_NAME), call.data.get(CONF_ENTITY_ID))\n spotify_cast_device.startSpotifyController(access_token, expires)\n spotify_device_id = spotify_cast_device.getSpotifyDeviceId(client)\n\n transfer_playback = shouldTransferPlayback(call, client)\n if transfer_playback == True:\n _LOGGER.debug('Transfering playback')\n client.transfer_playback(\n device_id=spotify_device_id, force_play=True)\n else:\n play(client, spotify_device_id, uri, random_song, repeat)\n\n # Register websocket and service\n hass.components.websocket_api.async_register_command(\n WS_TYPE_SPOTCAST_PLAYLISTS, websocket_handle_playlists, SCHEMA_PLAYLISTS\n )\n\n hass.services.async_register(DOMAIN, 'start', start_casting,\n schema=SERVICE_START_COMMAND_SCHEMA)\n\n return True", "def __init__(self, access_key, secret_key, **kwargs):\r\n pass", "def authorize(self) -> None:\n\n if not self.login_secret:\n #TODO trigger error\n self.login()\n \n\n sObj = Splitwise(self.consumer_key, self.consumer_secret)\n self.access_token = sObj.getAccessToken(\n self.oauth_token,\n self.login_secret,\n self.oauth_verifier\n )", "def authenticate():\n\n # We are uploading and then downloading so we want Musicmanager\n api = Musicmanager()\n\n # Attempt to authenticate and log in\n logged_in = api.login()\n\n # If login() returns false, you have not performed oauth yet, or did not\n # write your credentials to your disk. Using oauth allows authentication\n # without providing plaintext credentials to the application\n if not logged_in:\n print('No oauth credentials found, please authenticate your account')\n\n # Performs oauth and stores generated credentials to Appdirs \n # 'user_data_dir' by default. 
oauth only needs to be performed once per \n # machine if the credentials are stored, which is the default behavior.\n authenticated = api.perform_oauth(open_browser=True)\n else:\n print('Successfully logged in.\\n')\n\n return api", "def update_access_token(self):\n self.token = util.prompt_for_user_token(self._username, scope,\n client_id=const.CLIENT_ID,\n client_secret=const.CLIENT_SECRET,\n redirect_uri=const.REDIRECT_URL)\n self._client = spotipy.Spotify(auth=self.token)", "def __init__(self, authorization_url, token_url, token_refresh_url, client_id, xapi_key,\n local_server_address=LOCAL_OAUTH_SERVER_URL):\n\n # Grab the client info needed\n self.local_server_address = local_server_address\n self.authorization_url = authorization_url\n self.token_url = token_url\n self.token_refresh_url = token_refresh_url\n self.client_id = client_id\n self.xapi_key = xapi_key", "def client():\n return Client(**common_data.AUTH_ARGS)", "def setup(self):\n # Load application default credentials if they're available.\n self.credentials = self._load_application_default_credentials()\n\n # Otherwise, load credentials from the provided client secrets file.\n # Name of a file containing the OAuth 2.0 information for this\n # application, including client_id and client_secret, which are found\n # on the Credentials tab on the Google Developers Console.\n self.client_secrets = os.path.join(os.path.dirname(__file__),\n self.client_secrets)\n\n credential_store_file = os.path.join(os.path.dirname(__file__),\n self.credential_store_file)\n\n storage = oauthFile.Storage(credential_store_file)\n\n if self.credentials is None or self.credentials.invalid:\n self.credentials = self._load_user_credentials(storage)\n\n # Authorize HTTP object with the prepared credentials.\n http = self.credentials.authorize(http=httplib2.Http())\n\n # Construct and return a service object via the discovery service.\n self.service = discovery.build(self.api_name, self.api_version, http=http)\n return self.service", "def __init__(self, oauth=None, client_id=None):\n\t\tself.oauth = oauth\n\t\tself.client_id = client_id or self.default_client_id", "def get_spotify(s_creds, usernum):\n # Authorize Spotify\n\n token = spotipy.util.prompt_for_user_token(\n s_creds[\"usernames\"][usernum],\n s_creds[\"scopes\"],\n s_creds[\"client_id\"],\n s_creds[\"client_secret\"],\n s_creds[\"redirect_uri\"],\n )\n\n return spotipy.Spotify(auth=token)", "def __init__(self, apikey, secret):\n self.apikey = apikey\n self.secret = secret", "def authorize(self, oauth2_token):\r\n storage = file.Storage(oauth2_token)\r\n credentials = storage.get()\r\n http = credentials.authorize(httplib2.Http())\r\n self.service = discovery.build('youtube', 'v3', http=http)", "def __init__(self):\n self.__client = Client(verify_ssl_cert=True)\n self.__headers = {'Content-Type': 'application/json'}\n self.login()", "def __init__(self, **kwargs):\n self.config = kwargs[\"config\"]\n self.cli = client.DefaultClient(app_key=self.config[\"app_key\"], app_secret=self.config[\"app_secret\"])\n self.req = None", "def __init__(self):\n\n self._authorize()", "def __init__(self, consumer_key, consumer_secret, access_token, access_token_secret):\n self.api = self.getAPI(consumer_key, consumer_secret, access_token, access_token_secret)", "def __init__(self, client_id, client_secret):\n self.client_id = client_id\n self.client_secret = client_secret\n self.token = None\n self.request_time = None\n self._initialized = False", "def __init__(self, client=\"ANDROID_EMBED\"):\n 
self.context = self._DEFAULT_CLIENTS[client][\"context\"]\n self.api_key = self._DEFAULT_CLIENTS[client][\"api_key\"]", "def __init__(self, api_key, client_id=None, client_secret=None):\n self.api = API(api_key)\n self._manifest = Manifest(self.api)\n self.oauth = OAuth(client_id, client_secret)", "def get_spotify_authtoken(client_id, client_secret, scope, refresh_token=None,\n redirect_uri=\"https://example.com/callback\"):\n\n # If refresh token has been passed in, try to use it to generate a\n # new auth_token.\n\n if refresh_token:\n # Setup Base64 Client Secret to Send\n secret = f\"{client_id}:{client_secret}\"\n b64_secret = base64.b64encode(bytes(secret, \"utf-8\")).decode(\"utf-8\")\n\n body = {\"grant_type\": \"refresh_token\",\n \"refresh_token\": refresh_token}\n auth_url = \"https://accounts.spotify.com/api/token\"\n auth_header = {\"Authorization\": f\"Basic {b64_secret}\"}\n\n res = requests.post(auth_url, data=body, headers=auth_header)\n\n auth_token = res.json()[\"access_token\"]\n try:\n refresh_token = res.json()[\"refresh_token\"]\n except Exception:\n refresh_token = None\n\n # If no refresh token is available, generate a new auth_token by\n # prompting the user to login and authorise the application.\n\n else:\n auth_url = f\"https://accounts.spotify.com/authorize?client_id={client_id}&response_type=code&redirect_uri={redirect_uri}&scope={scope}\"\n\n # Setup Browser\n opts = Options()\n opts.add_argument('--no-sandbox')\n browser = Chrome(\"./chromedriver/chromedriver\", options=opts)\n\n # Go to auth page, sign-in and wait for code to be returned\n browser.get(auth_url)\n WebDriverWait(browser, 60).until(EC.url_contains(redirect_uri))\n\n # Pull auth code from redirect_uri & close browser\n code = browser.current_url.split(\"code=\")[1].split(\"#\")[0]\n browser.close()\n\n # Step 2: Auth Token\n\n body = {\"grant_type\": \"authorization_code\",\n \"code\": code,\n \"redirect_uri\": redirect_uri,\n \"client_id\": client_id,\n \"client_secret\": client_secret}\n auth_url = \"https://accounts.spotify.com/api/token\"\n res = requests.post(auth_url, data=body)\n auth_token = res.json()[\"access_token\"]\n try:\n refresh_token = res.json()[\"refresh_token\"]\n except Exception:\n refresh_token = None\n\n return (auth_token, refresh_token)", "def __init__(self, base_url, client_id, client_secret, client_scope, api_json = None):\n # type: (str, str, str, str, str) -> None\n\n self.base_url = base_url\n self.client_id = client_id\n self.client_secret = client_secret\n self.client_scope = client_scope\n\n # If the user doesn't pass an alternate API file use the included one\n if not api_json:\n api_json = pkg_resources.resource_filename(__name__, 'apis.json')\n\n with open(api_json, encoding='utf-8') as api_file:\n apis = json.loads(api_file.read())\n\n if client_scope in apis: \n api = apis.get(client_scope)\n self.token_url = api.get('token_url')\n self.api_call = sleep_and_retry(limits(calls=api.get('limits_calls'), period=api.get('limits_period'))(self._api_call))\n self.access_token = self.get_access_token(self.token_url)\n else: \n raise Exception(f\"Scope {client_scope} not in known API dict\")", "def spotify(request):\n\n logger.debug(\"Spotify Album Called\")\n response_data = {}\n\n spotify_pre_auth = spotify_auth(request)\n# if type(spotify_pre_auth) is JsonResponse:\n# return spotify_pre_auth\n\n response = spotify_pre_auth.get('result','')\n album_url = spotify_pre_auth.get('album_url','')\n response_code=spotify_pre_auth.get('status_code','')\n\n if 
response_code == 200:\n auth_data = json.loads(response)\n access_token = auth_data['access_token']\n\n headers = {\"Authorization\": \"Bearer %s\" % access_token}\n album_url = \"https://api.spotify.com/v1/albums/\" + album_url.split(\"/\")[-1]\n logger.debug(\"Spotify get album: %s\" % album_url)\n res = requests.get(album_url, headers=headers)\n if res.status_code == 200:\n album = json.loads(res.text)\n logger.debug(\"Found on Spotify the album: %(name)s\" % album)\n return json.loads(res.text)\n else:\n response = res.text\n response_code = res.status_code\n response_body = {\"result\": \"failure\", \"message\": \"Spotify album failed. Check the url or the connections\", \"status_code\": response_code}\n return response_body", "def service_client_initialization(self) -> global___Snippet.ClientInitialization:", "def __init__(__self__, *,\n client_id: pulumi.Input[str],\n secret: pulumi.Input[str]):\n pulumi.set(__self__, \"client_id\", client_id)\n pulumi.set(__self__, \"secret\", secret)", "def __init__(self, client_id: str, client_secret: str, access_token_publish_url: str, access_token: str = None):\n\n self.client_id = client_id\n self.client_secret = client_secret\n self.access_token_publish_url = access_token_publish_url\n self.api_base_url = 'https://api.ce-cotoha.com/api/dev/'\n\n if access_token is not None:\n self.access_token = access_token\n else:\n self.access_token = self.update_access_token()", "def make_spotify(\n *,\n session: Optional[requests.Session] = None,\n access_token: Optional[str] = None,\n token: Optional[Token] = None,\n client_id: Optional[str] = None,\n client_secret: Optional[str] = None,\n redirect_uri: Optional[str] = None,\n scope: Optional[str] = None,\n state: Optional[str] = None,\n) -> SyncSpotify:\n return SyncSpotify(\n RequestsClient(\n session=session,\n access_token=access_token,\n token=token,\n client_id=client_id,\n client_secret=client_secret,\n redirect_uri=redirect_uri,\n scope=scope,\n state=state,\n )\n )", "def __init__(self, client_id=None, client_secret=None):\n self.client_id = client_id\n self.client_secret = client_secret\n self.access_token = None\n self.refresh_token = None\n self.token_expiration_time = None", "def initialize_drive():\n credentials_drive = ServiceAccountCredentials.from_json_keyfile_name(\n KEY_FILE_LOCATION, \n SCOPE\n )\n return gspread.authorize(credentials_drive)", "def __init__(self, api_token):\r\n self.apiroot = 'https://api.pipedrive.com/v1'\r\n self.api_token = api_token\r\n self.add_filter(self.add_auth)", "def initialize_oauth2_session(self):\n\n def token_updater(token):\n \"\"\"Stores oauth2 token on disk\"\"\"\n try:\n with open(self.OAUTH_TOKEN_PATH, 'w') as f:\n json.dump(token, f)\n except Exception as err:\n log.Error('Could not save the OAuth2 token to %s. This means '\n 'you may need to do the OAuth2 authorization '\n 'process again soon. Original error: %s' % (\n self.OAUTH_TOKEN_PATH, err))\n\n token = None\n try:\n with open(self.OAUTH_TOKEN_PATH) as f:\n token = json.load(f)\n except IOError as err:\n log.Notice('Could not load OAuth2 token. '\n 'Trying to create a new one. 
(original error: %s)' % err)\n\n self.http_client = OAuth2Session(\n self.CLIENT_ID,\n scope=self.OAUTH_SCOPE,\n redirect_uri=self.OAUTH_REDIRECT_URL,\n token=token,\n auto_refresh_kwargs={\n 'client_id': self.CLIENT_ID,\n 'client_secret': self.CLIENT_SECRET,\n },\n auto_refresh_url=self.OAUTH_TOKEN_URL,\n token_updater=token_updater)\n\n if token is not None:\n self.http_client.refresh_token(self.OAUTH_TOKEN_URL)\n\n endpoints_response = self.http_client.get(self.metadata_url +\n 'account/endpoint')\n if endpoints_response.status_code != requests.codes.ok:\n token = None\n\n if token is None:\n if not sys.stdout.isatty() or not sys.stdin.isatty():\n log.FatalError('The OAuth2 token could not be loaded from %s '\n 'and you are not running duplicity '\n 'interactively, so duplicity cannot possibly '\n 'access Amazon Drive.' % self.OAUTH_TOKEN_PATH)\n authorization_url, _ = self.http_client.authorization_url(\n self.OAUTH_AUTHORIZE_URL)\n\n print('')\n print('In order to allow duplicity to access Amazon Drive, please '\n 'open the following URL in a browser and copy the URL of the '\n 'page you see after authorization here:')\n print(authorization_url)\n print('')\n\n redirected_to = (raw_input('URL of the resulting page: ')\n .replace('http://', 'https://', 1)).strip()\n\n token = self.http_client.fetch_token(\n self.OAUTH_TOKEN_URL,\n client_secret=self.CLIENT_SECRET,\n authorization_response=redirected_to)\n\n endpoints_response = self.http_client.get(self.metadata_url +\n 'account/endpoint')\n endpoints_response.raise_for_status()\n token_updater(token)\n\n urls = endpoints_response.json()\n if 'metadataUrl' not in urls or 'contentUrl' not in urls:\n log.FatalError('Could not retrieve endpoint URLs for this account')\n self.metadata_url = urls['metadataUrl']\n self.content_url = urls['contentUrl']", "def get_spotify_token(self):\n scope = \"playlist-modify-public playlist-modify-private user-read-email user-library-modify playlist-read-private\"\n token = spotipy.util.prompt_for_user_token(\n username=self.username,\n scope=scope,\n client_id=secrets.client_id,\n client_secret=secrets.client_secret,\n redirect_uri=secrets.redirect_uri\n )\n sp = spotipy.Spotify(auth=token)\n return sp", "def initialization_call(self) -> global___Snippet.ClientCall:", "def initialization_call(self) -> global___Snippet.ClientCall:", "def initialization_call(self) -> global___Snippet.ClientCall:", "def client_setup(self):\n self.client = Client()", "def __init__(self, requestor, client_id, client_secret, redirect_uri=None):\n super(TrustedAuthenticator, self).__init__(requestor, client_id,\n redirect_uri)\n self.client_secret = client_secret", "def __init__(__self__,\n resource_name: str,\n args: OAuthArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(self, client_id: str, client_secret: str,\n x_api_key: str, version: str):\n super().__init__(client_id, client_secret, x_api_key, version)", "def __init__(self):\r\n self.apiroot = 'http://ws.spotify.com/'\r\n self.add_filter(self.use_json)", "async def startup_handler(app):\n\n spotify_client_id = os.environ.get(SPOTIFY_CLIENT_ID)\n spotify_client_secret = os.environ.get(SPOTIFY_CLIENT_SECRET)\n\n # Save dependencies in the HTTP app.\n http.register_dependency(app, SPOTIFY_CLIENT_ID, spotify_client_id)\n http.register_dependency(app, SPOTIFY_CLIENT_SECRET, spotify_client_secret)\n\n async def cleanup(app):\n \"\"\"Perform required cleanup on shutdown\"\"\"\n # await client_session.close()\n\n app.on_shutdown.append(cleanup)", "def 
__init__(self, client_id, client_secret=None, access_token=None,\n refresh_token=None, verify=True):\n self.is_authenticated = False\n self.access_token = access_token\n self.client_id = client_id\n self.client_secret = client_secret\n self.DEFAULT_LIMIT = 100\n self.ratelimit_clientlimit = None\n self.ratelimit_clientremaining = None\n self.ratelimit_userlimit = None\n self.ratelimit_userremaining = None\n self.ratelimit_userreset = None\n self.refresh_token = refresh_token\n self.verify = verify", "def setup() -> None:\n creds = None\n if os.path.exists('token.json'):\n creds = Credentials.from_authorized_user_file('token.json', SCOPES)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.json', 'w') as token:\n token.write(creds.to_json())", "def __init__(self, access_token, endpoint='/me',\r\n version='2.5'):\r\n self.access_token = access_token\r\n self.endpoint = endpoint", "def __init__(self, api_key, client=Fetcher(FANART_URL)):\n self.api_key = api_key\n self.client = client", "def __init__(self, service, acces_key, secret_key):\n \n self.client = boto3.client(\n service,\n aws_access_key_id=acces_key,\n aws_secret_access_key=secret_key,\n )", "def __init__(self, **kwargs):\n\n session = vk_api.VkApi(**kwargs)\n try:\n session.auth(token_only=True)\n except vk_api.AuthError as error_msg:\n print(error_msg)\n raise\n self.api = session.get_api()", "def test_auth_client_instantiated():\n client = ConfigureClients()\n assert client.auth_client", "def __init__(self, credentials):\n http = credentials.authorize(httplib2.Http())\n self.service = googleapiclient.discovery.build(\"drive\", \"v2\", http=http)", "def get_token():\n\n def token_helper():\n token = util.prompt_for_user_token(username=\"robbo1992\",\n scope='user-library-read playlist-modify-private playlist-modify',\n client_id=config[\"spotify\"][\"client_id\"],\n client_secret=config[\"spotify\"][\"secret_id\"],\n redirect_uri='http://localhost:8080', cache_path=spotify_cache)\n return token\n\n if token_helper():\n log.debug(\"Succesfully generated a spotify token for authentication\")\n return spotipy.Spotify(auth=token_helper())\n else:\n if motley.internet:\n if token_helper():\n log.debug(\"Succesfully generated a spotify token for authentication\")\n return spotipy.Spotify(auth=token_helper())\n else:\n log.error(\"Authentication error in create_token method.\")\n raise Exception", "def _require_login(self):\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + str(self.token))", "def __init__(self, client_id=None, access_token=None):\r\n if not client_id and not access_token:\r\n raise TypeError('__init__() must be passed at least one '\r\n 'of client_id, access_token')\r\n\r\n self.apiroot = 'https://api.instagram.com/v1'\r\n\r\n self.client_id = client_id\r\n self.access_token = access_token\r\n self.add_filter(self.add_authorization)", "def __init__(self, client_id, client_secret):\r\n self.client_id = client_id\r\n self.client_secret = client_secret\r\n\r\n self.add_filter(self.set_header)", "def __init__(self, region, user_pool_id, app_client_id):\n self.region = region\n self.user_pool_id = user_pool_id\n self.client_id = app_client_id\n self.client = boto3.client('cognito-idp', 
region_name=self.region)", "def __init__(self, access_token, base_url=\"https://api.crowdstrike.com\"):\n self.headers = {'Authorization': 'Bearer {}'.format(access_token)}\n self.base_url = base_url", "def __init__(self, access_token, base_url='https://api.crowdstrike.com'):\n self.headers = { 'Authorization': 'Bearer {}'.format(access_token) }\n self.base_url = base_url", "def __init__(self, token=None, token_path=\"tokens.txt\", username=None, password=None,\n grant_type=\"api-password\", client_id=\"brandwatch-api-client\",\n api_url=\"https://api.brandwatch.com/\"):\n self.api_url = api_url\n self.oauthpath = \"oauth/token\"\n\n if token:\n self._update_by_test_auth(username, token)\n self._write_auth(token_path)\n elif username is not None and password is not None:\n self._update_by_auth(username, password, token_path, grant_type, client_id)\n self._write_auth(token_path)\n elif username is not None:\n self._read_auth(username, token_path)\n else:\n raise KeyError(\"Must provide valid token, username and password,\"\n \" or username and path to token file\")", "def __init__(__self__, *,\n client_id: Optional[pulumi.Input[str]] = None,\n client_secret: Optional[pulumi.Input[str]] = None,\n issuer_uri: Optional[pulumi.Input[str]] = None,\n scope: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):\n if client_id is not None:\n pulumi.set(__self__, \"client_id\", client_id)\n if client_secret is not None:\n pulumi.set(__self__, \"client_secret\", client_secret)\n if issuer_uri is not None:\n pulumi.set(__self__, \"issuer_uri\", issuer_uri)\n if scope is not None:\n pulumi.set(__self__, \"scope\", scope)", "def test_constructor_only_auth(self):\n test_utils.generate_test_config_file_with_only_powertrack()\n\n expected_auth = (test_utils.test_username, test_utils.test_password)\n expected_url = test_utils.test_powertrack_url\n client = PowerTrackClient(_dummy_callback, auth=expected_auth, config_file_path=config_file)\n\n self.assertEqual(expected_auth[0], client.auth[0])\n self.assertEqual(expected_auth[1], client.auth[1])\n self.assertEqual(expected_url, client.url)", "def __init__(self, **kwargs):\n\n super().__init__(transaction=None, config=None)\n secret_data = kwargs.get('secret_data')\n self.project_id = secret_data.get('project_id')\n\n try:\n credentials = google.oauth2.service_account.Credentials.from_service_account_info(secret_data)\n self.client = googleapiclient.discovery.build(self.google_client_service,\n self.version,\n credentials=credentials)\n\n except Exception as e:\n print()\n raise ERROR_UNKNOWN(message=e)", "def __init__(self, token=None, dry_run=False, server=\"https://api.realartists.com\"):\n \n self.API_VERSION = \"20151105\"\n self.server = server\n self.dry_run = dry_run\n if token is not None:\n self.setToken(token)\n else:\n self.setToken(os.getenv(\"SHIP_API_TOKEN\"))\n if self.token is None:\n raise Exception(\"Cannot find SHIP_API_TOKEN in environment. We need this.\")", "def setup(self):\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time. 
ONLY NEED To AUTH Once\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as self.token:\n self.creds = pickle.load(self.token)\n # If there are no (valid) credentials available, let the user log in.\n if not self.creds or not self.creds.valid:\n if self.creds and self.creds.expired and self.creds.refresh_token:\n self.creds.refresh(Request())\n else:\n self.flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n self.creds = self.flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as self.token:\n pickle.dump(self.creds, self.token)\n\n self.service = build('calendar', 'v3', credentials=self.creds)", "def set_credentials():", "def __initiate_s3client():\n boto3.setup_default_session(region_name=env.get('region'))\n s3client = boto3.client(\n 's3',\n aws_access_key_id=env.get('access_key_id'),\n aws_secret_access_key=env.get('secret_access_key')\n )\n return s3client", "def __init__(self):\n self.application_id = None\n self.secret = None\n self.token = {}", "def setUp(self):\r\n super(CLITestAuthKeystoneWithId, self).setUp()\r\n self.client = client.HTTPClient(user_id=USER_ID,\r\n tenant_id=TENANT_ID,\r\n password=PASSWORD,\r\n auth_url=AUTH_URL,\r\n region_name=REGION)", "def __init__(self, app_key=None, app_sid=None, base_url=None,\n api_version=None, debug=False, proxy=None):\n configuration = Configuration(app_key=app_key,\n app_sid=app_sid,\n base_url=base_url,\n api_version=api_version,\n debug=debug,\n\t\t\t\t\t\t\t\t\t proxy=proxy)\n self.api_client = ApiClient(configuration)", "def __init__(self, kodi_helper, netflix_session):\n self.kodi_helper = kodi_helper\n self.netflix_session = netflix_session\n self.credentials = self.kodi_helper.get_credentials()\n self.profiles = []\n self.video_list_cache = {}\n self.prefetch_login()", "def __init__(self, auth_client_secret: AuthCredentials, dev: bool=False):\n\n self._timeout_config = (2, 20)\n self.auth_expires = 0 # unix time when auth expires\n self.auth_bearer = 0\n self.auth_client_secret = auth_client_secret\n self.session = requests.Session()\n self.dev = dev\n if dev:\n url = 'https://dev.wcs.api.semi.technology'\n else:\n url = 'https://wcs.api.semi.technology'\n self.url = url + '/v1/clusters'\n\n auth_path = (url.replace('://', '://auth.') +\n '/auth/realms/SeMI/.well-known/openid-configuration')\n\n # make _refresh_authentication method to point to _set_bearer method.\n self._refresh_authentication = lambda: self._set_bearer('wcs', auth_path)\n\n if isinstance(auth_client_secret, AuthCredentials):\n self._refresh_authentication()\n else:\n raise ValueError(\"No login credentials provided.\")", "async def spotify(self, ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send('Invalid command passed. 
Use the `?help spotify` command to learn more.')", "def __init__(self, apiKey, apiSecret):\n self.apiKey = apiKey\n self.apiSecret = apiSecret", "def __init__(self):\n\n self._authorization = None\n self._last_used = datetime.utcnow() - timedelta(hours=10)\n\n self._resource_owner_key = None\n self._resource_owner_secret = None\n\n self._consumer_key = etrade_config.oauth_consumer_key\n self._consumer_secret = etrade_config.oath_consumer_secret\n\n self._auth_file_path = etrade_config.auth_file_path\n self._user_name = etrade_config.user_name\n self._user_pwd = etrade_config.user_pwd", "def setup(self):\n self.session = requests.session()\n self.session.headers.update({'Authorization': 'token %s' %\n self.access_token,\n 'Content-Type': 'application/json'})\n self.base_url = self.base_url_parts", "def recreate_client(token=None):\n if token:\n # If we've successfully retrieved the token from the session (or have\n # been provided with a token), get authorization.\n auth = get_spotify_auth(token)\n # TODO make sure auth token uses this too\n auth.refresh_token_if_needed(app_config[\"SPOTIFY_AUTH\"][\"token_duration\"])\n return Client(auth, session.get(\"client_session\"))\n else:\n return None" ]
[ "0.7275522", "0.7169035", "0.7160154", "0.7054514", "0.70088327", "0.6976066", "0.6880722", "0.6798923", "0.6781801", "0.67027813", "0.66602165", "0.6602743", "0.64574254", "0.6410066", "0.6331362", "0.63175875", "0.62989295", "0.6185303", "0.6183346", "0.6170306", "0.6090339", "0.60597277", "0.60455346", "0.60439485", "0.60426766", "0.6041466", "0.6040368", "0.60253686", "0.6013", "0.59943753", "0.59845406", "0.5973499", "0.59485006", "0.5942833", "0.5937066", "0.59292775", "0.5913646", "0.5904287", "0.59023184", "0.58798754", "0.58680475", "0.58668107", "0.5862634", "0.58614045", "0.5858266", "0.5850934", "0.5850891", "0.58491695", "0.5841024", "0.5838583", "0.5813238", "0.581074", "0.580892", "0.5801641", "0.5785367", "0.57799643", "0.5778541", "0.57663476", "0.5763139", "0.5763139", "0.5763139", "0.5758343", "0.57566667", "0.57448065", "0.574166", "0.57278", "0.5724094", "0.5718784", "0.5713001", "0.56928986", "0.5690406", "0.5690232", "0.56883925", "0.5676769", "0.56455886", "0.56448305", "0.5644056", "0.5642149", "0.5634985", "0.56043214", "0.56038547", "0.5603607", "0.5603272", "0.5601667", "0.55923337", "0.5589816", "0.5587635", "0.5587089", "0.558653", "0.55821437", "0.55808413", "0.5575011", "0.5573103", "0.5567165", "0.556016", "0.55591", "0.5556079", "0.55554795", "0.5554975", "0.5553404" ]
0.78885454
0
[creates a new playlist with given name, desc with given limits]
def create_new_playlist(self, name, desc=''):
    pl_names, _, _ = self.list_playlists()
    if name in pl_names:
        self.logger.debug(
            'Playlist Name Already Exists, please use another name')
    else:
        pl = self.sp.user_playlist_create(
            self.user, name, public=False, description=desc)
        self.sp.user_playlist_change_details(
            self.user, pl['id'], collaborative=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_playlist(self, playlist_name):\n print(\"create_playlist needs implementation\")", "def create_playlist():\n sp = credentials()\n sp.user_playlist_create('truetiming', name='Billboard Hot 100')", "def create_playlist(self, data):\n pass", "def create_playlist(self, playlist_name):\n #self._video_playlist.name=playlist_name\n #self._video_playlist.caseless=playlist_name.lower()\n #print(f\"Successfully created new playlist: {self._video_playlist.name}\")\n if playlist_name.lower() not in self.playlists:\n self.playlists[playlist_name.lower()]=[]\n print(\"Successfully created new playlist: {0}\".format(playlist_name))\n else:\n print(\"Cannot create playlist: A playlist with the same name already exists\")", "def create_playlist(self, playlist_name):\n for playlist in self.playlists.keys():\n if playlist_name.upper() == playlist.upper():\n print(\"Cannot create playlist: A playlist with the same name already exists\")\n break\n else:\n self.playlists[playlist_name]=[]\n print(\"Successfully created new playlist: \" + playlist_name)\n # print(\"create_playlist needs implementation\")", "def create_playlist(self, request):\n # TODO: Max amount of playlists at 20 for a user\n user = Account.find_by_id(request.userid)\n if user is None:\n print \"User not found\" \n return PlaylistResponse(errmsg=\"User ID not found\")\n new_pl = Playlist.add_new_playlist(user.key, request.name)\n return PlaylistResponse(pid=new_pl.key.id())", "def create_playlist(self, playlist_name):\n if playlist_name.upper() in self.playlist.keys():\n print(\"Cannot create playlist: A playlist with the same name already exists\")\n else:\n self.playlist[playlist_name.upper()] = []\n self.playlist_list.append(playlist_name)\n print(f\"Successfully created new playlist: {playlist_name}\")", "def create_playlist(self, playlist_name):\n if playlist_name.lower() in self.playlists:\n print(\"Cannot create playlist: A playlist with the same name already exists\")\n else:\n self.playlist_names[playlist_name.lower()] = playlist_name\n self.playlists[playlist_name.lower()] = []\n print(\"Successfully created new playlist:\", playlist_name)", "def create_playlist(self, playlist_name):\n new_playlist_id = playlist_name.lower()\n if new_playlist_id in self.playlists.keys():\n print(\"Cannot create playlist: A playlist with the same name already exists\")\n return\n\n new_playlist = Playlist(playlist_name)\n self.playlists[new_playlist_id] = new_playlist\n print(f\"Successfully created new playlist: {playlist_name}\")", "def playlist_create(self, user_id: str, name: str, public: bool = True,\n description: str = ''):\n payload = {\n 'name': name,\n 'public': public,\n 'description': description\n }\n return self._post(f'users/{user_id}/playlists', payload=payload)", "def create_playlist(self, playlist_name):\n if playlist_name.lower() in self._playlists:\n print(\"Cannot create playlist: A playlist with the same name already exists\")\n return\n print(f\"Successfully created new playlist: {playlist_name}\")\n self._playlists[playlist_name.lower()] = Playlist(playlist_name)", "def create_playlist(self, playlist_name):\n playlist_name = Playlist()\n if self != playlist_name:\n print(f\"successfully created new playlist: {playlist_name}\")\n else:\n print(\"Cannot create playlist: A playlist with the same name already exists\")", "def create_playlist(self, title, description=\"\"):\n if self.youtube is None:\n self.youtube = __get_client()\n # This code creates a new, private playlist in the authorized user's\n # channel.\n 
playlists_insert_response = self.youtube.playlists().insert(\n part=\"snippet,status\",\n body = {\n \"snippet\": {\n \"title\": title,\n \"description\": description\n },\n \"status\": {\n \"privacyStatus\": \"private\"\n }\n }\n ).execute()\n return playlists_insert_response", "def spotify_create_playlist(\n playlist_name: str,\n access_token: str,\n user_spotify_id: str,\n public: bool = True,\n description: str = None,\n) -> str:\n headers = {\n \"Authorization\": \"Bearer {}\".format(access_token),\n \"Content-Type\": \"application/json\",\n }\n body = {\"name\": playlist_name, \"public\": public}\n if description is not None:\n body[\"description\"] = description\n response = requests.post(\n \"https://api.spotify.com/v1/users/{}/playlists\".format(user_spotify_id),\n headers=headers,\n json=body,\n )\n if response.status_code != 200 and response.status_code != 201:\n return \"Error {}\".format(response.text)\n return response.json()[\"id\"]", "def create_playlist(self, name):\n\n user_id = self.get_current_user()\n endpoint = f\"/users/{user_id}/playlists\"\n headers = self.headers\n headers.update()\n response = self._send(\n endpoint,\n \"POST\",\n extra_headers={\"Content-Type\": \"application/json\"},\n data=json.dumps({\"name\": name, \"public\": False})\n )\n playlist_id = response.json()[\"id\"]\n return playlist_id", "def create_playlist(user_id, sp, recommendations, name, description):\r\n \r\n # Get current user ID\r\n current_user = sp.current_user()\r\n current_user_id = current_user['id']\r\n \r\n # Get list of track ID's\r\n track_id_list = list(recommendations['id'].values)\r\n \r\n # Create Empty playlist\r\n sp.user_playlist_create(user = user_id, \r\n name = name, \r\n description = description)\r\n \r\n # Get playlist ID\r\n playlists = sp.current_user_playlists(limit=1)\r\n playlist_name = playlists['items'][0]['name']\r\n playlist_id = playlists['items'][0]['id']\r\n \r\n # Add tracks to playlist\r\n sp.user_playlist_add_tracks(user = current_user_id, \r\n playlist_id = playlist_id, \r\n tracks = track_id_list)\r\n \r\n # Check if playlist is succesfully created.\r\n if name == playlist_name:\r\n return '**Playlist was succesfully created on your Spotify account.**'\r\n else:\r\n return '**Playlist was not succesfully created.**'", "def newpl(self, args):\n if not args:\n self.err_print('One argument required')\n return\n elif len(args) == 1:\n plname = args[0]\n\n if self.pl_exists(plname) >= 0:\n self.err_print('Playlist \"{}\" already exists'.format(plname))\n return\n\n playlist.Playlist.init_pl(plname, self.ui.db)\n newpl = menu.Music_menu(win=self.ui.rightwin.win,\n data=playlist.Playlist(name=plname, db=self.ui.db),\n form=config.SONG_DISP,\n palette=self.ui.palette[0], ui=self.ui)\n else:\n plname = args[0]\n plfile = args[1]\n if not os.path.isfile(plfile):\n self.err_print('File does not exist: {}.'.format(plfile))\n return\n\n if self.pl_exists(plname) >= 0:\n self.err_print('Playlist \"{}\" already exists'.format(plname))\n return\n\n playlist.init_pl(plname, self.ui.db)\n newpl = menu.Menu(win=self.ui.rightwin.win,\n data=playlist.Playlist(name=plname, db=self.ui.db),\n form=config.SONG_DISP,\n cursor_colour=config.CURSOR[0],\n highlight_colour=config.HIGHLIGHT_COLOUR[0],\n normal_colour=config.NORMAL[0])\n\n newpl.insert_from_file(plfile)\n\n self.ui.leftwin.insert(newpl)\n self.ui.leftwin.disp()", "def user_playlist_create(self, user, name, public=True, description=\"\", **kwargs):\n # pylint: disable=no-member\n data = {\"name\": name, 
\"public\": public, \"description\": description}\n return self._post(\n API.PLAYLISTS.value.format(user_id=user), payload=data, **kwargs\n )", "def create_playlist(self, playlist_name: str, song_ids: List[str]) -> str:\n user = self.init_user()\n user_id = user.me()['id']\n playlist_data = user.user_playlist_create(\n user=user_id, name=playlist_name, public=True)\n user.playlist_add_items(playlist_data['id'], song_ids)\n playlist_link = playlist_data['external_urls']['spotify']\n return playlist_link", "def createspotifyplaylist(accesstoken, name, playlists, tracklist, userid):\n\n # find a unique name for the playlist\n playlistname = \"{} - flowed\".format(name)\n if playlistname in playlists:\n num = 1\n playlistname = \"{} - flowed ({})\".format(name, num)\n while playlistname in playlists:\n num = num + 1\n playlistname = \"{} - flowed ({})\".format(name, num)\n\n # create playlist\n headers = {}\n headers[\"Authorization\"] = \"Bearer {}\".format(accesstoken)\n headers[\"Content-Type\"] = \"application/json\"\n\n payload = {}\n payload[\"name\"] = playlistname\n\n url = \"https://api.spotify.com/v1/users/{}/playlists\".format(userid)\n\n r = requests.post(url, headers=headers, json=payload)\n\n response = r.json()\n\n\n if \"collaborative\" not in response:\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n retry = True\n while retry:\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n r = requests.post(url, headers=headers, json=payload)\n response = r.json()\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n continue\n else:\n print(\"error: problem creating spotify playlist\")\n print(response[\"error\"])\n return(False)\n elif \"collaborative\" in response:\n break\n else:\n print(\"error: problem creating spotify playlist\")\n print('no error response')\n return(False)\n else: \n print(\"error: problem creating spotify playlist\")\n print(response[\"error\"])\n return(False)\n else:\n print(\"error: problem creating spotify playlist\")\n print('no error response')\n return(False)\n\n playlistid = response[\"id\"]\n playlisturl = response[\"external_urls\"][\"spotify\"]\n\n # add tracks to playlist\n while len(tracklist) > 100:\n\n # add first 100\n headers = {}\n headers[\"Authorization\"] = \"Bearer {}\".format(accesstoken)\n headers[\"Content-Type\"] = \"application/json\"\n\n payload = {}\n payload[\"uris\"] = tracklist[:100]\n\n r = requests.post(\"https://api.spotify.com/v1/users/{}/playlists/{}/tracks\"\n .format(userid, playlistid),\n headers=headers,\n json=payload)\n\n response = r.json()\n if \"snapshot_id\" not in response:\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n continue\n else:\n print(\"error: problem adding songs to playlist\")\n print(response[\"error\"])\n return(False)\n else:\n print(\"error: problem adding songs to playlist\")\n print(\"no error response\")\n return(False)\n\n tracklist = tracklist[100:]\n\n if tracklist:\n\n # add the remainder of the tracks\n headers = {}\n headers[\"Authorization\"] = \"Bearer {}\".format(accesstoken)\n headers[\"Content-Type\"] = \"application/json\"\n\n payload = {}\n payload[\"uris\"] = tracklist\n\n r = requests.post(\"https://api.spotify.com/v1/users/{}/playlists/{}/tracks\"\n .format(userid, playlistid),\n headers=headers,\n json=payload)\n\n response = r.json()\n if \"snapshot_id\" not in response:\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n retry = True\n 
while retry:\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n r = requests.post(\"https://api.spotify.com/v1/users/{}/playlists/{}/tracks\"\n .format(userid, playlistid),\n headers=headers,\n json=payload)\n response = r.json()\n if \"snapshot_id\" in response:\n break\n elif response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n continue\n else:\n print(\"error: createspotifyplaylist request failed\")\n print(response[\"error\"])\n return(False)\n else:\n print(\"error: createspotifyplaylist request failed\")\n print(\"no error response\")\n return(False)\n else:\n print(\"error: createspotifyplaylist request failed\")\n print(response[\"error\"])\n return(False)\n else:\n print(\"error: createspotifyplaylist request failed\")\n print(\"no error response\")\n return(False)\n\n return(playlistname, playlisturl)", "def create_playlist(access_token):\n request_body = json.dumps({\n \"name\": \"SpotiAdd\",\n \"description\": \"All Liked Youtube Videos\",\n \"public\": True\n })\n userId = getUserId(access_token)\n query = \"https://api.spotify.com/v1/users/{}/playlists\".format(\n userId)\n response = requests.post(\n query,\n data=request_body,\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {}\".format(access_token)\n }\n )\n response_json = response.json()\n # print(\"create_playlist_id : {}\".format(response_json),file = sys.stdout)\n return response_json[\"id\"]", "def create_dp_playlist(msg):\n print ''\n print '------'\n print '***Dynamic Programming method***'\n print 'Original message: ', msg\n # Normalize and tokenize message and use it to query songs\n words = normalize(msg).split(' ')\n songs = ngram_search(words)\n # Form playlist and print\n playlist = dp_parse(normalize(msg), songs=songs)\n print 'Playlist: '\n print '# | SONG TITLE | ARTIST | ALBUM'\n for i, p in enumerate(playlist[2]):\n song_info = '{0} | {1} | {2}'.format(p.Title, ', '.join(p.Artists),\n p.Album)\n print '{0}. | '.format(i + 1) + song_info", "def create_playlist(self):\n playlist=self.sp.user_playlist_create(user=self.username,name=self.nameOfPlaylist,description=self.description)\n return playlist['id']", "def __init__(self, pname, pmax, plist):\n\n #the player has to have... 
\n self.name = pname\n self.max_items = pmax\n self.items = plist", "def playlist_add(nums, playlist):\n nums = _parse_multi(nums)\n\n if not g.userpl.get(playlist):\n playlist = playlist.replace(\" \", \"-\")\n g.userpl[playlist] = Playlist(playlist)\n\n for songnum in nums:\n g.userpl[playlist].songs.append(g.model.songs[songnum - 1])\n dur = g.userpl[playlist].duration\n f = (len(nums), playlist, g.userpl[playlist].size, dur)\n g.message = F('added to saved pl') % f\n\n if nums:\n save_to_file()\n\n g.content = generate_songlist_display()", "def create_new_pl(self, params):\n name = params[ONE]\n user = params[ZERO]\n songs = params[2].split('&')\n msg = self.db.create_new_pl(songs, name, user)\n self.send_message(msg)", "def add_playlist(self, names, printQueue=False):\n idtoadd = [self.listIDs[n] for n in names]\n self.spotify.add_playlist_to_queue(idtoadd)\n\n if printQueue:\n self.console.print('This is your current queue: ')\n self.console.print(self.spotify.queue.loc[:10, ['name', 'album', 'artist']])", "def playlist(self, channel_list, limit, part='contentDetails', only_id=1):\n playlist_details = {}\n key = self.keylist[self.keyindex]\n url_pi = 'https://www.googleapis.com/youtube/v3/playlistItems/'\n\n if limit <= 50 and limit > 0:\n maxResults = limit\n else:\n maxResults = 50\n\n for chnlid in channel_list:\n vidcount = initial = 0\n nextPageToken = ''\n results = []\n # print('UU'+chnlid[2:])\n try:\n while nextPageToken or initial == 0:\n querystring = {\n 'playlistId': 'UU' + chnlid[2:],\n 'part': part,\n 'key': key,\n 'pageToken': nextPageToken,\n 'maxResults': maxResults\n }\n\n\n response = request_handler(self, url_pi, params=querystring, wait=5) #ids=chnlid)\n # print(\"#\"*5, response.json())\n # print(response.json())\n if response.get('error'):\n while response['error']['errors'][0]['reason'] == 'quotaExceeded' or \\\n response['error']['errors'][0]['reason'] == 'dailyLimitExceeded':\n key = keychange(self)\n querystring = {\n 'playlistId': 'UU' + chnlid[2:],\n 'part': part,\n 'key': key,\n 'pageToken': nextPageToken,\n 'maxResults': maxResults\n }\n\n response = request_handler(self, url_pi, params=querystring, wait=5, ids=chnlid)\n\n if response.get('error'):\n playlist_details.update({chnlid: 'error'})\n if response['error']['errors'][0]['reason'] == 'keyInvalid':\n return [{chnlid:'error'}]\n break\n\n if response.get('Interneterror'):\n results.append(response)\n #print(playlist_details)\n break\n\n if limit == -1:\n limit = response['pageInfo']['totalResults']\n # print(response,response.text)\n \n if only_id == 1:\n for i in range(response['pageInfo']['resultsPerPage']):\n try:\n results.append(response['items'][i]['contentDetails']['videoId'])\n except:\n pass\n else:\n results.append(response['items'])\n nextPageToken = response.get('nextPageToken')\n vidcount += len(response['items'])\n if vidcount >= limit:\n break\n print(\"Video id found: \", chnlid, \" : \", vidcount)\n #{'error':[]}\n \n initial += 1\n \n playlist_details.update({chnlid:results})\n\n except Exception as e:\n print(\"Error: \", e, \" : \", traceback.print_exc())\n playlist_details[chnlid] = 'error'\n break\n\n return playlist_details", "def NewListFromParameters(self, name:str, desc:str) -> AbstractItemList:\n ret = self.NewList()\n ret._name = name\n ret._desc = desc\n return ret", "def new_queue(self, params, maxUsers=1):\n self.spotify.reset_queue()\n\n #Extract parameters\n mood = params[0]\n users = []\n for i in range(maxUsers):\n if len(params[i + 1]) > 0:\n 
users.append(params[i + 1])\n\n #Add default host if only one guest is present\n # if len(users) == 1:\n # users.append('Paolo')\n\n #Lists to load\n names = []\n for n in self.listIDs.keys():\n for u in users:\n if len(mood) > 0:\n if u + ':' + mood in n:\n names.append(n)\n else:\n if 'top:' + u in n:\n names.append(n)\n\n\n self.add_playlist(names)", "def show_playlist(self, playlist_name):\n print(f\"Showing playlist: {playlist_name}\")\n print(\" No videos here yet\")", "def create(self, name, uri_scheme=None):\n if uri_scheme in self.backends.with_playlists:\n backends = [self.backends.with_playlists[uri_scheme]]\n else:\n backends = self.backends.with_playlists.values()\n\n for backend in backends:\n with _backend_error_handling(backend):\n result = backend.playlists.create(name).get()\n if result is None:\n continue\n validation.check_instance(result, Playlist)\n listener.CoreListener.send('playlist_changed', playlist=result)\n return result\n\n return None", "def generate_playlist():\n\n with open(r'C:\\Users\\adria\\OneDrive\\Desktop\\Muzica.txt', 'w+', encoding='utf-8') as playlist:\n playlist_songs = os.listdir('D:\\\\Muzica\\\\')\n for song in playlist_songs:\n playlist.write(song + '\\n')", "def new_playlist_command(self):\n self.parent.song_object_list.clear()\n self.display_data(self.parent.song_object_list)\n self.playlist_select.set(\"Working Playlist\")", "def add_song_to_playlist(self):\n #populate our songs dictionary\n self.get_liked_videos()\n\n #collect all of uri\n uris = []\n for song,info in self.all_song_info.items():\n uris.append(info[\"spotify_uri\"])\n\n #create a new playlist\n playlist_id = self.create_playlist()\n\n #add all songs into new playlist\n\n #Spotipy can only add 100 songs at a time to a playlist that is why this method is taken\n g = len(uris)\n if g > 100:\n s = 0\n e = 99\n while g > 100:\n self.sp.user_playlist_add_tracks(user=self.username, playlist_id=playlist_id,\n tracks=uris[s:e])\n g -= 100\n s = e + 1\n e += 100\n self.sp.user_playlist_add_tracks(user=self.username, playlist_id=playlist_id,\n tracks=uris[s:])\n else:\n self.sp.user_playlist_add_tracks(user=self.username, playlist_id=playlist_id,\n tracks=uris)", "def user_playlist_create(self, user, name, public=True, collaborative=False):\n url = '/users/{user_id}/playlists'\n uid = get_id('user', user)\n body = dict(name=name, public=public, collaborative=collaborative)\n return self._post(url.format(user_id=uid), payload=body)", "def playlist_create_failed(self, name):\n message = {\n \"timestamp\": self._get_time(),\n \"level\": \"ERROR\",\n \"type\": \"PLAYLIST_CREATE_FAILED\",\n \"name\": name,\n }\n\n self._log_queue.put(json.dumps(message))", "def create(\n self,\n name: str,\n uri_scheme: Optional[UriScheme] = None,\n ) -> Optional[Playlist]:\n if uri_scheme in self.backends.with_playlists:\n backends = [self.backends.with_playlists[uri_scheme]]\n else:\n backends = self.backends.with_playlists.values()\n\n for backend in backends:\n with _backend_error_handling(backend):\n result = backend.playlists.create(name).get()\n if result is None:\n continue\n validation.check_instance(result, Playlist)\n listener.CoreListener.send(\"playlist_changed\", playlist=result)\n return result\n\n return None", "def show_playlist(self, playlist_name):\n if self.playlists[playlist_name.lower()]!=[]:\n print(f\"Showing playlist: {playlist_name}\")\n for i in self.playlists[playlist_name.lower()]:\n videos = self._video_library.get_all_videos()\n templist = []\n\n def converttostr(input_seq, 
seperator):\n # Join all the strings in list\n final_str = seperator.join(input_seq)\n return final_str\n\n for vid in videos:\n if i == vid.video_id:\n templist.append([vid.title,vid.video_id,vid.tags])\n\n print(f\" {templist[0][0]} ({templist[0][1]}) [{converttostr(list(templist[0][2]), ' ')}]\")\n else:\n print(f\"Showing playlist: {playlist_name}\")\n print(\" No videos here yet\")\n #print(\"show_playlist needs implementation\")", "def add_videos(playlist):\n surl = playlist['link']\n # 작은 playlist의 url을 surl에 저장\n soup = get_soup(surl)\n # 작은 플레이리스트의 html 파싱하여 soup에 저장\n print(f\" getting videos for playlist: {playlist['title']}\")\n\n videos = []\n\n # items are list of video a links from list\n items = soup('a', class_='yt-uix-tile-link')\n # a 태그의 class가 'yt-uix-tile-link'인 태그 items에 저장\n # items는 작은 플레이리스트의 동영상 목록들임\n\n # note first part of look get info from playlist page item,\n # and the the last part opens the video and gets more details\n if len(items) > 0:\n for i in items:\n # 각각의 items i에 하나씩 저장\n d = dict()\n vurl = fix_url(i['href'])\n # 동영상 url을 vurl에 저장\n t = i.find_next('span', {'aria-label': True})\n # 동영상의 span 태그 중 aria=label값이 존재하는 것 t에 저장\n # t는 동영상의 재생 시간임\n d['time'] = t.text if t else 'NA'\n # d 딕셔너리에 t저장\n\n d.update(parse_video(vurl))\n videos.append(d)\n # videos에 d를 append\n\n else: # must be only one video\n d = {'time': 'NA'}\n d.update(parse_video(surl))\n videos.append(d)\n\n # add new key to this playlist of list of video infos\n playlist['videos'] = videos\n print()", "async def create_playlist_players(\n self, playlist: Playlist, requester: discord.Member\n ) -> List[Player]:\n\n return await Player.make_multiple_players(\n self.youtube,\n playlist.url,\n [\n str(song) for song in playlist.songs\n ], # Converts the song to str to convert any spotify tracks.\n requester,\n )", "def _create_offer(\n org,\n offer_item_name='Test Item',\n offer_limit=None,\n currents_share=25,\n is_master=False\n):\n offer_item = Item(name=offer_item_name)\n offer_item.save()\n\n offer = Offer(\n org=org,\n item=offer_item,\n currents_share=currents_share,\n is_master=is_master\n )\n\n if offer_limit:\n offer.limit = offer_limit\n\n offer.save()\n\n return offer", "def add_from_playlist(self, params):\n lists = params\n\n # Lists to load\n names = []\n for n in self.listIDs.keys():\n for l in lists:\n if 'playlist:' + l in n:\n names.append(n)\n\n self.add_playlist(names)", "def add_to_playlist(track_ids, playlist_name):\n \n playlist_id = find_playlist(playlist_name)\n \n spotifyObject.user_playlist_add_tracks(config.USERNAME, playlist_id,\n track_ids, position=None)", "def add_tracks_to_lib(title, gm_api):\r\n # Extract single playlist\r\n if not (gm_api.is_authenticated):\r\n sys.stderr.write('Error: api not authenticated')\r\n return None\r\n allPLs = gm_api.get_all_user_playlist_contents()\r\n\r\n pl= next((p for p in allPLs if p['name'] == title), None)\r\n if pl == None:\r\n sys.stderr.write('Error: could not find desired playlist')\r\n return None\r\n # add playlist's tracks to library\r\n # to_add = []\r\n num_added = 0\r\n num_bad_data = 0\r\n for t in pl['tracks']:\r\n metadata = t.get('track', None)\r\n if metadata != None:\r\n #to_add.append(metadata['storeId'])\r\n gm_api.add_store_tracks([metadata['storeId']])\r\n num_added += 1\r\n else:\r\n num_bad_data += 1\r\n # Gmusicapi call\r\n #gm_api.add_store_tracks(to_add)\r\n #print(\"Added \", len(to_add), \" tracks to library.\\n\")\r\n print(\"Added \", num_added, \" tracks to library.\\n\")\r\n 
print(\"Unable to add \", num_bad_data, \" tracks.\\n\")", "def add_video_to_playlist(youtube, args, privacy=\"public\"):\n video_id = args['video_id']\n playlist_id = args['playlist_id']\n \n print(video_id)\n #print(type(args))\n \n if playlist_id:\n return add_video_to_existing_playlist(youtube, playlist_id, video_id)\n else:\n lib.debug(\"Error adding video to playlist\")", "def show_playlist(self, playlist_name):\n playlist_exists = False\n for playlist in list(self.playlists.keys()):\n if playlist_name.upper() == playlist.upper():\n playlist_exists = True\n real_playlist_name = playlist\n break\n if playlist_exists:\n print(f\"Showing playlist: {playlist_name}\")\n if len(self.playlists[real_playlist_name]) == 0:\n print(\"\\tNo videos here yet\")\n else:\n for song in self.playlists[real_playlist_name]:\n video = self._video_library.get_video(song)\n tags = str(video.tags)\n tags=tags.replace(\"'\",\"\")\n tags=tags.replace(\",\", \"\") \n tags=tags.replace(\")\", \"\") \n tags=tags.replace(\"(\", \"\") \n print(f\"{video.title} ({video.video_id}) [{tags}]\")\n\n else:\n print(f\"\\tCannot show playlist {playlist_name}: Playlist does not exist\")\n\n # print(\"show_playlist needs implementation\")", "def createLimit(name, maxValue):\n return Limit(Cuebot.getStub('limit').Create(\n limit_pb2.LimitCreateRequest(name=name, max_value=maxValue), timeout=Cuebot.Timeout))", "def new_song():\n song_id = int(request.args['song_id'])\n track_info = shiva.get_tracks([song_id])[song_id]\n vlc.add_song(track_info['path'])\n return 'ok'", "def obj_create(self, bundle, request=None, **kwargs):\n return super(PlaylistResource, self).obj_create(bundle, request, user=request.user)", "async def playlist(self, data, msg):\n for i in data['queue']:\n print(i)\n self.player[msg.guild.id]['queue'].append(\n {'title': i, 'author': msg})", "async def add_playlist(\n self, user: discord.User, url: str\n ) -> Optional[UserPlaylist]:\n\n playlist = await get_playlist(self.spotify, self.youtube, url)\n\n if not playlist:\n return\n\n generated_id = str(uuid.uuid4())\n await self.database.insertifnotexists(\n self.tables[\"playlists\"],\n {\"user\": user.id, \"playlist_url\": url, \"id\": generated_id},\n {\"user\": user.id, \"playlist_url\": url},\n )\n\n return UserPlaylist(self, user, generated_id, playlist)", "def test_basic_playlist_functionality(self):\n self.assertEqual(mpmodels.MediaItem.objects.count(), 0)\n set_resources_and_sync(\n [make_video(title='test title', media_id='1')],\n [make_channel(\n title='Didius Julianus',\n description='What evil have I done?',\n media_ids=['1'],\n collection_id='2',\n instid='UIS',\n )],\n )\n media_item = mpmodels.MediaItem.objects.first()\n playlist = mpmodels.Playlist.objects.filter(sms__id='2').first()\n\n self.assertEqual(playlist.title, 'Didius Julianus')\n self.assertEqual(playlist.description, 'What evil have I done?')\n self.assertEqual(len(playlist.media_items), 1)\n self.assertEqual(playlist.media_items[0], media_item.id)", "def __init__(self, name, plays, number):\n self.name = name\n self.plays = plays\n self.number = number", "def create_popular_shows():\n return", "def create_item(_id, item_name, description):\n data_ = Data.get_the_data(_id, Data.bucketlists)\n for data in data_:\n bucketlist = Bucketlist(data['title'],\n data['owner'],\n data['intro'],\n data['owner_id'],\n data['_id'])\n bucketlist.new_item(item_name=item_name,\n description=description)", "def __init__(\n self,\n data,\n on_repeat,\n datatype=None,\n playlisttype=None,\n 
show_lyrics=False,\n dont_cache_search=False,\n no_cache=False,\n no_related=False,\n disable_kw=False,\n ):\n URLPlayer.__init__(\n self,\n show_lyrics=show_lyrics,\n dont_cache_search=dont_cache_search,\n no_cache=no_cache,\n )\n NamePlayer.__init__(\n self,\n show_lyrics=show_lyrics,\n dont_cache_search=dont_cache_search,\n no_cache=no_cache,\n disable_kw=disable_kw,\n )\n self._iterable_list = []\n self.data = data\n self.datatype = datatype\n self.playlisttype = playlisttype\n self.no_related = no_related\n self.on_repeat = on_repeat\n self._playlist_names = [\n \"spotify\",\n \"youtube\",\n \"soundcloud\",\n \"billboard\",\n \"jiosaavn\",\n \"gaana\",\n \"cached\",\n \"youtubemusic\",\n ]\n self._datatypes = [\"playlist\", \"song\", \"URL\"]\n self.show_lyrics = show_lyrics\n self.dont_cache_search = dont_cache_search\n self.no_cache = no_cache", "def test_api_video_create_by_playlist_admin_missing_title(self):\n user = factories.UserFactory()\n playlist = factories.PlaylistFactory()\n factories.PlaylistAccessFactory(\n role=models.ADMINISTRATOR, playlist=playlist, user=user\n )\n\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(user.id)\n jwt_token.payload[\"user\"] = {\n \"id\": str(user.id),\n \"username\": user.username,\n }\n\n self.assertEqual(models.Video.objects.count(), 0)\n\n response = self.client.post(\n \"/api/videos/\",\n {\"playlist\": str(playlist.id)},\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(models.Video.objects.count(), 0)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(\n response.json(),\n {\"errors\": [{\"title\": [\"This field is required.\"]}]},\n )", "def add_playlist_tracks(self, username, playlist_name, track_list):\n playlist_id = self.get_playlist_id(username, playlist_name)\n request_chunks = [track_list[i:i + 100] for i in range(0, len(track_list), 100)] # Blocks of 100 songs\n for track_chunk in request_chunks:\n self.spotify.user_playlist_add_tracks(username, playlist_id, track_chunk)", "def savePlaylist():\n\n # get user form info\n title = request.json.get('title')\n interval = request.json.get('interval')\n orig_playlist_id = request.json.get('playlist_id')\n\n # create a new playlist\n new_playlist = crud.createPlaylist(session, title)\n\n new_playlist_id = new_playlist['id']\n\n user_id = session['user_id']\n\n # store playlist in DB\n savedPlaylist = crud.storeSavedPlaylist(user_id, orig_playlist_id, \n new_playlist_id, interval, title)\n print(savedPlaylist)\n \n # copy over tracks in original playlist to the new playlist\n snapshot_id = crud.updatePlaylist(session, orig_playlist_id, new_playlist_id)\n\n return snapshot_id", "def playlist_rename(playlists):\n # Deal with old playlist names that permitted spaces\n a, b = \"\", playlists.split(\" \")\n while a not in g.userpl:\n a = (a + \" \" + (b.pop(0))).strip()\n if not b and a not in g.userpl:\n g.message = F('no pl match for rename')\n g.content = g.content or playlists_display()\n return\n\n b = \"-\".join(b)\n g.userpl[b] = Playlist(b)\n g.userpl[b].songs = list(g.userpl[a].songs)\n playlist_remove(a)\n g.message = F('pl renamed') % (a, b)\n save_to_file()", "def insert_playlist(self, playlist_contents):\n\n # Just make sure we don't overwrite an existing playlist! 
Silly python not having do-while..\n while True:\n playlist_uuid = str(uuid4())\n if playlist_uuid not in self.playlists:\n break\n\n try:\n playlist = Playlist(playlist_contents)\n except PlaylistValidationError as e:\n rsp = rsp_codes[8]\n rsp['trace'] = traceback.format_exc()\n return rsp\n\n self.playlists[playlist_uuid] = playlist\n\n rsp = rsp_codes[0]\n rsp['playlist_uuid'] = playlist_uuid\n return rsp", "def test_list_playlist_not_duplicated(self):\n user = factories.UserFactory()\n organization = factories.OrganizationFactory()\n\n playlist = factories.PlaylistFactory(\n organization=organization,\n )\n factories.PlaylistAccessFactory(\n playlist=playlist,\n user=user,\n role=models.ADMINISTRATOR,\n )\n factories.PlaylistAccessFactory.create_batch(\n 3,\n playlist=playlist,\n role=models.ADMINISTRATOR,\n )\n factories.OrganizationAccessFactory(\n organization=organization,\n user=user,\n role=models.ADMINISTRATOR,\n )\n factories.OrganizationAccessFactory.create_batch(\n 3,\n organization=organization,\n role=models.ADMINISTRATOR,\n )\n\n jwt_token = UserAccessTokenFactory(user=user)\n\n response = self.client.get(\n \"/api/playlists/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json()[\"count\"], 1)\n self.assertEqual(response.json()[\"results\"][0][\"id\"], str(playlist.id))", "async def create_player(\n self, query: str, requester: discord.Member\n ) -> List[Player]:\n\n if SPOTIFY_RE.match(query) and self.spotify_support:\n return await Player.make_multiple_players(\n self.youtube,\n query,\n [song for song in await self.spotify.get_songs(query)],\n requester,\n )\n\n return await Player.make_players(self.youtube, query, requester)", "def show_playlist(self, playlist_name):\n if playlist_name.lower() not in self._playlists:\n print(f\"Cannot show playlist {playlist_name}: Playlist does not exist\")\n return\n playlist = self._playlists[playlist_name.lower()]\n print(f\"Showing playlist: {playlist_name}\")\n if not playlist.videos:\n print(\"No videos here yet\")\n for video in playlist.videos:\n print(video)", "def playlist_search(search_term, results=5):\r\n if search_term:\r\n url = PLAYLIST_SEARCH_URL.format(API_KEY, util.web.quote(search_term.encode('ascii', 'ignore')))\r\n response = util.web.http_get(url=url, json=True, referer='https://tinychat.com')\r\n\r\n if response['json'] is not None:\r\n play_lists = []\r\n try:\r\n if 'items' in response['json']:\r\n for i, item in enumerate(response['json']['items']):\r\n if i == results:\r\n return play_lists\r\n playlist_id = item['id']['playlistId']\r\n playlist_title = item['snippet']['title'].encode('ascii', 'ignore')\r\n play_list_info = {\r\n 'playlist_title': playlist_title,\r\n 'playlist_id': playlist_id\r\n }\r\n play_lists.append(play_list_info)\r\n except KeyError as ke:\r\n log.error(ke, exc_info=True)\r\n return None", "def set_playlist(self, playlist: List[Dict[str, Any]]) -> None:\n self._playlist = copy.deepcopy(playlist)", "def create_df_playlist(api_results,sp = None, append_audio = True):\r\n df = create_df_saved_songs(api_results[\"tracks\"])\r\n if append_audio == True:\r\n assert sp != None, \"sp needs to be specified for appending audio features\"\r\n df = append_audio_features(df,sp)\r\n return df", "def add_to_playlist(file, list, data = None):\n\n if not list:\n return\n\n exists = os.path.isfile(list)\n playlist = open(list, 'a')\n if not exists:\n playlist.write(\"#EXTM3U\\n\")\n\n if data:\n metadata = u\"#EXTINF: 
{}, {} - {} \\n\".format(data['time'], data['artist'], data['title'])\n playlist.write(metadata.encode('utf8'))\n\n playlist.write(file + \"\\n\")\n playlist.close()\n try:\n print 'Added to {}'.format(os.path.basename(list))\n except:\n pass", "def add_to_playlist(self, playlist_name, video_id):\n if playlist_name.lower() not in self.playlists:\n print(\"Cannot add video to\", playlist_name, end=\"\")\n print(\": Playlist does not exist\")\n elif self._video_library.get_video(video_id) is None:\n print(\"Cannot add video to\", playlist_name, end=\"\") \n print(\": Video does not exist\")\n elif self._video_library.get_video(video_id).flagged:\n print(f\"Cannot add video to {playlist_name}: Video is currently flagged (reason: {self._video_library.get_video(video_id).flag_reason})\")\n elif self._video_library.get_video(video_id) in self.playlists[playlist_name.lower()]:\n print(\"Cannot add video to\", playlist_name, end=\"\") \n print(\": Video already added\")\n else:\n print(\"Added video to\", playlist_name, end=\"\") \n print(\":\",self._video_library.get_video(video_id).title)\n self.playlists[playlist_name.lower()].append(self._video_library.get_video(video_id))", "def set_playlist(self, playlist):\n self._playlist = playlist", "def generate_default_playlist(self):\n self.clear_playlist()\n for cn in self._preset_classes:\n name = cn + \"-1\"\n inst = self._preset_classes[cn](self._app.mixer, name=name)\n inst.setup()\n self._playlist.append(inst)\n self._notifier.playlist_changed.emit()", "def playlist_created(self, playlist):\n message = {\n \"timestamp\": self._get_time(),\n \"level\": \"INFO\",\n \"type\": \"PLAYLIST_CREATED\",\n \"playlist\": json.dumps(playlist),\n }\n\n self._log_queue.put(json.dumps(message))", "def playlist(self):\n def iconv(s):\n encoding = self.options[\"id3_encoding\"]\n try:\n if encoding:\n return s.encode('latin1').decode(encoding).encode('utf-8')\n else:\n return s.encode('latin1')\n except UnicodeEncodeError:\n return \"\"\n\n lst = []\n r = self.x.playlist_list_entries()\n r.wait()\n for id in r.get_list():\n r = self.x.medialib_get_info(id)\n r.wait()\n if r.iserror():\n print r.get_error()\n lst.append(' ')\n continue\n song = r.get_propdict()\n try:\n artist = iconv(song[('plugin/id3v2', 'artist')])\n except KeyError:\n try:\n artist = iconv(song[('plugin/mad', 'artist')])\n except KeyError:\n artist = ''\n try:\n title = iconv(song[('plugin/id3v2', 'title')])\n except KeyError:\n try:\n title = iconv(song[('plugin/mad', 'title')])\n except KeyError:\n title = ''\n if artist == \"\" and title == \"\":\n name = os.path.split(song[('server', 'url')])[1]\n name = os.path.splitext(name)[0]\n name = urllib.unquote(name.decode('utf-8').encode('latin1'))\n name = name.replace(\"+\", \" \")\n lst.append(' ' + name)\n else:\n lst.append(' %s - %s' % (artist.ljust(6), title))\n\n return lst", "def cmd_pasetplaylist(self, data, client, cmd):\n if not self._isplaylist_enabled and not self._isranked:\n client.message('Playlists are not enabled in this server. You can\\'t set a playlist!')\n return\n\n _number_of_playlists = len(self._playlists)\n\n if not data:\n client.message('missing parameter, try !help pasetplaylist')\n return\n\n try:\n float(data)\n except ValueError:\n client.message('Please use a playlist number, %s is not a numeric value' % data)\n return\n\n data = int(data)\n if data not in range(1, _number_of_playlists + 1):\n client.message('Playlist number %s out of range! 
Please enter a valid number' % data)\n else:\n self.console.write('setadmindvar playlist %s' % data, maxRetries=5)\n client.message('Changing playlist to ^3%s - %s' % (data, self._playlists[data]))", "def add_the_song_to_playlist(self):\n com_util.tap_on(self.driver, element['AddToPlaylist'])\n # com_util.send_to(self.driver, element['EnterThePlaylist'], 'My Songs')\n com_util.tap_on(self.driver, element['ClickMySongs'])\n # com_util.tap_on(self.driver, element['SaveBtn'])\n com_util.tap_on(self.driver, element['CancelBtn'])\n com_util.tap_on(self.driver, element['DownArrow'])", "def show_playlist(self, playlist_name):\n \n if playlist_name.lower() not in self.playlists:\n print(\"Cannot show playlist\", playlist_name, end=\"\")\n print(\": Playlist does not exist\")\n elif len(self.playlists[playlist_name.lower()]) == 0:\n print(\"Showing playlist:\", playlist_name)\n print(\"No videos here yet\")\n else:\n print(\"Showing playlist:\", playlist_name)\n for video in self.playlists[playlist_name.lower()]:\n if video.flagged:\n print(f\"{self.videos_dict[video]} - FLAGGED (reason: {video.flag_reason})\")\n else:\n print(self.videos_dict[video])", "def add_to_playlist(self, playlist_name, video_id):\n vid = self._video_library.get_video(video_id)\n if vid and (playlist_name.lower() in self.playlists):\n if video_id not in self.playlists[playlist_name.lower()]:\n print(\"Added video to {0}: {1}\".format(playlist_name, vid.title))\n self.playlists[playlist_name.lower()].append(video_id)\n else:\n print(\"Cannot add video to {0}: Video already added\".format(playlist_name))\n elif playlist_name not in self.playlists:\n print(\"Cannot add video to {0}: Playlist does not exist\".format(playlist_name))\n elif not vid:\n print(\"Cannot add video to {0}: Video does not exist\".format(playlist_name))\n #print(f\"Added video to {self._video_playlist.name}: {video_id}\")\n\n #print(f'Added video to {playlist.name}: {playlist.videos}, {video_id_list}')\n #else:\n #print(f'Cannot add video to [: Video does not exist')", "def __init__(self, name: str, description: str):\n self.name = name\n self.description = description", "def generate_playlist_display():\n if not g.ytpls:\n g.message = c.r + \"No playlists found!\"\n return logo(c.g) + \"\\n\\n\"\n g.rprompt = page_msg(g.current_page)\n\n cw = getxy().width\n fmtrow = \"%s%-5s %s %-12s %-8s %-2s%s\\n\"\n fmthd = \"%s%-5s %-{}s %-12s %-9s %-5s%s\\n\".format(cw - 36)\n head = (c.ul, \"Item\", \"Playlist\", \"Author\", \"Updated\", \"Count\", c.w)\n out = \"\\n\" + fmthd % head\n\n for n, x in enumerate(g.ytpls):\n col = (c.g if n % 2 == 0 else c.w)\n length = x.get('size') or \"?\"\n length = \"%4s\" % length\n title = x.get('title') or \"unknown\"\n author = x.get('author') or \"unknown\"\n updated = yt_datetime(x.get('updated'))[1]\n title = uea_pad(cw - 36, title)\n out += (fmtrow % (col, str(n + 1), title, author[:12], updated, str(length), c.w))\n\n return out + \"\\n\" * (5 - len(g.ytpls))", "def getPlaylist(self,name):\n playlist = self.getAllPlaylists(name)\n return playlist[0] if playlist else None", "def add_tracks():\n sp = credentials()\n tracks = spotify_tracklist()\n playlist_id = grab_playlist()\n sp.user_playlist_add_tracks('truetiming', playlist_id, tracks)", "def add_to_playlist(self, playlist_name, video_id):\n playlist_exists = False\n video_id_exists = False\n for playlist in list(self.playlists.keys()):\n if playlist_name.upper() == playlist.upper():\n playlist_exists = True\n real_playlist_name = playlist\n break\n \n videos = 
self._video_library.get_all_videos()\n for v in videos:\n if v.video_id.lower() == video_id.lower():\n video_id_exists = True\n video_title = v.title\n break\n video_flagged = False\n if self.flagged:\n for videos_f in self.flagged:\n if video_id.lower() in videos_f:\n video_flagged = True\n reason = videos_f[1]\n break\n if video_flagged:\n print(f\"Cannot add video to {playlist_name}: Video is currently flagged (reason:{reason})\")\n elif playlist_exists == False:\n print(f\"Cannot add video to {playlist_name}: Playlist does not exist\")\n \n elif video_id_exists == False:\n print(f\"Cannot add video to {playlist_name}: Video does not exist\")\n\n elif video_id.lower() in self.playlists[real_playlist_name]:\n print(f\"Cannot add video to {playlist_name}: Video already added\")\n else:\n self.playlists[real_playlist_name].append(video_id.lower())\n print(f\"Added video to {playlist_name}: {video_title}\")\n\n # print(\"add_to_playlist needs implementation\")", "def show_playlist(self, playlist_name):\n playlist_id = playlist_name.lower()\n if not playlist_id in self.playlists.keys():\n print(f\"Cannot show playlist {playlist_name}: Playlist does not exist\")\n return\n\n playlist = self.playlists.get(playlist_id)\n videos = playlist.videos\n\n if len(videos) == 0:\n print(f\"Showing playlist: {playlist_name}\")\n print(\"No videos here yet\")\n return\n\n print(f\"Showing playlist: {playlist_name}\")\n for video_id in videos:\n print(self._video_library.get_video(video_id))\n return", "async def playlist(self, ctx, *, query):\n # Setup the headers with the token that should be here\n headers = {\"Authorization\": \"Bearer {}\".format(self._token)}\n opts = {\"q\": query, \"type\": \"playlist\"}\n url = \"https://api.spotify.com/v1/search\"\n response = await utils.request(url, headers=headers, payload=opts)\n try:\n await ctx.send(\n response.get(\"playlists\")\n .get(\"items\")[0]\n .get(\"external_urls\")\n .get(\"spotify\")\n )\n except (KeyError, AttributeError, IndexError):\n await ctx.send(\"Couldn't find a song for:\\n{}\".format(query))", "def playlist(self, playlist_id: str, fields: str = None,\n market: str = 'from_token'):\n return self._get('playlists/' + playlist_id,\n fields=fields, market=market)", "def main():\n\n for i in range(1, 4):\n print(\"\\nSAMPLE INPUT {}\".format(i))\n\n playlist = Playlist()\n\n filename = \"testinput{}.txt\".format(i)\n\n with open(filename, 'r') as testfile:\n operation_list = testfile.read().splitlines()\n\n for line in operation_list:\n operation = line.split(',')\n op_type = operation[0]\n if op_type == 'ADD':\n title, artist, genre, is_fav = operation[1:]\n playlist.add(Track(title, artist, genre, is_fav))\n elif op_type == 'DELTITLE':\n title = operation[1]\n playlist.delete_title(title)\n elif op_type == 'DELPOS':\n position = int(operation[1])\n playlist.delete_position(position)\n elif op_type == 'MOVE':\n old_pos, new_pos = int(operation[1]), int(operation[2])\n playlist.move(old_pos, new_pos)\n elif op_type == 'COUNTGENRE':\n genre = operation[1]\n playlist.count_genre(genre)\n elif op_type == 'COUNTFAV':\n playlist.count_favourite()\n elif op_type == 'PRINT':\n playlist.print_playlist()", "def add_song(name, duration):\n song = Song(\n name=name,\n duration=duration,\n )\n db.session.add(song)\n db.session.commit()\n\n return song", "def playlist_remove(name):\n if name.isdigit() or g.userpl.get(name):\n\n if name.isdigit():\n name = int(name) - 1\n name = sorted(g.userpl)[name]\n\n del g.userpl[name]\n g.message = \"Deleted 
playlist %s%s%s\" % (c.y, name, c.w)\n g.content = playlists_display()\n save_to_file()\n\n else:\n g.message = F('pl not found advise ls') % name\n g.content = playlists_display()", "def playlist_num_filename(**kwargs):\n return f\"{kwargs['track_num']} - {default_filename(**kwargs)}\"", "def __init__(self, name, desc):\n self.name = name\n self.desc = desc\n self.group = None", "def add_to_playlist(self, playlist_name, video_id):\n video = self._video_library.get_video(video_id)\n for i in playlist_name:\n if i.title == video.title:\n print(f\"Cannot add video to {playlist_name}: Video already added\")\n else:\n print(f\"Added video to {playlist_name}: {video.title}\")", "def import_spotify(info: dict) -> (str, int):\n url = info[\"playlist_url\"]\n # Validate URL\n matches = (\n re.match(r\"^https?://open\\.spotify\\.com/playlist/([a-zA-Z\\d]*)/?\", url)\n if isinstance(url, str)\n else None\n )\n if not matches:\n return \"Invalid URL\", 400\n playlist_id = matches.group(1)\n query_url = \"https://api.spotify.com/v1/playlists/\" + playlist_id\n query_headers = {\"Authorization\": \"Bearer {}\".format(info[\"access_token\"])}\n # Get/create playlist\n playlist_json = requests.get(query_url, headers=query_headers).json()\n if \"error\" in playlist_json:\n status = playlist_json[\"error\"].get(\"status\")\n message = playlist_json[\"error\"].get(\"message\")\n return (\n message if message else \"Error retrieving playlist\",\n status if status else 500,\n )\n playlist = Playlist(\n name=playlist_json[\"name\"],\n last_sync_spotify=timezone.now(),\n spotify_id=playlist_id,\n )\n if \"user\" in info:\n playlist.owner = PlaylstrUser.objects.filter(id=info[\"user\"]).first()\n if \"owner\" in playlist_json:\n playlist.spotify_creator_id = playlist_json[\"owner\"][\"id\"]\n playlist.spotify_creator_name = playlist_json[\"owner\"][\"display_name\"]\n playlist.save()\n # Get playlist tracks\n tracks_response = requests.get(query_url + \"/tracks\", headers=query_headers)\n if tracks_response.status_code != 200:\n return tracks_response.reason, 500\n tracks_json = tracks_response.json()\n if \"error_description\" in tracks_json:\n return tracks_json[\"error_description\"], 500\n # Get list of tracks\n index = -1\n while \"next\" in tracks_json and tracks_json[\"next\"] is not None:\n for j in tracks_json[\"items\"]:\n index += 1\n track = track_from_spotify_json(j[\"track\"])\n try:\n PlaylistTrack.objects.create(\n playlist=playlist, track=track, index=index\n )\n except IntegrityError as e:\n print(\"Error adding track {}: {}\".format(str(track), str(e)))\n continue\n tracks_json = requests.get(tracks_json[\"next\"], headers=query_headers).json()\n return str(playlist.playlist_id), 200", "def playlist_rename_idx(_id, name):\n _id = int(_id) - 1\n playlist_rename(sorted(g.userpl)[_id] + \" \" + name)", "def create(data):\n \n return Partlist(\n list_id = data['id'],\n name = data['name'],\n pieces = data['num_parts'])", "def __init__(self, _format):\n path = _format + os.sep + \"playlists\"\n self.__playlists = []\n for root, directory, files in os.walk(path):\n for file in files:\n if file.endswith(\".txt\"):\n self.__playlists.append(file[:-4])", "def play_pl(name):\n if name.isdigit():\n name = int(name)\n name = sorted(g.userpl)[name - 1]\n\n saved = g.userpl.get(name)\n\n if not saved:\n name = get_near_name(name, g.userpl)\n saved = g.userpl.get(name)\n\n if saved:\n g.model.songs = list(saved.songs)\n play_all(\"\", \"\", \"\")\n\n else:\n g.message = F(\"pl not found\") % name\n g.content 
= playlists_display()", "def test_api_video_create_for_nonexistent_playlist(self):\n user = factories.UserFactory()\n some_uuid = uuid.uuid4()\n\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(user.id)\n jwt_token.payload[\"user\"] = {\n \"id\": str(user.id),\n \"username\": user.username,\n }\n self.assertEqual(models.Video.objects.count(), 0)\n\n response = self.client.post(\n \"/api/videos/\",\n {\"lti_id\": \"video_one\", \"playlist\": some_uuid, \"title\": \"Some video\"},\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(models.Video.objects.count(), 0)\n self.assertEqual(response.status_code, 403)", "def get_playlist_insert_request(self, youtube, playlist_id, video_id):\n snippet = dict(\n playlistId=playlist_id,\n resourceId=dict(\n kind='youtube#video',\n videoId=video_id\n )\n )\n return youtube.playlistItems().insert(\n part='snippet',\n body={'snippet': snippet}\n )", "def create_limit(self, test_suite_name, test_instance, test_pins, test_modes, test_numbers, usl_list, lsl_list, test_limits_dict=None):\n if not test_limits_dict:\n test_limits_dict = self.test_limits_dict\n if not isinstance(test_modes, list):\n test_modes = [test_modes]\n if not isinstance(test_pins, list):\n test_pins = [test_pins]\n if not isinstance(test_numbers, list):\n test_numbers = [test_numbers]\n if not isinstance(usl_list, list):\n usl_list = [usl_list]\n if not isinstance(lsl_list, list):\n lsl_list = [lsl_list]\n pass" ]
[ "0.7272547", "0.68430287", "0.673538", "0.6641419", "0.661609", "0.65667766", "0.6543431", "0.6541754", "0.6530266", "0.6512793", "0.6474164", "0.64047396", "0.63702667", "0.6338417", "0.6316106", "0.6193181", "0.617585", "0.61417544", "0.60676473", "0.6046706", "0.5912048", "0.5755235", "0.5707441", "0.5701937", "0.56886506", "0.56847966", "0.5667215", "0.5581864", "0.5569559", "0.5500984", "0.5484833", "0.5464885", "0.5439408", "0.5426098", "0.5404477", "0.5402463", "0.5391245", "0.53723866", "0.5238054", "0.5230561", "0.52087384", "0.51502496", "0.51479536", "0.5127317", "0.51230276", "0.5117532", "0.5113797", "0.5101793", "0.50971043", "0.5069566", "0.5068635", "0.5044123", "0.5041505", "0.5018521", "0.5004427", "0.49922842", "0.4990943", "0.49822748", "0.49792248", "0.49178788", "0.49163815", "0.4916289", "0.48964754", "0.48932102", "0.48910668", "0.48838615", "0.48825252", "0.4882253", "0.488159", "0.4873907", "0.48618823", "0.48503125", "0.48477384", "0.48409888", "0.48406067", "0.4833587", "0.48194036", "0.48114756", "0.4808986", "0.47986573", "0.47908512", "0.47880188", "0.4787548", "0.47867548", "0.47826898", "0.47715807", "0.4768982", "0.476389", "0.47567055", "0.4755809", "0.47439244", "0.47392985", "0.47373125", "0.4733477", "0.47329405", "0.47328484", "0.4732402", "0.47281638", "0.47275117", "0.47271973" ]
0.7137164
1
[Will generate a list of 10 songs with given song name]
def search_song(self, name):
    self.logger.debug('Searched for Song: {}'.format(name))
    results = self.sp.search(q='track:' + name, type='track')
    songs = [song for song in results['tracks']['items']]
    i = 1
    songs_ls = []
    table_ls = []
    for song in songs:
        table_ls.append([i, song['name'][0:20].strip(),
                         song['album']['name'][0:20].strip(),
                         "%0.2f" % (song['duration_ms'] / 60000),
                         song['popularity']])
        songs_ls.append(song['uri'])
        i = i + 1
    return songs_ls, table_ls
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def simple_songs_list(name_of_album):\r\n songs = []\r\n data1 = dbase()\r\n data1 = data1[name_of_album][0]\r\n for song in data1.keys():\r\n songs += [song]\r\n return songs", "def songs_list(name_of_album):\r\n songs = \"\"\r\n data = dbase()\r\n data = data[name_of_album][0]\r\n for song in data.keys():\r\n songs += song\r\n songs += \", \"\r\n return songs[:-2]", "def list(ctx):\n\tfor songName in songs:\n\t\tyield from bot.send_message(ctx.message.author,songName)", "def get_songs(self, song_list):\n self.songs = [[s.name, s.movie_name] for s in song_list\n if s.raga == self.name]", "def get_songs(self):\n search_object = {\"size\":25000,\n 'query': {'term': {FIELD_FINGERPRINTED: True}}, \"fields\": [FIELD_SONGNAME, FIELD_FILE_SHA1,\n FIELD_TOTAL_HASHES]}\n response = self.cursor.search(index = SONGS_INDEXNAME, body=search_object)\n #print(\"get_songs response: \",response)\n arr = []\n for hit in response[\"hits\"][\"hits\"]:\n dct = {\"song_name\":hit['_source'][FIELD_SONGNAME],\"total_hashes\":hit['_source'][FIELD_TOTAL_HASHES],\n \"file_sha1\":hit['_source'][FIELD_FILE_SHA1]}\n arr.append(dct)\n return arr", "def bottle_song_for(num):\n pass", "def songs(self):\n return Html.find_song_names(self.content)", "def random_by_genre_track_list(self):\n\n genre = self.addon_args[\"foldername\"][0].decode(\"utf-8\")\n\n xbmcplugin.setContent(self.addon_handle, \"songs\")\n\n for track in self.connection.walk_random_songs(\n size=self.random_count, genre=genre):\n self.add_track(track, show_artist=True)\n\n xbmcplugin.endOfDirectory(self.addon_handle)", "def selectSongs():\n\tsql =\"select songs.title, artist.name, album.name from songs, album, \" \\\n\t+ \"artist join songs_album on songs.id=songs_album.songs_id \" \\\n\t+ \"join songs_artist on songs.id=songs_artist.songs_id \" \\\n\t+ \"where album.id=songs_album.album_id \" \\\n\t+ \"and artist.id=songs_artist.artist_id\"\n\tc, conn = connect()\n\tretr = c.execute(sql)\n\tsongs = []\n\tfor entry in retr:\n\t\tsongs.append(music.song(title=entry[0], artist=entry[1], album=entry[2]))\n\treturn songs", "def get_songs(path):\r\n song_list = []\r\n genre_paths = glob.glob(path + '/*')\r\n for genre_path in genre_paths:\r\n artist_paths = glob.glob(genre_path + '/*')\r\n for artist_path in artist_paths:\r\n album_paths = glob.glob(artist_path + '/*')\r\n for album_path in album_paths:\r\n lyrics_paths = glob.glob(album_path + '/*.txt')\r\n for lyrics_path in lyrics_paths:\r\n song = {}\r\n song[\"genre\"] = genre_path.replace(path + '/', '')\r\n song[\"artist\"] = artist_path.replace(genre_path + '/', '')\r\n song[\"album\"] = album_path.replace(artist_path + '/', '')\r\n song[\"lyrics\"] = open(lyrics_path).read()\r\n song[\"name\"] = lyrics_path[:-4].replace(album_path + '/', '')\r\n song[\"x\"] = 0\r\n song[\"y\"] = 0\r\n song_list.append(song)\r\n return song_list", "def generate_list(self):\n with open(self.index_file, \"r\") as fh:\n original_songs = [line.rstrip() for line in fh.readlines()]\n \n if self.search_terms:\n # refine using search terms\n self.songs = []\n for st in self.search_terms:\n refined_songs = original_songs\n for s in st:\n refined_songs = [song for song in refined_songs \n if s.lower() in song.lower()]\n self.songs += sorted(refined_songs)\n else:\n self.songs = original_songs\n \n # If we've specified a num_songs, slice out this much\n if self.num_songs: \n self.songs[:] = self.songs[:self.num_songs] \n else:\n self.num_songs = len(self.songs)", "def getSongsSpotify(song_name,access_token):\n song_name = 
song_name.strip()\n query = \"https://api.spotify.com/v1/search?q={}&type=track&limit=20&offset=0\".format(song_name)\n response = requests.get(\n query,\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {}\".format(access_token)\n }\n )\n response_json = response.json()\n # \n \n songs_no = response_json[\"tracks\"][\"total\"]\n if songs_no == 0 :\n return {\"songs_no\" : songs_no}\n songs = response_json[\"tracks\"][\"items\"]\n if(len(songs)<5):\n uri = [songs[0][\"uri\"]]\n names = [songs[0][\"name\"]]\n artists = [songs[0][\"artists\"][0][\"name\"]]\n imageUrl = [songs[0][\"album\"][\"images\"][-1][\"url\"]]\n response_obj = {\n \"songs_no\" : songs_no,\n \"uri\" : uri,\n \"names\" : names,\n \"artists\" : artists,\n \"images\" : imageUrl\n }\n else:\n uri = [ songs[i][\"uri\"] for i in range(0,5)]\n names = [songs[i][\"name\"] for i in range(0,5)]\n artists = [songs[i][\"artists\"][0][\"name\"] for i in range(0,5)]\n imageUrl = [songs[i][\"album\"][\"images\"][-1][\"url\"] for i in range(0,5)]\n response_obj = {\n \"songs_no\" : songs_no,\n \"uri\" : uri,\n \"names\" : names,\n \"artists\" : artists,\n \"images\" : imageUrl\n }\n return response_obj", "def get_song_list(self):\n return self.song_list", "def listSongNameAndArtists(self, resultList, initIndex=0):\n stringToReturn = '' \n for r in range(len(resultList)):\n stringToReturn += f\"{r + initIndex + 1}) {resultList[r]['name']} by {self.listAllArtistsInResult(resultList[r])}\\n\"\n\n return stringToReturn", "def random_by_year_list(self):\n\n from_year = xbmcgui.Dialog().input(\n \"From year\", type=xbmcgui.INPUT_NUMERIC)\n to_year = xbmcgui.Dialog().input(\n \"To year\", type=xbmcgui.INPUT_NUMERIC)\n\n xbmcplugin.setContent(self.addon_handle, \"songs\")\n\n for track in self.connection.walk_random_songs(\n size=self.random_count, from_year=from_year, to_year=to_year):\n self.add_track(track, show_artist=True)\n\n xbmcplugin.endOfDirectory(self.addon_handle)", "def get_song_id_base(self, songname, singger=\"\"):\n # http = urllib3.PoolManager\n # http.request(\"GET\",get_song_id_url(key_word))\n url = get_song_id_url()\n params = {'s': songname, 'offset': \"0\", 'type': \"1\", 'limit': \"1\"}\n result = self.post_request_ex(url, params)\n print(result)\n count=result['result']['songCount']\n id=\"\"\n for i in range(int(count/10)+1):\n params['offset'] = i*10\n params['limit'] = 10\n result = self.post_request_ex(url,params)\n\n for x in range(10):\n try:\n print(\"{}/{}=>{}\".format(i,count,result))\n if result['result']:\n # songnameData=result['result']['songs'][0]['name']\n singgername = result['result']['songs'][0]['artists'][0]['name']\n id=result['result']['songs'][0]['id']\n # if (not singger ) or ((singgername.strip().index(singger.strip())>-1) or (songnameData.index(singger)>-1)):\n if (not singger) or (singgername.strip() == (singger.strip())):\n return id\n except:\n \"\"", "def get_all_songs():\r\n return [Song.song_json(song) for song in Song.query.all()]", "def get_tracks(num=1):\n pass", "def test_top_songs(self):\n \n rss = AppleRSS()\n objs = rss.get_top_songs(limit=10)\n \n self.__test_artists('top_songs', objs)", "def getSongNames():\r\n\r\n global namen\r\n\r\n namen = []\r\n temp = []\r\n\r\n parser = argparse.ArgumentParser()\r\n \r\n parser.add_argument(\"--song1\", required = True, default = None, type= str, help= \"Name vom ersten Song, welcher verarbeitet werden soll.\")\r\n parser.add_argument(\"--song2\", required = False, default = None, type= str, help= \"Name 
vom zweiten Song, welcher verarbeitet werden soll.\")\r\n parser.add_argument(\"--song3\", required = False, default = None, type= str, help= \"Name vom dritten Song, welcher verarbeitet werden soll.\")\r\n parser.add_argument(\"--song4\", required = False, default = None, type= str, help= \"Name vom vierten Song, welcher verarbeitet werden soll.\")\r\n parser.add_argument(\"--song5\", required = False, default = None, type= str, help= \"Name vom fünften Song, welcher verarbeitet werden soll.\")\r\n\r\n args = parser.parse_args()\r\n\r\n opt1_value = args.song1\r\n opt2_value = args.song2\r\n opt3_value = args.song3\r\n opt4_value = args.song4\r\n opt5_value = args.song5\r\n \r\n temp.extend((opt1_value, opt2_value, opt3_value, opt4_value, opt5_value))#saves song names in temp-array\r\n\r\n for name in temp:\r\n if name != None: #if elemtent in temp-array is not None, it will be appended into the namen array\r\n namen.append(name)", "def print_songs(self):\n\t\tfor i,s in enumerate(self._songs):\n\t\t\tprint('{0}. {1}'.format(i, s.print_info()))", "def scrape_all_songs():\n print('Scraping all songs from {}'.format(URL))\n\n soup = scrapekit.handle_url(URL)\n song_elements = []\n tables = soup.findAll('table')\n\n for t in tables:\n field_index = scrapekit.get_col_index(t, field_name=\"Song\")\n\n if field_index:\n song_elements.extend(scrapekit.scrape_table_col(t, field_index))\n\n links = []\n for element in song_elements:\n l = element.find('a')\n if l:\n links.append(PREFIX + l.attrs.get('href', ''))\n return links", "def get_songs(self, per_page=10, sort=\"title\") -> \"Iterable[Song]\":\n assert sort in [\"title\", \"popularity\"]\n page = 1\n while page != None:\n response = self._api_session.request(\n \"GET\",\n f\"artists/{self.id}/songs\",\n params={\"page\": page, \"per_page\": per_page, \"sort\": sort},\n )\n data = response.json()[\"response\"]\n page = data[\"next_page\"]\n for song in data[\"songs\"]:\n yield Song(song, self._api_session, self._web_session)", "def add_songs(self, name, year, title):\n\n album_found = find_object(name, self.album)\n if album_found is None:\n print(\"Not Found \" + name)\n album_found = Album(name, year, self.name)\n self.add_album(album_found)\n else:\n print(\"Found album \"+name)\n\n album_found.add_songs(title)", "def get_all_songs() -> Generator[dict, None, None]:\n\n logging.debug(\"Fetching from server\")\n\n api = _get_api()\n\n for song_page in api.get_all_songs(incremental=True):\n for song in song_page:\n yield song", "def play_all(pre, choice, post=\"\"):\n options = pre + choice + post\n play(options, \"1-\" + str(len(g.model.songs)))", "def random_by_genre_list(self):\n\n for genre in self.connection.walk_genres():\n url = self.build_url({\n \"mode\": \"random_by_genre_track_list\",\n \"foldername\": genre[\"value\"].encode(\"utf-8\")})\n\n li = xbmcgui.ListItem(genre[\"value\"])\n xbmcplugin.addDirectoryItem(\n handle=self.addon_handle, url=url, listitem=li, isFolder=True)\n\n xbmcplugin.endOfDirectory(self.addon_handle)", "def get_all_tracks():\n query_format = f\"track:\"\n\n search_string_letter_ids = [0]\n\n tracks = {}\n\n total = 0\n\n while search_string_letter_ids is not None:\n search_string = construct_search_string(search_string_letter_ids)\n count = track_count(query_format + search_string)\n print(f\"{search_string} : {count}\")\n if count < 2000:\n for i in range(0, count, 50):\n track_results = sp.search(query_format + search_string, type='track', limit=50, offset=i)\n for t in track_results['tracks']['items']:\n 
if t['id'] not in tracks:\n total += 1\n tracks[t['id']] = {'name': t['name']}\n\n search_string_letter_ids = get_next_search_string(search_string_letter_ids, last_was_under=True)\n else:\n search_string_letter_ids = get_next_search_string(search_string_letter_ids, last_was_under=False)\n\n print(f\"Tracks Saved In File: {total}\")\n\n file = save_to_json(tracks, f\"tracks.json\")\n return file", "def get_song_list(self, artist: str) -> List[str]:\n artist = self.just_replace_strings_with_nothing(artist)\n\n url = self.list_url + artist + '/songs'\n\n resp = requests.get(url)\n\n content = bs4.BeautifulSoup(resp.content)\n\n song_list = content.text[content.text.index(\n 'MP3s') + 5:content.text.index('About Song List')]\n\n song_list = re.sub('\\n', ',', song_list)\n song_list = re.sub(',+', ',', song_list)\n song_list = re.sub(', ,', ', ', song_list)\n\n song_list = re.split(',', song_list)\n for i in range(len(song_list)):\n song_list[i] = song_list[i].lstrip(' ')\n song_list[i] = re.sub(\"[.,']\", '', song_list[i])\n song_list[i] = re.sub(\"&\", 'and', song_list[i])\n song_list[i] = re.sub('\\s+', ' ', song_list[i])\n\n song_list = [i for i in song_list if i != '']\n\n return song_list", "def bottle_song_while(num):\n pass", "def get_songs_via_pagination():\n\n page_no = request.args.get('page_no')\n\n row_size = request.args.get('row_size')\n\n songs_details = mod.get_songs(int(page_no), int(row_size))\n\n return jsonify(songs_details)", "def create_song(self, lnmn, lnmx):\n # decide on the length of the song\n nlng = random.randint(lnmn, lnmx)\n\n # load the database\n lns = self.read_database()\n\n # randomly pick nlng lines\n rsong = []\n for i in range(nlng):\n j = random.randint(0,len(lns)-1)\n rsong.append(lns[j])\n\n return rsong", "def get_all_songs(self):\n to_send = self.db.get_all_songs()\n to_send = '$'.join(to_send)\n self.send_message(to_send)", "def generate_playlist():\n\n with open(r'C:\\Users\\adria\\OneDrive\\Desktop\\Muzica.txt', 'w+', encoding='utf-8') as playlist:\n playlist_songs = os.listdir('D:\\\\Muzica\\\\')\n for song in playlist_songs:\n playlist.write(song + '\\n')", "def main():\n songs = []\n first_line = sys.stdin.readline().split(' ', 1)\n songs_on_album, songs_to_select = int(first_line[0]), int(first_line[1])\n for i in range(songs_on_album):\n line = sys.stdin.readline().split(' ', 1)\n song = Song(line[1], int(line[0]), i+1)\n songs.append(song)\n\n print_quality_songs(songs, songs_to_select)", "def get_songs(library):\n songs = []\n for song in library:\n title, artist, album = song['title'], song['artist'], song['album']\n seconds = int(song['durationMillis']) // 1000\n songs.append({'artist': artist, 'title': title, 'album': album, 'seconds': seconds})\n return songs", "def _make_song_list_html(song_list):\n return '<p class=\"song_name\">' + '<br>'.join([f'{song[\"title\"]} <span class=\"artist_album\">{song[\"artist\"]} - {song[\"album\"]}</span>' for song in song_list]) + '</p>'", "def get_songs(self, offset=None):\n return self.__get('songs')", "def get_all_song_names(self):\n try:\n # Auto-close\n with closing(self.connection) as con:\n # Auto-commit\n with con:\n # Auto-close\n with closing(con.cursor()) as cursor:\n cursor.execute(\"\"\"\n SELECT name\n FROM nodes\n WHERE type = \"song\";\n \"\"\")\n return [x[0] for x in cursor.fetchall()]\n except sqlite3.OperationalError as e:\n print(\"ERROR: Could not retrieve songs: {}\".format(str(e)))\n return []", "def gen_random_samples():\n if os.path.exists('Song_Samples'):\n pass\n else:\n 
os.mkdir('Song_Samples')\n for filename in os.listdir(\"Songs\"):\n rate, data = wavfile.read(os.path.join(\"Songs\", filename))\n song_duration = len(data) // rate\n start_point = randint(0, song_duration - SAMPLE_DURATION)\n end_point = start_point + SAMPLE_DURATION\n subprocess.call(['ffmpeg', '-i', os.path.join(\"Songs\", filename),\n '-ss', str(datetime.timedelta(seconds=start_point)), '-to',\n str(datetime.timedelta(seconds=end_point)), '-y', os.path.join(\"Song_Samples\", filename)])", "def get_players(n, playerspace):\n ps = []\n for i in range(n):\n name = \"\"\n while name == \"\":\n name = input(\"What's the name of player @ index {} (can't be empty): \".format(i))\n p = Player(name, i)\n p.playerspace = playerspace()\n ps.append(p)\n return ps", "def makeDatabaseNamesList(n, ):", "def search_with_song(song_name: str, mode: int) -> str:\n SONG_NAME = 1\n db = get_db_name_by_mode(mode)\n song_list = get_singers_and_songs_by_mode(mode)[1]\n res = []\n songs_data = []\n\n db_connection = sqlite3.connect(db)\n if get_acceleration_flag(mode, True):\n for letter in song_name:\n db_cursor = db_connection.cursor()\n db_cursor.execute('SELECT * FROM TEST WHERE SONG LIKE \"%' + letter + '%\"')\n songs_data.extend([song for song in db_cursor.fetchall()])\n pass\n songs_data = list(dict.fromkeys(songs_data))\n similar_songs = [song[SONG_NAME] for song in songs_data]\n similar_songs = compare.compare(similar_songs, song_name, ac=True)\n for song_with_similar_score in similar_songs: # pick the song in similar_songs from in songs_data\n for song_info in songs_data:\n if song_with_similar_score[SONG_NAME] == song_info[SONG_NAME]:\n res.append(song_info)\n break\n pass\n else:\n similar_songs = compare.compare(song_list, song_name)\n for song_with_similar_score in similar_songs:\n db_cursor = db_connection.cursor()\n db_cursor.execute('SELECT * FROM TEST WHERE SONG = \"' + song_with_similar_score[SONG_NAME] + '\"')\n res.extend(db_cursor.fetchall())\n pass\n pass\n db_connection.close()\n\n if len(res) == 0:\n return response.pack(response.EMPTY, res)\n else:\n return response.pack(response.SUCCESS, res)\n pass", "def song_lyrics(ans):\r\n albums = simple_album_list()\r\n for album in albums:\r\n songs = simple_songs_list(album)\r\n for song in songs:\r\n if ans == song:\r\n words = dbase()[album][0][song]\r\n words = words[2]\r\n return words", "def update():\n\tglobal songList\n\tglobal songs\n\tsongList=os.listdir(\"./music/\")\n\tsongs=['```']\n\tfor song in songList:\n\t\tif len(songs[-1])>1800:\n\t\t\tsongs[-1]+='```'\n\t\t\tsongs.append('```')\n\t\tif '.mp3' in song:\n\t\t\tsongs[-1]+=song.replace('.mp3','')\n\t\t\tsongs[-1]+='\\n'\n\tsongs[-1]+='```'", "def search_for_album(album_name):\n\n print(f'Searching for album: {album_name}')\n\n search_result = spotifyObject.search(q=f'\"{album_name}\"', limit=20, type='album')\n\n items = search_result['albums']['items']\n\n results = []\n\n for item in items:\n if len(item['artists']) > 1:\n artists = tuple(art['name'] for art in item['artists'])\n else:\n artists = item['artists'][0]['name']\n\n results.append((artists, item['name'], item['id']))\n\n return results", "def nextsong(self):\n\t\tmax = db.maxSongs()\n\t\tid = helper.getrandom(max)\n\t\tsong = db.selectPlay(id)\n\t\tretval = json.dumps(song.getDict()) \n\t\treturn retval", "def makeSong(text):\n song = []\n text = text.replace(\"\\n\", \";\")\n songData = text.split(\";\")\n lineNumber = 1\n for line in songData:\n _parseSongLine(song, line, lineNumber, \"text\")\n lineNumber += 
1\n return song", "def song_by_word(ans):\r\n songs_list = \"\"\r\n ans = ans.lower()\r\n albums = simple_album_list()\r\n for album in albums:\r\n songs = simple_songs_list(album)\r\n for song in songs:\r\n song = str(song)\r\n if ans in song.lower():\r\n songs_list += song + \", \"\r\n return songs_list[:-2]", "def genre_list(self):\n\n for genre in self.connection.walk_genres():\n url = self.build_url({\n \"mode\": \"albums_by_genre_list\",\n \"foldername\": genre[\"value\"].encode(\"utf-8\")})\n\n li = xbmcgui.ListItem(genre[\"value\"])\n xbmcplugin.addDirectoryItem(\n handle=self.addon_handle, url=url, listitem=li, isFolder=True)\n\n xbmcplugin.endOfDirectory(self.addon_handle)", "def getSongs(self, songIDList):\n for i, songID in enumerate(songIDList):\n if not isinstance(songID, str):\n songIDList[i] = str(songID)\n\n currAPIVersion = self.config['apiVersion']\n #currAPIVersion = 0\n currAPIURL = URL_NEAPIS[sys._getframe().f_code.co_name]\n currAPIURL = currAPIURL[min(currAPIVersion, len(currAPIURL) - 1)]\n if currAPIVersion == 0:\n currDict = {\n 'ids' : repr(songIDList).replace(\" \", \"\").replace(\"'\", \"\").replace(\"\\\"\", \"\"),\n }\n if currAPIVersion == 1:\n currDict = {\n #'c' : json.dumps([{ \"ids\" : songIDList}]).replace(\" \", \"\"),\n 'ids' : repr(songIDList).replace(\" \", \"\").replace(\"'\", \"\").replace(\"\\\"\", \"\"),\n #'c' : json.dumps([{ \"id\" : [int(x) for x in songIDList]}]).replace(\" \", \"\"),\n }\n\n currC, currR = self._mySubmit(currAPIURL, currDict)\n #print currR\n self.apiLog.info(\"%s Json Loads Begin\", sys._getframe().f_code.co_name)\n currR = json.loads(currR)\n self.apiLog.info(\"%s Json Loads End\", sys._getframe().f_code.co_name)\n self.updateCookie(currC)\n self.checkCode(currR['code'])\n#modify\n sortedData = range(len(songIDList))\n for song in currR['songs']:\n sortedData[songIDList.index(str(song['id']))] = song\n\n for i, song in enumerate(sortedData):\n if isinstance(song, int):\n sortedData[i] = {}\n #raise NEApiError, \"not all songdetails are responsed back here.\"\n\n currR['songs'] = sortedData\n return currR, currAPIURL[2]", "def add_songs(self, songs, genius_api, nthreads=0):\n\t\tif isinstance(songs, list):\n\t\t\tprint(f'length of list songs {len(songs)}')\n\t\t\tif nthreads <2:\n\t\t\t\tfor song_id in songs:\n\t\t\t\t\t#print(song_id)\n\t\t\t\t\tsong = genius_api.search(song_id, 'song')\n\t\t\t\t\tself.__add_song(song, genius_api)\n\t\t\t\t\t#print('songs {} added with success'.format(song['title']))\n\t\t\telif nthreads >1:\n\t\t\t\tassert len(songs) > 0\n\t\t\t\tthreads=[]\n\t\t\t\tscrapping_batch_size = len(songs) // nthreads\n\t\t\t\tprint(f'thread list size = {scrapping_batch_size}')\n\t\t\t\tfor i in range(nthreads):\n\t\t\t\t\tthreads.append(Thread(target=self.add_songs, \n\t\t\t\t\t\targs=(songs[scrapping_batch_size * i : scrapping_batch_size * (i + 1)], genius_api,)))\n\t\t\t\t\tif i == threads - 1:\n\t\t\t\t\t\tthreads[i] = Thread(self.add_songs, (songs[scrapping_batch_size * i:], genius_api,))\n\t\t\t\t\tthreads[i].start()\n\t\t\t\t\tprint('thread {} activated'.format(i+1))\n\t\telse:\n\t\t\tsong = genius_api.search(songs, 'song')\n\t\t\tself.__add_song(song, genius_api)\n\t\t\tprint('song {} added with success'.format(song['title']))", "def get_8tracks_songs(self):\n logging.info('getting tracks from 8tracks')\n API_KEY = '7fe2e057bb81abf2248a06ecab027b8dc09e01d3' \n self.info_var.set('\\n\\n Now playing 8Tracks Songs: \\n')\n\n api = Api(api_key=API_KEY)\n mixes = api.get_mixes(num_results=1)\n track = 
api.start_playback(mixes[0]) \n\n urls = []\n while not track['done']:\n urls.append(track['stream_url'])\n self.add_track_info('{name} by {performer}'.format(name=track['name'], performer=track['performer']))\n track = api.next_song() \n logging.info('got track {track}'.format(track=track))\n\n return urls", "def add_songs(self):\n settings = dict(initialdir=pathlib.Path().absolute(), title=\"Choose songs\", filetypes=(\n (\"flac files\", \"*.flac\"),\n (\"mp3 files\", \"*.mp3\"),\n (\"all files\", \"*\")))\n\n songs = filedialog.askopenfilenames(**settings)\n\n for song in songs:\n self.update_playlist(song)\n\n self.listbox.insert(\"end\", *[song['name'] for song in self.song_list])\n with open('last_list.pkl', 'wb') as f:\n pickle.dump(self.song_list, f)", "def get_song_data(self, song_name=None, song_id=None):\n if song_name is None and song_id is None:\n print(\"ERROR: Require one of song name and song ID to retrieve song data.\")\n return []\n elif song_name is None:\n song_name = \"%\" # match any string\n\n try:\n # Auto-close.\n with closing(self.connection) as con:\n # Auto-commit\n with con:\n # Auto-close.\n with closing(con.cursor()) as cursor:\n cursor.execute(\"\"\"\n SELECT\n song.name, artist.name, song.duration_ms, song.popularity,\n song.id, song.spotify_uri, song.acousticness, song.danceability,\n song.energy, song.instrumentalness, song.liveness, song.loudness,\n song.speechiness, song.valence, song.tempo, song.mode,\n song.musical_key, song.time_signature\n\n FROM (\n SELECT *\n FROM songs JOIN nodes ON node_id == id\n WHERE name LIKE (?)\n ) AS song JOIN nodes AS artist ON main_artist_id == artist.id;\n \"\"\", (song_name,))\n return [\n dict(\n song_name=x[0], artist_name=x[1], duration_ms=x[2], popularity=x[3],\n id=x[4], spotify_uri=x[5], acousticness=x[6], danceability=x[7],\n energy=x[8], instrumentalness=x[9], liveness=x[10], loudness=x[11],\n speechiness=x[12], valence=x[13], tempo=x[14], mode=x[15],\n musical_key=x[16], time_signature=x[17],\n ) for x in cursor.fetchall()\n if song_id is None or song_id == x[4]\n ]\n\n except sqlite3.OperationalError as e:\n print(\"ERROR: Could not retrieve data for song with name '{}': {}\".format(song_name, str(e)))\n return []", "def getAllSongs(self):\n return self.__songDictionary", "def get_tracks(search_string=None):\n if search_string is None:\n print('Please use a search string with get_tracks function')\n exit(0)\n item_type = \"tracks\"\n info_dict = spotify.search(q=search_string, limit=10, type='track')\n items = info_dict[item_type][\"items\"]\n tracks = []\n for i in range(len(items)):\n album_name = items[i][\"album\"][\"name\"]\n album_type = items[i][\"album\"][\"album_type\"]\n artists_names = ', '.join([\n items[i][\"artists\"][index][\"name\"]\n for index in range(len(items[i][\"artists\"]))\n ])\n track_name = items[i][\"name\"]\n track_id = items[i][\"id\"]\n track_popularity = items[i][\"popularity\"]\n tracks.append({\"Album Name\": album_name,\n \"Album Type\": album_type,\n \"Artist(s)\": artists_names,\n \"Track Name\": track_name,\n \"Popularity\": track_popularity,\n \"Track ID\": track_id\n })\n tracks.sort(key=lambda d: d['Popularity'], reverse=True)\n return tracks", "def list(\n self,\n name,\n ):\n pass", "def loadTestSong (filename):\n testSong = {}\n #information of analysed song stored in dictionary testSong\n testSong[\"spectrogram\"] = STFTsignal.getSTFTofFile(filename)\n testSong[\"name\"] = filename\n return testSong", "def getplaylisttracks(accesstoken, chosenplaylist):\n\n headers 
= {}\n headers[\"Authorization\"] = \"Bearer {}\".format(accesstoken)\n\n limit = 100\n\n payload = {}\n payload[\"limit\"] = limit\n payload[\"offset\"] = 0\n\n r = requests.get(\n \"https://api.spotify.com/v1/users/{}/playlists/{}/tracks\".format(chosenplaylist.ownerid, chosenplaylist.playlistid),\n headers=headers,\n params=payload)\n\n response = r.json()\n\n if \"items\" not in response:\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n # wait for the amount of time specified in response header\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n # try again\n return(getplaylisttracks(accesstoken, chosenplaylist, userid))\n else:\n print(response[\"error\"])\n return(None)\n else:\n print('error: getplaylisttracks request failed')\n return(None)\n\n numberreceived = len(response[\"items\"])\n totalavailable = response[\"total\"]\n\n for track in response[\"items\"]:\n t = Track()\n t.trackid = track[\"track\"][\"id\"]\n t.albumname = track[\"track\"][\"album\"][\"name\"]\n t.trackname = track[\"track\"][\"name\"]\n t.artistname = track[\"track\"][\"artists\"][0][\"name\"]\n t.popularity = track[\"track\"][\"popularity\"]\n # print(t.trackid, t.trackname, t.artistname, t.albumname)\n chosenplaylist.tracks.append(t)\n\n # if we haven't gotten all of the tracks in the playlist, request the next\n # batch\n\n while numberreceived < totalavailable:\n\n payload[\"offset\"] = payload[\"offset\"] + limit\n r = requests.get(\n \"https://api.spotify.com/v1/users/{}/playlists/{}/tracks\".format(chosenplaylist.ownerid, chosenplaylist.playlistid),\n headers=headers,\n params=payload)\n response = r.json()\n\n if \"items\" not in response:\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n # wait for the amount of time specified in response header\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n # try again\n continue\n else:\n print('error: getplaylisttracks request failed')\n print(response[\"error\"])\n return(None)\n else:\n print('error: unknown error')\n return(None)\n\n for track in response[\"items\"]:\n if track[\"is_local\"]:\n # a locally saved song. 
skip over it, as no way to query audio \n # features without having a spotify track id\n continue\n t = Track()\n t.trackid = track[\"track\"][\"id\"]\n t.albumname = track[\"track\"][\"album\"][\"name\"]\n t.trackname = track[\"track\"][\"name\"]\n t.artistname = track[\"track\"][\"artists\"][0][\"name\"]\n # print(t.trackid, t.trackname, t.artistname, t.albumname)\n chosenplaylist.tracks.append(t)\n\n \n numberreceived = numberreceived + len(response[\"items\"])\n\n # filter out tracks with trackid == None\n chosenplaylist.tracks = [track for track in chosenplaylist.tracks if track.trackid is not None]\n\n # print(chosenplaylist.tracks)\n return(chosenplaylist)", "def view_songs_push(self):\n #clear all data first\n self.model.removeRows(0, self.model.rowCount())\n songs = glob.glob(\"Fixed/*/*/*\")\n for song in songs:\n data = mutagen.File(song, easy=True)\n track = get_track(data['title'][0], data['artist'][0])\n self.add_track_to_box(track)", "def generate_songlist_display(song=False, zeromsg=None, frmat=\"search\"):\n # pylint: disable=R0914\n if g.browse_mode == \"ytpl\":\n return generate_playlist_display()\n\n max_results = getxy().max_results\n\n songs = g.model.songs or []\n\n if not songs:\n g.message = zeromsg or \"Enter /search-term to search or [h]elp\"\n return logo(c.g) + \"\\n\\n\"\n g.rprompt = page_msg(g.current_page)\n\n have_meta = all(x.ytid in g.meta for x in songs)\n user_columns = get_user_columns() if have_meta else []\n maxlength = max(x.length for x in songs)\n lengthsize = 8 if maxlength > 35999 else 7\n lengthsize = 5 if maxlength < 6000 else lengthsize\n reserved = 9 + lengthsize + len(user_columns)\n cw = getxy().width\n cw -= 1\n title_size = cw - sum(1 + x['size'] for x in user_columns) - reserved\n before = [{\"name\": \"idx\", \"size\": 3, \"heading\": \"Num\"},\n {\"name\": \"title\", \"size\": title_size, \"heading\": \"Title\"}]\n after = [{\"name\": \"length\", \"size\": lengthsize, \"heading\": \"Time\"}]\n columns = before + user_columns + after\n\n for n, column in enumerate(columns):\n column['idx'] = n\n column['sign'] = \"-\" if not column['name'] == \"length\" else \"\"\n\n fmt = [\"%{}{}s \".format(x['sign'], x['size']) for x in columns]\n fmtrow = fmt[0:1] + [\"%s \"] + fmt[2:]\n fmt, fmtrow = \"\".join(fmt).strip(), \"\".join(fmtrow).strip()\n titles = tuple([x['heading'][:x['size']] for x in columns])\n hrow = c.ul + fmt % titles + c.w\n out = \"\\n\" + hrow + \"\\n\"\n\n for n, x in enumerate(songs[:max_results]):\n col = (c.r if n % 2 == 0 else c.p) if not song else c.b\n details = {'title': x.title, \"length\": fmt_time(x.length)}\n details = copy.copy(g.meta[x.ytid]) if have_meta else details\n otitle = details['title']\n details['idx'] = \"%2d\" % (n + 1)\n details['title'] = uea_pad(columns[1]['size'], otitle)\n cat = details.get('category') or '-'\n details['category'] = pafy.get_categoryname(cat)\n data = []\n\n for z in columns:\n fieldsize, field = z['size'], z['name']\n if len(details[field]) > fieldsize:\n details[field] = details[field][:fieldsize]\n\n data.append(details[field])\n\n line = fmtrow % tuple(data)\n col = col if not song or song != songs[n] else c.p\n line = col + line + c.w\n out += line + \"\\n\"\n\n return out + \"\\n\" * (5 - len(songs)) if not song else out", "def print_quality_songs(songs, songs_to_select):\n songs.sort(key=lambda song: -int(song.quality()))\n for i in range(songs_to_select):\n print songs[i]", "def find_song_recommendations(access_token, tracks, target, n, params):\n track_string = 
'%2C'.join(tracks[:5])\n response = spotify.get_recommendations(access_token, 50, track_string, params)\n\n song_recommendation = response['tracks']\n recommendations = {song['id']: {'name': song['name']} for song in song_recommendation}\n\n moods = get_features_moods(recommendations)\n\n return order_songs(moods, target, n)", "def _gennames(prefix, base, number):\n for index in xrange(number):\n yield \"%s%d\" % (prefix, base + index)", "def all_titles(our_data):\n return [album['album'] for album in our_data]", "def get_play_names(corpus):\n play_names = []\n request_url = \"https://dracor.org/api/corpora/{}\".format(corpus)\n response = requests.get(request_url)\n if response:\n all_plays = response.json()[\"dramas\"]\n for play in all_plays:\n play_names.append(play[\"name\"])\n return play_names", "def multiple_ranks(our_data,start, end):\n count = start\n album_list = []\n while count <= end:\n album_list.append(find_by_rank(count))\n count += 1", "def get_random_song(self):\n return random.choice(self.song_list)", "def construct_metadata(song):\n print(song) #temp", "def search_song(self, name, album=None, artist=None):\n\n endpoint = \"/search\"\n query = f\"track:{self._strip_punctuation(name)}\"\n if artist:\n query += f\" artist:{self._strip_punctuation(artist)}\"\n if album:\n query += f\" album:{self._strip_punctuation(album)}\"\n response = self._send(endpoint, \"GET\", params={\"q\": query, \"type\": \"track\"})\n tracks = response.json()[\"tracks\"]\n if tracks[\"total\"] == 0:\n raise SongNotFoundError(\n f\"song name={name} artist={artist} album={album} could not be found\"\n )\n return tracks[\"items\"]", "def createFileNames(nFileNames, seqPrefix):\n nameList = []\n nameList = [seqPrefix+str(i)+\".txt\" for i in range(0, nFileNames)]\n return nameList", "def make_music_rand():\n pass", "def get_playlist_tracks(playlist):\n track_ids = [id for id in load_from_json(f\"playlist_{playlist}.json\") if id is not None]\n tracks = []\n\n for i in range(0, len(track_ids), 50):\n tracks_info = sp.tracks(track_ids[i: i+50])['tracks']\n for track in tracks_info:\n if track:\n tracks.append({\n 'id': track['id'],\n 'name': track['name'],\n 'popularity': track['popularity']\n })\n df = pd.DataFrame(tracks)\n\n file = f\"playlist_{playlist}_df.csv\"\n df.to_csv(file)\n\n return file", "def next_playlist(self):\n query = \"\"\"SELECT s.id, s.filename, s.score FROM caro_playlistentry AS p, caro_song as s WHERE s.id = p.song_id AND s.score >= 0 ORDER BY p.score DESC, p.date_add ASC LIMIT 1\"\"\"\n rows = self.fetchfile(query)\n return rows", "def get_song(self): \n\n song = self.tracks.sample(n=1).to_dict('index')\n return list(song.values())[0]", "def get_year_tracks(year):\n print(f\"Total Tracks in {year}: {get_num_of_tracks(year)}\")\n\n query_format = f\"year:{year} track:\"\n\n search_string_letter_ids = [0]\n\n tracks = {}\n\n total = 0;\n\n while (search_string_letter_ids is not None):\n search_string = construct_search_string(search_string_letter_ids)\n count = track_count(query_format + search_string)\n print(f\"{search_string} : {count}\")\n if count < 2000:\n for i in range(0, count, 50):\n track_results = sp.search(query_format + search_string, type='track', limit=50, offset=i)\n for t in track_results['tracks']['items']:\n if t['id'] not in tracks:\n total += 1\n tracks[t['id']] = {'name': t['name']}\n\n search_string_letter_ids = get_next_search_string(search_string_letter_ids, last_was_under=True)\n else:\n search_string_letter_ids = 
get_next_search_string(search_string_letter_ids, last_was_under=False)\n\n print(f\"Tracks Saved In File: {total}\")\n\n file = save_to_json(tracks, f\"Tracks{year}.json\")\n return file", "def get_all_musicians(self):\n self.cursor.execute(\"select * from musicians\")\n self.connection.commit()\n return self.cursor.fetchall()", "def get_random_n_cleaned_names(name_list, n=100):\n random_name_list = []\n for i in range(n):\n random_name_list += [get_random_name(name_list)]\n\n return random_name_list", "def playlist(self):\n def iconv(s):\n encoding = self.options[\"id3_encoding\"]\n try:\n if encoding:\n return s.encode('latin1').decode(encoding).encode('utf-8')\n else:\n return s.encode('latin1')\n except UnicodeEncodeError:\n return \"\"\n\n lst = []\n r = self.x.playlist_list_entries()\n r.wait()\n for id in r.get_list():\n r = self.x.medialib_get_info(id)\n r.wait()\n if r.iserror():\n print r.get_error()\n lst.append(' ')\n continue\n song = r.get_propdict()\n try:\n artist = iconv(song[('plugin/id3v2', 'artist')])\n except KeyError:\n try:\n artist = iconv(song[('plugin/mad', 'artist')])\n except KeyError:\n artist = ''\n try:\n title = iconv(song[('plugin/id3v2', 'title')])\n except KeyError:\n try:\n title = iconv(song[('plugin/mad', 'title')])\n except KeyError:\n title = ''\n if artist == \"\" and title == \"\":\n name = os.path.split(song[('server', 'url')])[1]\n name = os.path.splitext(name)[0]\n name = urllib.unquote(name.decode('utf-8').encode('latin1'))\n name = name.replace(\"+\", \" \")\n lst.append(' ' + name)\n else:\n lst.append(' %s - %s' % (artist.ljust(6), title))\n\n return lst", "def get_playlists(search_string=None):\n item_type = 'playlists'\n info_dict = spotify.category_playlists(search_string)\n items = info_dict[item_type][\"items\"]\n playlists = []\n for i in range(len(items)):\n playlist_name = items[i][\"name\"]\n owner_name = items[i][\"owner\"][\"display_name\"]\n total_tracks = items[i][\"tracks\"][\"total\"]\n playlist_id = items[i][\"id\"]\n owner_id = items[i][\"owner\"][\"id\"]\n playlists.append({\"Playlist Name\": playlist_name,\n \"Owner Name\": owner_name,\n \"No. 
of tracks\": total_tracks,\n \"Playlist ID\": playlist_id,\n \"Owner ID\": owner_id\n })\n return playlists", "def make_album(name,album_name,song_num=''):\r\n\tmusic_album={'name':name.title(),'album_name':album_name}\r\n\tif song_num:\r\n\t\tmusic_album['song_num']=song_num\r\n\treturn(music_album)", "def __init__(self, number_players=1000):\n self.player_list = []\n for i in range(number_players):\n self.player_list.append(Player())", "def get_jokes_by_count (count : int , jokes : list ) -> list : \n if count > 0 and count <= 10 : \n return [ choice(jokes) for i in range (count)] \n\n raise Exception(\"count must be > 0 and <= 10 \")", "def get_playlist_songs(self, playlist_id):\n values = {'action' : 'playlist_songs',\n 'filter' : playlist_id,\n }\n root = self.__call_api(values)\n songs = root.getElementsByTagName('song')\n if not songs:\n return None\n l= []\n try:\n for song in songs:\n song_id = int(song.getAttribute('id'))\n song_title = song.getElementsByTagName('title')[0].childNodes[0].data\n artist_id = int(song.getElementsByTagName('artist')[0].getAttribute('id'))\n artist_name = song.getElementsByTagName('artist')[0].childNodes[0].data\n album_id = int(song.getElementsByTagName('album')[0].getAttribute('id'))\n album_name = song.getElementsByTagName('album')[0].childNodes[0].data\n\n song_track = int(song.getElementsByTagName('track')[0].childNodes[0].data)\n song_time = int(song.getElementsByTagName('time')[0].childNodes[0].data)\n song_size = int(song.getElementsByTagName('size')[0].childNodes[0].data)\n\n try: # New Ampache puts nothing here...\n precise_rating = int(song.getElementsByTagName('preciserating')[0].childNodes[0].data)\n except:\n precise_rating = 0\n try:\n rating = float(song.getElementsByTagName('rating')[0].childNodes[0].data)\n except:\n rating = 0\n art = song.getElementsByTagName('art')[0].childNodes[0].data\n url = song.getElementsByTagName('url')[0].childNodes[0].data\n song_dict = {\n 'song_id' : song_id,\n 'song_title' : song_title,\n 'artist_id' : artist_id,\n 'artist_name' : artist_name,\n 'album_id' : album_id,\n 'album_name' : album_name,\n 'song_track' : song_track,\n 'song_time' : song_time,\n 'song_size' : song_size,\n 'precise_rating' : precise_rating,\n 'rating' : rating,\n 'art' : art,\n 'url' : url,\n }\n l.append(song_dict)\n except:\n print(\"This playlist failed\", playlist_id)\n traceback.print_exc()\n return None\n return l", "def _get_top_tracks(artist, limit):\n\n l = []\n for track in _lastfm.get_artist(artist).get_top_tracks(limit=limit):\n track = track.item\n l.append({\"artist\": track.get_artist().get_name(), \"title\": track.get_title()})\n \n return l", "def get_album_tracks(self):\n track_list = self.soup.findAll('div', class_='chart_row')\n number_of_tracks = 0\n titles = []\n urls = []\n track_numbers = []\n \n for track in track_list:\n track_title = re.sub(' Lyrics', '', \" \".join(track.h3.text.split()))\n lyrics_url = track.a['href']\n track_number = track.span.span.text.strip()\n \n if track_number == '':\n # Sometimes there are additional urls that are not a song's lyrics. 
Skip these.\n continue\n else:\n track_number = int(track_number)\n \n number_of_tracks += 1\n titles.append(track_title)\n urls.append(lyrics_url)\n track_numbers.append(track_number)\n \n if self.song_order:\n # Check that order values are okay.\n for number in self.song_order:\n if number > number_of_tracks:\n raise SongOrderValueError(f'Track number given ({number}) exceeds number of tracks ({number_of_tracks})')\n \n for title, url, number in zip(titles, urls, track_numbers):\n if self.song_order:\n if number not in self.song_order:\n print(f'Skipping song: {number:02d} {title}')\n continue\n \n lyrics = self.get_single_lyrics(url)\n self.album.add_song(Song(title=title, track_number=number, lyrics=lyrics))\n\n self.album.number_of_tracks = number_of_tracks", "def gen_m3u_files(\n query: List[str],\n file_name: Optional[str],\n song_list: List[Song],\n template: str,\n file_extension: str,\n short: bool = False,\n):\n\n # If no file name is provided, use the first list's name\n if not file_name:\n file_name = \"{list[0]}.m3u\"\n\n # If file_name ends with a slash. Does not have a m3u name with extension\n # at the end of the template, append `{list[0]}`` to it\n if (\n file_name.endswith(\"/\")\n or file_name.endswith(r\"\\\\\")\n or file_name.endswith(\"\\\\\\\\\")\n ):\n file_name += \"/{list[0]}.m3u\"\n\n # Check if the file name ends with .m3u\n if not file_name.endswith(\".m3u\"):\n file_name += \".m3u\"\n\n lists = []\n for request in query:\n if \"open.spotify.com\" in request and \"playlist\" in request:\n lists.append(Playlist.create_basic_list(request))\n elif \"open.spotify.com\" in request and \"album\" in request:\n lists.append(Album.create_basic_list(request))\n elif \"open.spotify.com\" in request and \"artist\" in request:\n lists.append(Artist.create_basic_list(request))\n elif request == \"saved\":\n lists.append(Saved.create_basic_list())\n\n if len(lists) == 0 and \"{list\" in template:\n raise ValueError(\n \"You must provide a playlist/album/artist/saved to use {list} in the template.\"\n )\n\n # Create a songs list from the lists and the song_list\n songs_lists = []\n for list_obj in lists:\n songs = []\n for song in song_list:\n if song.url in list_obj.urls:\n songs.append(song)\n\n songs_lists.append((list_obj.name, songs))\n\n if \"{list}\" in file_name:\n for list_name, new_song_list in songs_lists:\n create_m3u_file(\n file_name.format(\n list=list_name,\n ),\n new_song_list,\n template,\n file_extension,\n short,\n )\n elif \"{list[\" in file_name and \"]}\" in file_name:\n create_m3u_file(\n file_name.format(list=[list_name for list_name, _ in songs_lists]),\n song_list,\n template,\n file_extension,\n short,\n )\n else:\n create_m3u_file(\n file_name,\n song_list,\n template,\n file_extension,\n short,\n )", "def get_albums(playlist_name):\n\n playlist_id = find_playlist(playlist_name)\n \n items = get_playlist_tracks(playlist_id=playlist_id)\n \n track_values = []\n \n for item in items:\n track = item['track']\n album = track['album']\n artists = tuple(artist['name'] for artist in album['artists'])\n \n track_values.append((album['name'], artists[0]))\n \n album_details = namedtuple('AlbumDetails', 'album artist')\n \n for tup in dict.fromkeys(track_values):\n yield album_details(*tup)", "def __init__(self, name, plays, number):\n self.name = name\n self.plays = plays\n self.number = number", "def GetSongFilenames():\n\n\t## Loop through each directory\n\tsong_files = []\n\tfor root, dirs, fnames in os.walk(\"_data\\\\fma_small\\\\\"):\n\t\t\n\t\t## 
Skip the first level\n\t\tif root == \"_data\\\\fma_small\\\\\":\n\t\t\tcontinue\n\n\t\t## Otherwise collect the files, appending\n\t\t## the root path.\n\t\tsong_files += [root+\"\\\\\"+f for f in fnames]\n\n\treturn song_files", "def get_songs_via_title():\n\n title = request.args.get('title')\n\n songs_detail = mod.get_songs_details_via_title(title)\n\n return jsonify(songs_detail)", "def name_list(length, **kwargs):\n return list(itertools.islice(name_supply(**kwargs), length))", "def get_songs(url, daily):\n\ttry:\n\t\tpage = requests.get(url)\n\texcept (requests.exceptions.ConnectionError, requests.exceptions.ConnectTimeout):\n\t\treturn []\n\n\thtml = BeautifulSoup(page.text, 'html.parser')\n\tresults = []\n\tif daily:\n\t\t# Get just the latest day's group of listings\n\t\tlisting_divs = [html.find('div', class_='episode-music')]\n\telse:\n\t\t# Get all days' listings\n\t\tlisting_divs = html.find_all('div', class_='episode-music')\n\tfor div in listing_divs:\n\t\t# Parse into songs\n\t\tsong_groups = div.find_all('div', class_='episode-music-group')\n\t\t# Divs with additional class \"last\" are the links to amazon; we don't want those\n\t\tlast_divs = div.find_all('div', class_='last')\n\t\tsong_listings = [song for song in song_groups if song not in last_divs]\n\n\n\t\tfor song in song_listings:\n\t\t\ttitle = song.find('a', class_='episode-music-title').text.encode('utf8')\n\t\t\tartist = song.find('div', class_='episode-music-artist').text.encode('utf8')\n\t\t\tresults.append({'title': title, 'artist': artist})\n\t\t\tlogging.debug('get_songs: found song {0} by {1}'.format(title, artist))\n\treturn results", "async def top_10_specs(self):\r\n players = await self.get_players()\r\n specs = []\r\n for player in players:\r\n specs.append(player['specId'])\r\n del specs[10:]\r\n await ctx.message.channel.send('Top 10 3v3 Composition:')\r\n for key in self.specs:\r\n if specs.count(int(key)) > 0:\r\n await ctx.message.channel.send('{:s}: {:d}'.format(self.specs[key], specs.count(int(key))))", "def get_album_songs(self, album_id):\n url = get_album_url(album_id)\n result = self.get_request(url)\n\n return result['album']['songs']", "def top_ten_movies(path):\n content = open(path, \"r\")\n topten = []\n for x in content:\n topten.append(x) \n return topten", "def getSongsFromAlbum(albumLink):\n albumLink = str(albumLink)\n try:\n html = urllib.request.urlopen(albumLink).read()\n soup = bs4.BeautifulSoup(html, 'html.parser')\n table = soup.findAll(\"a\")[5:]\n songLinks = []\n for entry in table:\n text = str(re.findall(\"\\\".*\\\"\", str(entry)))\n text = re.sub(\"[\\]\\['\\\"]\", \"\", text)\n link = albumLink + str(text)\n songLinks.append(link)\n except:\n return []\n return songLinks", "def GetRecentAlbums(self, limit=5):\n self.logger.debug(\"Fetching recently added Music\")\n try:\n xbmc = Server(self.url('/jsonrpc', True))\n properties = ['artist', 'albumlabel', 'year', 'description', 'thumbnail']\n limits = {'start': 0, 'end': int(limit)}\n return xbmc.AudioLibrary.GetRecentlyAddedAlbums(properties=properties, limits=limits)\n except:\n self.logger.error(\"Unable to fetch recently added Music!\")\n return", "def next_random(self):\n query = \"\"\"SELECT id, filename, score FROM caro_song WHERE score >= 0 ORDER by played ASC, score DESC, uniq ASC LIMIT 1\"\"\"\n rows = self.fetchfile(query)\n return rows" ]
[ "0.7061848", "0.6690603", "0.65612096", "0.6520877", "0.6450587", "0.64253664", "0.62649524", "0.6210138", "0.61440945", "0.6127966", "0.612063", "0.61182624", "0.6108939", "0.60201514", "0.60189235", "0.60095435", "0.597353", "0.59719634", "0.59651035", "0.59640324", "0.5957734", "0.5895927", "0.5882713", "0.58421034", "0.58399004", "0.577564", "0.5774631", "0.5767498", "0.5745667", "0.57395387", "0.5731332", "0.5730224", "0.5726925", "0.57092714", "0.5693997", "0.568099", "0.5680929", "0.56785095", "0.56418604", "0.56397814", "0.56319857", "0.5623491", "0.56095856", "0.55975974", "0.5594274", "0.55890024", "0.5579438", "0.5577046", "0.55590826", "0.5556549", "0.5549676", "0.5546307", "0.5543833", "0.552725", "0.5484252", "0.54826486", "0.5469877", "0.5451938", "0.54489", "0.54433733", "0.543634", "0.54313624", "0.5419036", "0.5415382", "0.54052305", "0.5385216", "0.5380929", "0.5377741", "0.5370171", "0.5369089", "0.5365695", "0.53615063", "0.5358103", "0.5350885", "0.5349051", "0.5343761", "0.5341859", "0.533429", "0.53265154", "0.5313945", "0.5312832", "0.53092766", "0.53008217", "0.52962196", "0.5292827", "0.52920103", "0.52874887", "0.5286293", "0.5282541", "0.5275928", "0.5269258", "0.52676463", "0.5263759", "0.52489644", "0.52466816", "0.52443105", "0.5243605", "0.5242521", "0.523974", "0.52360785" ]
0.6639254
2
[list all spotify playlists on users account]
def list_playlists(self, user=None):
    if user:
        playlists = self.sp.user_playlists(user)['items']
    else:
        playlists = self.sp.user_playlists(self.user)['items']
    pl_names = [pl['name'] for pl in playlists]
    pl_id = [pl['id'] for pl in playlists]
    pl_own = [pl['owner']['id'] for pl in playlists]
    return pl_names, pl_id, pl_own
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_playlists_for_user(self, request): \n user = Account.find_by_id(request.userid)\n playlists = Playlist.find_by_owner(user.key).fetch(20)\n return self.build_playlist_response(playlists)", "def current_user_playlists(self, limit: int = 20, offset: int = 0):\n return self._get('me/playlists', limit=limit, offset=offset)", "def current_user_playlists(self, limit=50, offset=0, **kwargs):\n return self._get(API.MY_PLAYLISTS.value, limit=limit, offset=offset, **kwargs)", "def playlists(self, user_id: str, limit: int = 20, offset: int = 0):\n return self._get(f'users/{user_id}/playlists',\n limit=limit, offset=offset)", "def get_playlists_for_user_by_name(self, request): \n user = Account.find_by_username(request.username)\n playlists = Playlist.find_by_owner(user.key).fetch(20)\n return self.build_playlist_response(playlists)", "def print_user_playlists(sp, user_uri):\n\n print(\"Log: Pulling data from Spotify. This can take a while...\")\n \n playlists = sp.user_playlists(user_uri) \n\n while playlists:\n for i, playlist in enumerate(playlists['items']):\n print(\"%4d %s %s\" % (i + 1 + playlists['offset'], playlist['uri'], playlist['name']))\n if playlists['next']:\n playlists = sp.next(playlists)\n else:\n playlists = None", "def get_user_playlists(user_id, authorizer, verbose=False):\n spotify_endpoint = 'https://api.spotify.com/v1/users/{user_id}/playlists'\n\n # there's a limit to the number of playlists that can be downloaded at a time\n # keep downloading playlists until we run out (next = null)\n playlists = {'items':None} \n while True:\n params = {'limit': 50}\n headers = {\"Accept\":\"application/json\", \"Content-Type\":\"application/json\", \"Authorization\": \"Bearer {bearer}\".format(bearer=authorizer.bearer)}\n response = requests.get(spotify_endpoint.format(user_id=user_id), headers=headers, params=params)\n \n if response.status_code == 200:\n data = response.json()\n if playlists['items'] is None:\n playlists['items'] = data['items']\n else:\n playlists['items'] += data['items']\n \n if data['next'] is None:\n return playlists ## look here for how we get out! ##\n else:\n spotify_endpoint = data['next']\n elif response.status_code == 429:\n limit = int(response.headers['Retry-After'])\n print('Hit rate limit, waiting for {} seconds to continue'.format(limit))\n time.sleep(limit)\n elif response.status_code == 404:\n print('Error. User {user_id} not found.'.format(user_id=user_id))\n return None\n elif response.status_code == 401:\n print('Access token expired, refreshing...')\n authorizer.refresh()\n headers = {\"Accept\":\"application/json\", \"Content-Type\":\"application/json\", \"Authorization\": \"Bearer {bearer}\".format(bearer=authorizer.bearer)}\n else:\n print('Error %d' % response.status_code)\n if verbose:\n print(json.loads(response.text)['error']['message'])\n return None", "def get_playlists(search_string=None):\n item_type = 'playlists'\n info_dict = spotify.category_playlists(search_string)\n items = info_dict[item_type][\"items\"]\n playlists = []\n for i in range(len(items)):\n playlist_name = items[i][\"name\"]\n owner_name = items[i][\"owner\"][\"display_name\"]\n total_tracks = items[i][\"tracks\"][\"total\"]\n playlist_id = items[i][\"id\"]\n owner_id = items[i][\"owner\"][\"id\"]\n playlists.append({\"Playlist Name\": playlist_name,\n \"Owner Name\": owner_name,\n \"No. 
of tracks\": total_tracks,\n \"Playlist ID\": playlist_id,\n \"Owner ID\": owner_id\n })\n return playlists", "def user_playlists(self, user, limit=50, offset=0, **kwargs):\n # pylint: disable=no-member\n return self._get(\n API.PLAYLISTS.value.format(user_id=user),\n limit=limit,\n offset=offset,\n **kwargs,\n )", "def getplaylists(accesstoken, userid):\n \n headers = {}\n headers[\"Authorization\"] = \"Bearer {}\".format(accesstoken)\n\n limit = 50\n\n payload = {}\n payload[\"limit\"] = limit\n payload[\"offset\"] = 0\n\n r = requests.get(\"https://api.spotify.com/v1/me/playlists\",\n headers=headers, \n params=payload)\n\n # print('url = \\n\\n {} \\n\\n'.format(r.url))\n\n response = r.json()\n\n # add data to playlist objects\n if \"items\" not in response:\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n # wait for the amount of time specified in response header\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n # try again\n return(getplaylists(accesstoken, userid))\n\n else:\n print(response[\"error\"])\n return(None)\n else:\n print('error: getplaylists request failed')\n return(None)\n\n numberreceived = len(response[\"items\"])\n totalavailable = response[\"total\"]\n\n playlists = OrderedDict()\n\n for playlist in response[\"items\"]:\n p = Playlist()\n p.images = playlist[\"images\"]\n p.name = playlist[\"name\"]\n p.playlistid = playlist[\"id\"]\n p.ownerid = playlist[\"owner\"][\"id\"]\n playlists[p.name] = p\n\n # if number received less than total available, request more\n while numberreceived < totalavailable:\n # print(\"received={} available={}\".format(numberreceived, totalavailable))\n payload[\"offset\"] = payload[\"offset\"] + limit\n r = requests.get(\"https://api.spotify.com/v1/me/playlists\",\n headers=headers, \n params=payload)\n response = r.json()\n\n if \"items\" not in response:\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n # wait for the amount of time specified in response header\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n # try again\n continue\n else:\n print('error: getplaylists request failed')\n print(response[\"error\"])\n return(None)\n else:\n return(None)\n\n for playlist in response[\"items\"]:\n p = Playlist()\n p.images = playlist[\"images\"]\n p.name = playlist[\"name\"]\n p.playlistid = playlist[\"id\"]\n playlists[p.name] = p\n\n numberreceived = numberreceived + len(response[\"items\"])\n\n return(playlists)", "def test_list_playlists_by_random_logged_in_user(self):\n user = factories.UserFactory()\n factories.PlaylistFactory()\n\n jwt_token = UserAccessTokenFactory(user=user)\n\n response = self.client.get(\n \"/api/playlists/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json()[\"count\"], 0)\n self.assertEqual(response.json()[\"results\"], [])", "def list_playlists(self):\n endpoint = '/me/playlists'\n url = f'{self.api_base_url}{endpoint}'\n\n req = requests.get(url, headers=self.__header_bearer())\n if req.status_code == 200:\n items = req.json()\n if 'items' in items:\n return [[item['name'], item['id']] for item in items['items']]\n return False", "def getPlaylists():\n\n allPlaylistData = []\n\n spotifyPlaylistData = crud.getPlaylists(session)\n if 'items' in spotifyPlaylistData:\n allPlaylistData = spotifyPlaylistData['items']\n \n savedPlaylistIDs = crud.getSavedPlaylistIDsByUser(int(session['user_id']))\n\n regPlaylistData = [i for i in allPlaylistData if i['id'] not in 
savedPlaylistIDs]\n savedPlaylistData = [i for i in allPlaylistData if i['id'] in savedPlaylistIDs]\n\n data = {\n 'regPlaylistData': regPlaylistData,\n 'savedPlaylistData': savedPlaylistData\n }\n \n return data", "def ls():\n if not g.userpl:\n g.message = F('no playlists')\n g.content = g.content or generate_songlist_display(zeromsg=g.message)\n\n else:\n g.content = playlists_display()\n g.message = F('pl help')", "def user_playlists(video):\n html = render_template('playlist-menu.html', user=g.user, video=video)\n if request.is_xhr:\n return jsonify(html=html, message_type='success', action='append')\n else:\n return html", "def get_playlists(self):\n if self.youtube is None:\n self.youtube = __get_client()\n return self.youtube.playlists().list(part=\"snippet\", mine=True)\\\n .execute()", "def playlists(self):\r\n return v3.Playlists(self)", "def get_playlists(self):\n values = {\n 'action' : 'playlists',\n }\n root = self.__call_api(values)\n nodes = root.getElementsByTagName('playlist')\n if not nodes: # list is empty, reauth\n return None\n\n l = []\n try:\n for child in nodes:\n id = int(child.getAttribute('id'))\n name = child.getElementsByTagName('name')[0].childNodes[0].data\n owner = child.getElementsByTagName('owner')[0].childNodes[0].data\n items = int(child.getElementsByTagName('items')[0].childNodes[0].data)\n type = child.getElementsByTagName('type')[0].childNodes[0].data\n\n d = {\n 'id' : id,\n 'name' : name,\n 'items' : items,\n 'owner' : owner,\n 'type' : type,\n }\n l.append(d)\n except: #something failed\n traceback.print_exc()\n return []\n return l", "def scrape(self):\n\n request = self.youtube.playlists().list(\n part=\"id,snippet\",\n channelId=self.channelId,\n maxResults=50)\n response = request.execute()\n items = response['items']\n for item in items:\n self.playlists.append(item['id'])\n\n if 'nextPageToken' in response:\n self.nextPageToken = response['nextPageToken']\n\n while self.nextPageToken:\n request = self.youtube.playlists().list(part=\"id,snippet\", channelId=self.channelId,\n maxResults=50, pageToken=self.nextPageToken)\n response = request.execute()\n for item in items:\n self.playlists.append(item['id'])\n\n if 'nextPageToken' in response:\n self.nextPageToken = response['nextPageToken']\n else:\n self.nextPageToken = ''\n\n return self.playlists", "def select_playlists(conn):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM playlists\")\n \n rows = cur.fetchall()\n \n for row in rows:\n print(row)", "def playlists(self):\n return self._playlists", "def getAllPlaylists(self,name):\n return [p for p in self.playlists if p.title == name]", "def get_playlist_contents(playlist_id, user_id, limit=100):\n\ttoken = get_token()\n\theaders = {'Authorization': 'Bearer ' + token}\n\tbase_url = SPOTIFY_API_HOST + 'users/{0}/playlists/{1}/tracks?limit={2}'\n\turl = base_url.format(SPOTIFY_USER_ID, SPOTIFY_PLAYLIST_ID, limit)\n\tresponse = requests.get(url, headers=headers).json() # Todo: Handle errors here. 
Not using this function so ok for now.\n\n\turis = []\n\tfor item in response['items']:\n\t\turi_string = item['track']['uri']\n\t\turis.append(uri_string[uri_string.rfind(':')+1:])\n\treturn uris", "async def set_playlists(self):\n _LOGGER.debug(\"[Foobar2k] Getting playlists\")\n if (self._power == POWER_ON):\n playlists = {}\n response = await self.prep_fetch(HTTP_GET, GET_PLAYLISTS, data=None)\n data = json.loads(response)\n _LOGGER.debug(f\"[Foobar2k] Have playlists [{data}]\")\n for pl in data[\"playlists\"]:\n playlists[pl[\"title\"]] = pl[\"id\"]\n if (pl[\"isCurrent\"]):\n self._current_playlist_id = pl[\"id\"]\n self._playlists = playlists", "async def _async_update_playlists(self, **kwargs):\n self._playlists = await self._volumio.get_playlists()", "def get_lists(user):\n list_options = {}\n list_objects = twitter.lists_all(screen_name=user)\n for list_ in list_objects:\n list_options[list_.id] = list_.name\n return list_options.items()", "def playlists_display():\n if not g.userpl:\n g.message = F(\"no playlists\")\n return logo(c.y) + \"\\n\\n\" if g.model.is_empty else \\\n generate_songlist_display()\n\n maxname = max(len(a) for a in g.userpl)\n out = \" {0}Local Playlists{1}\\n\".format(c.ul, c.w)\n start = \" \"\n fmt = \"%s%s%-3s %-\" + str(maxname + 3) + \"s%s %s%-7s%s %-5s%s\"\n head = (start, c.b, \"ID\", \"Name\", c.b, c.b, \"Count\", c.b, \"Duration\", c.w)\n out += \"\\n\" + fmt % head + \"\\n\\n\"\n\n for v, z in enumerate(sorted(g.userpl)):\n n, p = z, g.userpl[z]\n l = fmt % (start, c.g, v + 1, n, c.w, c.y, str(p.size), c.y,\n p.duration, c.w) + \"\\n\"\n out += l\n\n return out", "def test_list_playlists_by_anonymous_user(self):\n factories.PlaylistFactory()\n response = self.client.get(\"/api/playlists/\")\n self.assertEqual(response.status_code, 401)", "def hello():\n plays = filter(lambda p: p.user == users.get_current_user(), Play.all())\n print(plays)\n logout_url = users.create_logout_url('/home')\n logout_url_linktext = 'Logout'\n login_url = users.create_login_url('/home')\n login_url_linktext = 'Login'\n plays = sorted(plays, key=lambda x: x.time)\n return render_template('list_plays.html', plays=plays, logout_url=logout_url, logout_url_linktext=logout_url_linktext, login_url=login_url, login_url_linktext=login_url_linktext)", "def playlist_search(search_term, results=5):\r\n if search_term:\r\n url = PLAYLIST_SEARCH_URL.format(API_KEY, util.web.quote(search_term.encode('ascii', 'ignore')))\r\n response = util.web.http_get(url=url, json=True, referer='https://tinychat.com')\r\n\r\n if response['json'] is not None:\r\n play_lists = []\r\n try:\r\n if 'items' in response['json']:\r\n for i, item in enumerate(response['json']['items']):\r\n if i == results:\r\n return play_lists\r\n playlist_id = item['id']['playlistId']\r\n playlist_title = item['snippet']['title'].encode('ascii', 'ignore')\r\n play_list_info = {\r\n 'playlist_title': playlist_title,\r\n 'playlist_id': playlist_id\r\n }\r\n play_lists.append(play_list_info)\r\n except KeyError as ke:\r\n log.error(ke, exc_info=True)\r\n return None", "def get_playlist_tracks(user, playlist_id, limit=100):\n info_dict = spotify.user_playlist_tracks(user, playlist_id, limit=limit)\n items = info_dict[\"items\"]\n tracks = []\n for i in range(len(items)):\n album_name = items[i][\"track\"][\"album\"][\"name\"]\n album_type = items[i][\"track\"][\"album\"][\"album_type\"]\n artists_names = ', '.join([\n items[i][\"track\"][\"artists\"][index][\"name\"]\n for index in 
range(len(items[i][\"track\"][\"artists\"]))\n ])\n track_name = items[i][\"track\"][\"name\"]\n popularity = items[i][\"track\"][\"popularity\"]\n track_id = items[i][\"track\"][\"id\"]\n tracks.append({\"Album Name\": album_name,\n \"Album Type\": album_type,\n \"Artist(s)\": artists_names,\n \"Track Name\": track_name,\n \"Popularity\": popularity,\n \"Track ID\": track_id\n })\n tracks.sort(key=lambda d: d['Popularity'], reverse=True)\n return tracks", "async def get_user_playlists(\n self, user: discord.User, partial: bool = False\n ) -> List[UserPlaylist]:\n\n user_playlist_ids = await self.database.select(\n self.tables[\"playlists\"], [\"id\"], {\"user\": user.id}, True\n )\n\n return list(\n await asyncio.gather(\n *[\n self.get_playlist(user, user_playlist_id[\"id\"], partial)\n for user_playlist_id in user_playlist_ids\n ]\n )\n )", "def test_display_playlists(self):\n\n result = self.client.get(\"/playlists\")\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"</i> Your Playlists</h1>\", result.data)\n self.assertIn(b\"120</option>\", result.data) # BPM\n self.assertIn(b\"Happy</option>\", result.data) # Mood of tracks", "def getplaylisttracks(accesstoken, chosenplaylist):\n\n headers = {}\n headers[\"Authorization\"] = \"Bearer {}\".format(accesstoken)\n\n limit = 100\n\n payload = {}\n payload[\"limit\"] = limit\n payload[\"offset\"] = 0\n\n r = requests.get(\n \"https://api.spotify.com/v1/users/{}/playlists/{}/tracks\".format(chosenplaylist.ownerid, chosenplaylist.playlistid),\n headers=headers,\n params=payload)\n\n response = r.json()\n\n if \"items\" not in response:\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n # wait for the amount of time specified in response header\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n # try again\n return(getplaylisttracks(accesstoken, chosenplaylist, userid))\n else:\n print(response[\"error\"])\n return(None)\n else:\n print('error: getplaylisttracks request failed')\n return(None)\n\n numberreceived = len(response[\"items\"])\n totalavailable = response[\"total\"]\n\n for track in response[\"items\"]:\n t = Track()\n t.trackid = track[\"track\"][\"id\"]\n t.albumname = track[\"track\"][\"album\"][\"name\"]\n t.trackname = track[\"track\"][\"name\"]\n t.artistname = track[\"track\"][\"artists\"][0][\"name\"]\n t.popularity = track[\"track\"][\"popularity\"]\n # print(t.trackid, t.trackname, t.artistname, t.albumname)\n chosenplaylist.tracks.append(t)\n\n # if we haven't gotten all of the tracks in the playlist, request the next\n # batch\n\n while numberreceived < totalavailable:\n\n payload[\"offset\"] = payload[\"offset\"] + limit\n r = requests.get(\n \"https://api.spotify.com/v1/users/{}/playlists/{}/tracks\".format(chosenplaylist.ownerid, chosenplaylist.playlistid),\n headers=headers,\n params=payload)\n response = r.json()\n\n if \"items\" not in response:\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n # wait for the amount of time specified in response header\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n # try again\n continue\n else:\n print('error: getplaylisttracks request failed')\n print(response[\"error\"])\n return(None)\n else:\n print('error: unknown error')\n return(None)\n\n for track in response[\"items\"]:\n if track[\"is_local\"]:\n # a locally saved song. 
skip over it, as no way to query audio \n # features without having a spotify track id\n continue\n t = Track()\n t.trackid = track[\"track\"][\"id\"]\n t.albumname = track[\"track\"][\"album\"][\"name\"]\n t.trackname = track[\"track\"][\"name\"]\n t.artistname = track[\"track\"][\"artists\"][0][\"name\"]\n # print(t.trackid, t.trackname, t.artistname, t.albumname)\n chosenplaylist.tracks.append(t)\n\n \n numberreceived = numberreceived + len(response[\"items\"])\n\n # filter out tracks with trackid == None\n chosenplaylist.tracks = [track for track in chosenplaylist.tracks if track.trackid is not None]\n\n # print(chosenplaylist.tracks)\n return(chosenplaylist)", "def user_list(ctx):\n data = ctx.obj.get_all_users()\n output_json_data(data)", "def list_users(item):\n users = User.load_all_users(item)\n for user in users:\n print(user.username)", "def get_user_lists(user):\n if not user: return []\n memberships = db.Query(TaskListMember).filter('user =', user)\n return [m.task_list for m in memberships]", "def spotify_tracklist():\n sp = credentials()\n chart = chartdata()\n trackid_list = []\n #find a way to get track IDS\n for track in chart:\n searchQuery = track[0]\n searchResults = sp.search(q=searchQuery, limit=1, type='track', market=\"US\")\n trackid_list.append(searchResults['tracks']['items'][0]['uri'])\n return trackid_list", "def users():\n access_token = session['access_token']\n return \"%s\" % list_users(access_token)", "def list():\n rino.login.list()", "def getUserSonglistList(self, uid = 0, offset = 0, limit = 1000):\n #get current function name getUserSonglistList\n #URL_NEAPIS includes its URL and API, stored in key named after this function\n currAPIVersion = self.config['apiVersion']\n currAPIURL = URL_NEAPIS[sys._getframe().f_code.co_name]\n currAPIURL = currAPIURL[min(currAPIVersion, len(currAPIURL) - 1)]\n currDict = {\n 'offset' : offset,\n 'limit' : limit,\n 'uid' : uid,\n }\n\n currC, currR = self._mySubmit(currAPIURL, currDict)\n self.apiLog.info(\"%s Json Loads Begin\", sys._getframe().f_code.co_name)\n currR = json.loads(currR)\n self.apiLog.info(\"%s Json Loads End\", sys._getframe().f_code.co_name)\n self.updateCookie(currC)\n self.checkCode(currR['code'])\n\n return currR, currAPIURL[2]", "async def playlist(self, ctx, *, query):\n # Setup the headers with the token that should be here\n headers = {\"Authorization\": \"Bearer {}\".format(self._token)}\n opts = {\"q\": query, \"type\": \"playlist\"}\n url = \"https://api.spotify.com/v1/search\"\n response = await utils.request(url, headers=headers, payload=opts)\n try:\n await ctx.send(\n response.get(\"playlists\")\n .get(\"items\")[0]\n .get(\"external_urls\")\n .get(\"spotify\")\n )\n except (KeyError, AttributeError, IndexError):\n await ctx.send(\"Couldn't find a song for:\\n{}\".format(query))", "def grab_playlist():\n sp = credentials()\n playlists = sp.current_user_playlists()\n for playlist in playlists['items']:\n if playlist['name'] == 'Billboard Hot 100':\n playlist_id = playlist['uri']\n return playlist_id", "def spark_list():\n api.list()", "def get_playlists(self):\n request = self._gen_playlists_request()\n playlists = request.execute()\n logger.debug(playlists)\n\n results = [\n {\n 'title': playlist['snippet']['title'],\n 'videos': self.get_playlist(playlist['id'])\n } for playlist in playlists['items']\n ]\n return results", "def get_current_user_lists():\n return TaskList.get_user_lists(users.get_current_user())", "def do_user_list(cs, args):\n _, users = cs.users.list()\n fields = 
['user_id', 'username', 'email', 'realname', 'comment']\n utils.print_list(users, fields, sortby=args.sortby)", "async def do_playerlist():\n\n download = urllib.request.urlopen(server_api2)\n data = json.loads(download.read())\n player_list = []\n try:\n for i in data['players']['sample']:\n player_list.append(i['name'])\n except KeyError:\n if data['online'] == False:\n await bot.send_message(c, 'Failed. The server is offline.')\n return\n else:\n await bot.send_message(c, 'There are no players online.')\n return\n string = ''\n for i in player_list:\n string += '{}, '.format(i)\n await bot.send_message(c, string)", "def list_pl_songs(self, pl_id, user=None):\n if user:\n res = self.sp.user_playlist_tracks(user, pl_id)\n else:\n res = self.sp.user_playlist_tracks(self.user, pl_id)\n song_uri_ls = [song['track']['uri'] for song in res['items']]\n song_ls = []\n for i, song in enumerate(res['items']):\n song_ls.append([i,\n song['track']['name'][0:20].strip(),\n song['track']['album']['name'][0:20].strip(),\n \"%0.2f\" % (song['track']['duration_ms'] / 60000),\n song['track']['popularity']])\n return song_uri_ls, song_ls", "def add_tracks():\n sp = credentials()\n tracks = spotify_tracklist()\n playlist_id = grab_playlist()\n sp.user_playlist_add_tracks('truetiming', playlist_id, tracks)", "def get_playlists(self, playlist_uuids, *args):\n\n playlists = []\n for playlist_uuid in playlist_uuids:\n status = self.get_playlist(playlist_uuid)\n if status['status'] != 0:\n return status # Error getting the playlist\n\n # Discard the other information and keep the playlist object :)\n playlists.append(status['playlist'])\n\n rsp = rsp_codes[0]\n rsp['playlists'] = playlists\n return rsp", "def _list(room_name):\n members = redis.smembers(room_name)\n \n if str(members) == 'set()':\n text = '```Users in list: none```'\n return text\n\n text = 'Users in list: %s ' % ','.join(members)\n \n return text", "async def list(self, ctx):\n cyphon = discord.utils.get(ctx.message.server.members, id=\"186835826699665409\")\n\n if self.check_channel(ctx):\n if self.check_permission(ctx) or ctx.message.author == cyphon:\n message = []\n message.append(\"```\\n\")\n if self.check_channel(ctx):\n if self.check_permission(ctx) or ctx.message.author == cyphon:\n if len(self.twitch_streams) > 0:\n for stream in self.twitch_streams:\n message.append(stream[\"NAME\"] + \"\\n\")\n else:\n message.append(\"No streams found!\")\n message.append(\"```\")\n output = ''.join(message)\n await self.bot.say(output)\n else:\n await self.bot.send_message(ctx.message.author, \"You don't have permission to execute that command.\")", "async def tod_list(self, ctx, *args):\n message = \"__Currently Playing__\\n\"\n if len(self.players) == 0:\n message = \"There are currently no users playing.\"\n for player in self.players:\n message += f\"> {str(player)[:-5]}\\n\"\n await ctx.send(message)", "def get_all_playlist_videos( playlistURL ):\r\n \r\n request = youtube.playlistItems().list(\r\n part=\"contentDetails,id,snippet\",\r\n maxResults=50,\r\n playlistId=\"PLxgoClQQBFjgTMrhvedWk8Q_CVLWwy3ak\"\r\n )\r\n response = request.execute()", "def get_users(users_list):\n formatted_list = \",\".join(users_list)\n built_get = \"%s&steamids=%s\" % (GETPLAYERSUMM_URL, formatted_list)\n try:\n req = requests.get(built_get)\n if req.status_code == 200:\n response = json.loads(req.text)[\"response\"][\"players\"]\n players = {}\n for player in response:\n players[player[\"steamid\"]] = player\n return players\n except 
requests.exceptions.RequestException as req_exc:\n print(req_exc)\n\n return []", "def get_playlists(section):\n global parent_folder\n # 초기값은 빈 값\n print(f\" getting playlists for section: {section['title']}\")\n # section의 인자로 받은 title 출력\n soup = get_soup(section['link'])\n # section의 인자로 받은 link를 get_soup 함수 실행\n # i.e. https://www.youtube.com//user/gjenkinslbcc/playlists?view=50&sort=dd&shelf_id=2\n if soup is None: # no playlist, create dummy with default link\n url = f'{youtube_base}{parent_folder}{channel_name}/videos'\n return [\n {'title': 'No Playlists', 'link':url }]\n # soup값이 없을 시 [{'title': 'No Playlists', 'link':url }] return\n atags = soup('a', class_='yt-uix-tile-link')\n # a태그의 class가 'yt-uix-tile-link'인 값 파싱\n\n playlists = []\n for a in atags:\n title = a.text\n # atags를 통해 모든 플레이리스트의 title값 파싱\n if title != 'Liked videos': # liked videos 제외\n url = fix_url(a['href'])\n # atags를 통해 모든 플레이리스트 url값 파싱\n playlists.append({'title': title, 'link': url})\n #playlists에 모든 값 저장\n\n if not playlists: # 플레이리스트 없을 시\n url = f'{youtube_base}/{parent_folder}{channel_name}/videos'\n return [{'title': 'No Playlists', 'link': url}]\n\n return playlists", "def playlist(self, playlist_id: str, fields: str = None,\n market: str = 'from_token'):\n return self._get('playlists/' + playlist_id,\n fields=fields, market=market)", "def get_playlist_items(self):\n results = self.API.playlist(self.playlist_uri)\n return results[\"tracks\"][\"items\"]", "def user_list(self, mapp, url_of_liveserver):\n return mapp.getjson(url_of_liveserver)['result'].keys()", "def playlist(self):\n _LOGGER.debug(\"Fetching Playlist info\")\n parameters = {\n 'cmd': None,\n 'param3': 'playlist.json'\n }\n try:\n res = requests.get(url=self.url, headers=headers, params=parameters, timeout=self.timeout).json()\n except (ConnectionError, OSError) as e:\n _LOGGER.error(\"Fetching playlist info failed: %s\", e)\n res = None\n return res", "def get_playlist_uuids(self, *args):\n\n rsp = rsp_codes[0]\n rsp['playlist_uuids'] = self.playlists.keys()\n return rsp", "def user_playlist_tracks(\n self,\n playlist_id,\n fields=None,\n limit=100,\n offset=0,\n market=\"from_token\",\n **kwargs,\n ):\n _id = self._get_playlist_id(playlist_id)\n # pylint: disable=no-member\n return self._get(\n API.PLAYLIST_TRACKS.value.format(playlist_id=_id),\n limit=limit,\n offset=offset,\n fields=fields,\n market=market,\n **kwargs,\n )", "def user_list():\n\n users = User.query.all()\n \n return render_template(\"user_list.html\", users=users)", "def readSavedTracks(self)->'list':\n scope = 'user-library-read'\n self.saved_tracks_list = []\n self.sp_data = self.sp_client.Connect(scope)\n \n \n if self.sp_client.isConnected() == True:\n print('We are connected to Spotify!!!!')\n\n try:\n\n tracks_index = self.sp_data.current_user_saved_tracks()\n #adding tracks to the list\n self.saved_tracks_list = tracks_index['items']\n while tracks_index['next']:\n #reading and adding the Next tracks into the tracks list \n self.saved_tracks_list += self.sp_data.next(tracks_index)['items']\n # increasing the index to the correct placxe\n tracks_index = self.sp_data.next(tracks_index)\n \n self.isSavedTracksAvailable = True\n\n except ImportError:\n raise ImportError('There was a problem reading all the track list!!')\n \n else:\n print('Failed to connect to Spotify')\n self.isSavedTracksAvailable = False\n\n return self.saved_tracks_list", "def get_playlist_info(self, username, playlist_name):\n playlist_info = []\n playlist_id = 
self.get_playlist_id(username, playlist_name)\n playlist_items = self.spotify.playlist_tracks(playlist_id)\n for i in range(len(playlist_items['items'])):\n print(playlist_items['items'][i])\n playlist_info.append([playlist_items['items'][i]['track']['name'], \n playlist_items['items'][i]['track']['artists'][0]['name'],\n playlist_items['items'][i]['track']['album']['name']])\n while playlist_items['next']: # If there are more tracks\n playlist_items = self.spotify.next(playlist_items)\n for i in range(len(playlist_items['items'])):\n playlist_info.append([playlist_items['items'][i]['track']['name'], \n playlist_items['items'][i]['track']['artists'][0]['name'],\n playlist_items['items'][i]['track']['album']['name']])\n return playlist_info", "def create_playlist(access_token):\n request_body = json.dumps({\n \"name\": \"SpotiAdd\",\n \"description\": \"All Liked Youtube Videos\",\n \"public\": True\n })\n userId = getUserId(access_token)\n query = \"https://api.spotify.com/v1/users/{}/playlists\".format(\n userId)\n response = requests.post(\n query,\n data=request_body,\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {}\".format(access_token)\n }\n )\n response_json = response.json()\n # print(\"create_playlist_id : {}\".format(response_json),file = sys.stdout)\n return response_json[\"id\"]", "def user_list():\n\n users = User.query.all()\n\n return render_template(\"user_list.html\", users=users)", "def do_list(client, args):\n\trepos = client.repos.list(args.user)\n\tprint '%s has the following repositories:' % args.user\n\tprint 'Name - Description'\n\tfor repo in repos:\n\t\tprint '%s - %s' % (repo.name, repo.description)", "async def list(self, ctx, user: discord.Member=None):\n\n author = ctx.message.author\n\n if not user:\n user = author\n\n game_list = get_library()\n\n if check_key(user.id) and game_list.get(user.id).get(\"games\", False):\n user_game_list = get_library(user.id)\n\n message = pagify(\", \".join(sorted(user_game_list)), [', '])\n\n await self.bot.say(\"Please check your DM for the full list of games, {}.\".format(author.mention))\n await self.bot.send_message(author, \"{}'s games:\".format(user.mention))\n\n for page in message:\n await self.bot.send_message(author, (box(page)))\n else:\n await self.bot.say(\"{}, you do not have any games. 
Add one using `{p}game add <game_name>` and/or link your Steam profile with `{p}game steamlink <steam_id>`.\".format(user.mention, p=ctx.prefix))", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def view_shoppinglists(current_user):\n\n limit = request.args.get('limit', 10)\n q = request.args.get('q', None)\n page = int(request.args.get('page', 1))\n\n results = []\n shoplists = ShoppingList.query.filter_by(\n user_id=current_user.id)\n\n if q is not None:\n q = q.lower()\n shoplists = shoplists.filter(\n ShoppingList.name.like(\"%\" + q.strip() + \"%\"))\n # search_count = ShoppingList.query.filter_by(\n # user_id=current_user.id).filter(\n # ShoppingList.name.like(\"%\" + q.strip() + \"%\")).count()\n # # print('search: '+search_count)\n\n\n if limit:\n try:\n if int(limit):\n shoplists = shoplists.filter_by(\n user_id=current_user.id).paginate(page=page,\n per_page=int(\n limit), error_out=False).items\n\n for shoplist in shoplists:\n results.append(shoplist.json())\n\n if len(results) == 0:\n return response('failed', 'Shopping list not found', 404)\n return get_response('ShoppingLists', results, count=ShoppingList.query.filter_by(\n user_id=current_user.id).count(), page=page, limit=limit)\n\n except ValueError:\n return response('failed', 'Limit should be an integer', 400)\n\n for shoplist in shoplists.all():\n results.append(shoplist.json())\n\n if len(results) == 0:\n print(len(results))\n return response('failed', 'Shopping list not found', 404)\n return get_response('ShoppingLists', results, count=ShoppingList.query.filter_by(\n user_id=current_user.id).count, page=page, limit=limit)", "def user_list():\n\n users = User.query.all()\n return render_template(\"/user_list.html\", users=users)", "def list_users():\n check_admin()\n results = User.query.order_by(-User.id)\n return render_template('user_list.html', users=results)", "def GetPlaylists(self):\n return self.__playlists.copy()", "def list(ctx):\n if ctx.obj.get('NAMESPACE') != 'accounts':\n click.echo(\n click.style('Only account data is available for listing.', fg='red')\n )\n return\n\n swag = create_swag_from_ctx(ctx)\n accounts = swag.get_all()\n _table = [[result['name'], result.get('id')] for result in accounts]\n click.echo(\n tabulate(_table, headers=[\"Account Name\", \"Account Number\"])\n )", "def list(self, user_ids: Optional[List[UserId]]) -> List[U]:\n ...", "def cmd_pagetplaylists(self, data, client, cmd):\n for n, p in sorted(self._playlists.iteritems()):\n cmd.sayLoudOrPM(client, '%s - %s' % (n, p))\n time.sleep(1)", "def playListMode(self):\n print(\"[b blue]PlayList mode[/]\")\n while True:\n url = qr.text(\n \"Enter the URL for the PlayList:\", qmark=\"*\", multiline=True\n ).ask()\n\n try:\n requests.get(url)\n # resp.status_code\n except requests.ConnectionError as e:\n print(e)\n return\n else:\n print(\"URL validated successfully!\")\n break\n\n pathForSong = qr.path(\n \"Select the Folder to which you want to 
download the songs:\",\n only_directories=True,\n ).ask()", "def _retrieve_plays(self):\n try:\n recents = self._spotify._get(\"me/player/recently-played\", limit=50)\n except SpotifyException as se:\n if 'The access token expired' in se.msg:\n self._renew_tokens()\n recents = self._spotify._get(\"me/player/recently-played\", limit=50)\n else:\n raise\n self._plays = recents['items']", "def test_list_playlists_by_logged_in_user_with_organization_memberships(self):\n user = factories.UserFactory()\n\n org_1 = factories.OrganizationFactory()\n factories.OrganizationAccessFactory(\n user=user, organization=org_1, role=models.ADMINISTRATOR\n )\n playlist_1 = factories.PlaylistFactory(\n lti_id=\"playlist#one\", organization=org_1, title=\"First playlist\"\n )\n\n org_2 = factories.OrganizationFactory()\n factories.OrganizationAccessFactory(\n user=user, organization=org_2, role=models.ADMINISTRATOR\n )\n playlist_2 = factories.PlaylistFactory(\n lti_id=\"playlist#two\", organization=org_2, title=\"Second playlist\"\n )\n\n # User is not a member of this organization\n org_3 = factories.OrganizationFactory()\n factories.PlaylistFactory(organization=org_3)\n\n # User is member but as instructor of this organization\n org_4 = factories.OrganizationFactory()\n factories.OrganizationAccessFactory(\n user=user, organization=org_4, role=models.INSTRUCTOR\n )\n factories.PlaylistFactory(organization=org_4)\n\n jwt_token = UserAccessTokenFactory(user=user)\n\n response = self.client.get(\n \"/api/playlists/\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json()[\"count\"], 2)\n self.assertEqual(\n response.json()[\"results\"],\n [\n {\n \"consumer_site\": {\n \"id\": str(playlist_2.consumer_site.id),\n \"domain\": playlist_2.consumer_site.domain,\n \"name\": playlist_2.consumer_site.name,\n },\n \"created_by\": None,\n \"created_on\": playlist_2.created_on.isoformat().replace(\n \"+00:00\", \"Z\"\n ),\n \"duplicated_from\": None,\n \"id\": str(playlist_2.id),\n \"is_portable_to_consumer_site\": False,\n \"is_portable_to_playlist\": True,\n \"is_public\": False,\n \"lti_id\": \"playlist#two\",\n \"organization\": {\n \"id\": str(org_2.id),\n \"name\": org_2.name,\n },\n \"portable_to\": [],\n \"retention_duration\": None,\n \"title\": \"Second playlist\",\n \"users\": [],\n \"can_edit\": True,\n },\n {\n \"consumer_site\": {\n \"id\": str(playlist_1.consumer_site.id),\n \"domain\": playlist_1.consumer_site.domain,\n \"name\": playlist_1.consumer_site.name,\n },\n \"created_by\": None,\n \"created_on\": playlist_1.created_on.isoformat().replace(\n \"+00:00\", \"Z\"\n ),\n \"duplicated_from\": None,\n \"id\": str(playlist_1.id),\n \"is_portable_to_consumer_site\": False,\n \"is_portable_to_playlist\": True,\n \"is_public\": False,\n \"lti_id\": \"playlist#one\",\n \"organization\": {\n \"id\": str(org_1.id),\n \"name\": org_1.name,\n },\n \"portable_to\": [],\n \"retention_duration\": None,\n \"title\": \"First playlist\",\n \"users\": [],\n \"can_edit\": True,\n },\n ],\n )", "def _list_users(self):\n users = fileIO.load_json(\"users.json\")\n print(\"The list of users is as follows:\")\n for i in users:\n print(users[i][\"name\"])\n self._list_user_settings(users)", "def list_users():\n\ttry:\n\t\tusers_call = sc.api_call(\"users.list\")\n\t\tusers = []\n\t\tif users_call.get('ok'):\n\t\t\treturn users_call['members']\n\texcept:\n\t\tprint(\"users error\")\n\treturn None", "def 
test_list_all_bucektlists_for_authenticated_user(self):\n\n response = self.client.get(\n \"/bucketlists/\",\n headers={'Authorization': self.user_token}\n )\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.data, '[]\\n')", "def createspotifyplaylist(accesstoken, name, playlists, tracklist, userid):\n\n # find a unique name for the playlist\n playlistname = \"{} - flowed\".format(name)\n if playlistname in playlists:\n num = 1\n playlistname = \"{} - flowed ({})\".format(name, num)\n while playlistname in playlists:\n num = num + 1\n playlistname = \"{} - flowed ({})\".format(name, num)\n\n # create playlist\n headers = {}\n headers[\"Authorization\"] = \"Bearer {}\".format(accesstoken)\n headers[\"Content-Type\"] = \"application/json\"\n\n payload = {}\n payload[\"name\"] = playlistname\n\n url = \"https://api.spotify.com/v1/users/{}/playlists\".format(userid)\n\n r = requests.post(url, headers=headers, json=payload)\n\n response = r.json()\n\n\n if \"collaborative\" not in response:\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n retry = True\n while retry:\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n r = requests.post(url, headers=headers, json=payload)\n response = r.json()\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n continue\n else:\n print(\"error: problem creating spotify playlist\")\n print(response[\"error\"])\n return(False)\n elif \"collaborative\" in response:\n break\n else:\n print(\"error: problem creating spotify playlist\")\n print('no error response')\n return(False)\n else: \n print(\"error: problem creating spotify playlist\")\n print(response[\"error\"])\n return(False)\n else:\n print(\"error: problem creating spotify playlist\")\n print('no error response')\n return(False)\n\n playlistid = response[\"id\"]\n playlisturl = response[\"external_urls\"][\"spotify\"]\n\n # add tracks to playlist\n while len(tracklist) > 100:\n\n # add first 100\n headers = {}\n headers[\"Authorization\"] = \"Bearer {}\".format(accesstoken)\n headers[\"Content-Type\"] = \"application/json\"\n\n payload = {}\n payload[\"uris\"] = tracklist[:100]\n\n r = requests.post(\"https://api.spotify.com/v1/users/{}/playlists/{}/tracks\"\n .format(userid, playlistid),\n headers=headers,\n json=payload)\n\n response = r.json()\n if \"snapshot_id\" not in response:\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n continue\n else:\n print(\"error: problem adding songs to playlist\")\n print(response[\"error\"])\n return(False)\n else:\n print(\"error: problem adding songs to playlist\")\n print(\"no error response\")\n return(False)\n\n tracklist = tracklist[100:]\n\n if tracklist:\n\n # add the remainder of the tracks\n headers = {}\n headers[\"Authorization\"] = \"Bearer {}\".format(accesstoken)\n headers[\"Content-Type\"] = \"application/json\"\n\n payload = {}\n payload[\"uris\"] = tracklist\n\n r = requests.post(\"https://api.spotify.com/v1/users/{}/playlists/{}/tracks\"\n .format(userid, playlistid),\n headers=headers,\n json=payload)\n\n response = r.json()\n if \"snapshot_id\" not in response:\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n retry = True\n while retry:\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n r = requests.post(\"https://api.spotify.com/v1/users/{}/playlists/{}/tracks\"\n .format(userid, playlistid),\n headers=headers,\n json=payload)\n response = r.json()\n if \"snapshot_id\" in 
response:\n break\n elif response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n continue\n else:\n print(\"error: createspotifyplaylist request failed\")\n print(response[\"error\"])\n return(False)\n else:\n print(\"error: createspotifyplaylist request failed\")\n print(\"no error response\")\n return(False)\n else:\n print(\"error: createspotifyplaylist request failed\")\n print(response[\"error\"])\n return(False)\n else:\n print(\"error: createspotifyplaylist request failed\")\n print(\"no error response\")\n return(False)\n\n return(playlistname, playlisturl)", "def show_playlist(self, playlist_name):\n playlist_id = playlist_name.lower()\n if not playlist_id in self.playlists.keys():\n print(f\"Cannot show playlist {playlist_name}: Playlist does not exist\")\n return\n\n playlist = self.playlists.get(playlist_id)\n videos = playlist.videos\n\n if len(videos) == 0:\n print(f\"Showing playlist: {playlist_name}\")\n print(\"No videos here yet\")\n return\n\n print(f\"Showing playlist: {playlist_name}\")\n for video_id in videos:\n print(self._video_library.get_video(video_id))\n return", "async def async_setup(hass, config):\n conf = config[DOMAIN]\n\n username = conf[CONF_USERNAME]\n password = conf[CONF_PASSWORD]\n accounts = conf.get(CONF_ACCOUNTS)\n\n @callback\n def websocket_handle_playlists(hass, connection, msg):\n \"\"\"Handle get playlist\"\"\"\n import spotipy\n access_token, expires = get_spotify_token(username=username, password=password)\n client = spotipy.Spotify(auth=access_token)\n resp = client._get('views/made-for-x?content_limit=10&locale=en&platform=web&types=album%2Cplaylist%2Cartist%2Cshow%2Cstation', limit=10,\n offset=0)\n connection.send_message(\n websocket_api.result_message(msg[\"id\"], resp)\n )\n\n def get_spotify_token(username, password):\n import spotify_token as st\n data = st.start_session(username, password)\n access_token = data[0]\n # token_expires = data[1]\n expires = data[1] - int(time.time())\n return access_token, expires\n\n def play(client, spotify_device_id, uri, random_song, repeat):\n # import spotipy\n # import http.client as http_client\n # spotipy.trace = True\n # spotipy.trace_out = True\n # http_client.HTTPConnection.debuglevel = 1\n\n _LOGGER.debug('Version: %s, playing URI: %s on device-id: %s', _VERSION, uri, spotify_device_id)\n if uri.find('track') > 0:\n _LOGGER.debug('Playing track using uris= for uri: %s', uri)\n client.start_playback(device_id=spotify_device_id, uris=[uri])\n else:\n if uri == 'random':\n _LOGGER.debug('Cool, you found the easter egg with playing a random playlist')\n playlists = client.user_playlists('me', 50)\n no_playlists = len(playlists['items'])\n uri = playlists['items'][random.randint(0, no_playlists - 1)]['uri']\n kwargs = {'device_id': spotify_device_id, 'context_uri': uri}\n if random_song:\n results = client.user_playlist_tracks(\"me\", uri)\n position = random.randint(0, results['total'] - 1)\n _LOGGER.debug('Start playback at random position: %s', position)\n kwargs['offset'] = {'position': position}\n\n _LOGGER.debug('Playing context uri using context_uri for uri: \"%s\" (random_song: %s)', uri, random_song)\n client.start_playback(**kwargs)\n if repeat:\n _LOGGER.debug('Turning repeat on')\n time.sleep(5)\n client.repeat(state=repeat, device_id=spotify_device_id)\n\n def get_account_credentials(call):\n \"\"\" Get credentials for account \"\"\"\n account = call.data.get(CONF_SPOTIFY_ACCOUNT)\n user = username\n pwd = password\n if account is not None:\n _LOGGER.debug('setting up 
with different account than default %s', account)\n user = accounts.get(account).get(CONF_USERNAME)\n pwd = accounts.get(account).get(CONF_PASSWORD)\n return user, pwd\n\n def shouldTransferPlayback(call, client):\n \"\"\" Check if something is playing \"\"\"\n uri = call.data.get(CONF_SPOTIFY_URI)\n if uri is None or uri.strip() == '' or call.data.get(CONF_TRANSFER_PLAYBACK):\n current_playback = client.current_playback()\n if current_playback is not None:\n _LOGGER.debug('current_playback from spotipy: %s', current_playback)\n return True\n return False\n\n async def start_casting(call):\n \"\"\"service called.\"\"\"\n import spotipy\n\n uri = call.data.get(CONF_SPOTIFY_URI)\n random_song = call.data.get(CONF_RANDOM, False)\n repeat = call.data.get(CONF_REPEAT)\n\n # Account\n user, pwd = get_account_credentials(call)\n\n # login as real browser to get powerful token\n access_token, expires = get_spotify_token(username=user, password=pwd)\n\n # get the spotify web api client\n client = spotipy.Spotify(auth=access_token)\n\n # launch the app on chromecast\n spotify_cast_device = SpotifyCastDevice(hass, call.data.get(CONF_DEVICE_NAME), call.data.get(CONF_ENTITY_ID))\n spotify_cast_device.startSpotifyController(access_token, expires)\n spotify_device_id = spotify_cast_device.getSpotifyDeviceId(client)\n\n transfer_playback = shouldTransferPlayback(call, client)\n if transfer_playback == True:\n _LOGGER.debug('Transfering playback')\n client.transfer_playback(\n device_id=spotify_device_id, force_play=True)\n else:\n play(client, spotify_device_id, uri, random_song, repeat)\n\n # Register websocket and service\n hass.components.websocket_api.async_register_command(\n WS_TYPE_SPOTCAST_PLAYLISTS, websocket_handle_playlists, SCHEMA_PLAYLISTS\n )\n\n hass.services.async_register(DOMAIN, 'start', start_casting,\n schema=SERVICE_START_COMMAND_SCHEMA)\n\n return True", "def list_users():\n\n users = User.query.order_by(\"last_name\").all()\n return render_template(\"users/user_list.html\", users=users)", "def create_playlist():\n sp = credentials()\n sp.user_playlist_create('truetiming', name='Billboard Hot 100')", "def test_list_playlists_for_organization_by_logged_in_user_with_organization_memberships(\n self,\n ):\n user = factories.UserFactory()\n\n org_1 = factories.OrganizationFactory()\n factories.OrganizationAccessFactory(\n user=user, organization=org_1, role=models.ADMINISTRATOR\n )\n playlist_1 = factories.PlaylistFactory(\n lti_id=\"playlist#eleven\", organization=org_1, title=\"First playlist\"\n )\n\n # User is a member of this organization, but it is not included in the request below\n org_2 = factories.OrganizationFactory()\n factories.OrganizationAccessFactory(\n user=user, organization=org_2, role=models.ADMINISTRATOR\n )\n factories.PlaylistFactory(organization=org_2, title=\"Second playlist\")\n\n # User is not a member of this organization\n org_3 = factories.OrganizationFactory()\n factories.PlaylistFactory(organization=org_3)\n\n # User is member but as instructor of this organization\n org_4 = factories.OrganizationFactory()\n factories.OrganizationAccessFactory(\n user=user, organization=org_4, role=models.INSTRUCTOR\n )\n factories.PlaylistFactory(organization=org_4)\n\n jwt_token = UserAccessTokenFactory(user=user)\n\n response = self.client.get(\n f\"/api/playlists/?organization={str(org_1.id)}\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json()[\"count\"], 1)\n self.assertEqual(\n 
response.json()[\"results\"],\n [\n {\n \"consumer_site\": {\n \"id\": str(playlist_1.consumer_site.id),\n \"domain\": playlist_1.consumer_site.domain,\n \"name\": playlist_1.consumer_site.name,\n },\n \"created_by\": None,\n \"created_on\": playlist_1.created_on.isoformat().replace(\n \"+00:00\", \"Z\"\n ),\n \"duplicated_from\": None,\n \"id\": str(playlist_1.id),\n \"is_portable_to_consumer_site\": False,\n \"is_portable_to_playlist\": True,\n \"is_public\": False,\n \"lti_id\": \"playlist#eleven\",\n \"organization\": {\n \"id\": str(org_1.id),\n \"name\": org_1.name,\n },\n \"portable_to\": [],\n \"retention_duration\": None,\n \"title\": \"First playlist\",\n \"users\": [],\n \"can_edit\": True,\n },\n ],\n )", "def get_watchlists(user_id):\n # user = User.query.get(user_id)\n\n watchlists = Watchlist.query.filter(Watchlist.user_id == user_id).all()\n\n return watchlists", "def test_listuser():\n url = baseUrl + userurl + listurl\n logging.info(\"List users\")\n r = requests.get(url, headers=header)\n assert r.status_code == 200\n resp = r.json()\n global user_ids\n user_ids = []\n if resp is None:\n pass\n else:\n user_num = len(resp)\n for k in range(0, user_num):\n assert resp[k]['subscriptionIds'][0] == subscriptionid\n if resp[k][\"isActive\"] is True:\n user_ids.append(resp[k][\"id\"])\n print (user_ids)\n assert user_id in user_ids", "def show_playlist(self, playlist_name):\n if playlist_name.lower() not in self._playlists:\n print(f\"Cannot show playlist {playlist_name}: Playlist does not exist\")\n return\n playlist = self._playlists[playlist_name.lower()]\n print(f\"Showing playlist: {playlist_name}\")\n if not playlist.videos:\n print(\"No videos here yet\")\n for video in playlist.videos:\n print(video)", "def list_users():\n\n db_users = User.query.all()\n\n return render_template(\"list_users.html\", headline=\"Blogly Users\", users=db_users)", "def source_list(self):\n return self._playlists" ]
[ "0.7721551", "0.76566833", "0.7571428", "0.74551755", "0.74398583", "0.7157419", "0.70816904", "0.7019386", "0.7003413", "0.6947375", "0.69466627", "0.6937739", "0.6878575", "0.67284", "0.6687034", "0.6654773", "0.66512775", "0.6564396", "0.63236004", "0.6259983", "0.6246766", "0.6204369", "0.6197756", "0.6172113", "0.6141708", "0.61111027", "0.60696745", "0.6059903", "0.6051912", "0.604217", "0.602885", "0.6001851", "0.5988682", "0.5979663", "0.59498465", "0.5943213", "0.59416395", "0.59258485", "0.59256864", "0.59248215", "0.5906498", "0.5901547", "0.58921903", "0.5879833", "0.58642834", "0.58640635", "0.5839991", "0.58387554", "0.58269954", "0.581745", "0.5810844", "0.5808602", "0.5806034", "0.5788386", "0.57882804", "0.57854867", "0.5779079", "0.5764951", "0.57636404", "0.57626635", "0.57583123", "0.5756416", "0.5755861", "0.57519484", "0.5750162", "0.5747918", "0.57476044", "0.57454944", "0.57440406", "0.57326573", "0.5724374", "0.5724374", "0.5724374", "0.5724374", "0.5724374", "0.5724374", "0.5714776", "0.5714637", "0.5709813", "0.57000893", "0.5672879", "0.56609374", "0.56498545", "0.5646033", "0.5638994", "0.5631991", "0.5631402", "0.5627661", "0.56248856", "0.5623594", "0.5623211", "0.56222034", "0.56194687", "0.5602534", "0.5600936", "0.55967104", "0.5593167", "0.5581596", "0.5567933", "0.5560744" ]
0.72365195
5
[list all the songs for a given playlist id]
def list_pl_songs(self, pl_id, user=None):
    if user:
        res = self.sp.user_playlist_tracks(user, pl_id)
    else:
        res = self.sp.user_playlist_tracks(self.user, pl_id)
    song_uri_ls = [song['track']['uri'] for song in res['items']]
    song_ls = []
    for i, song in enumerate(res['items']):
        song_ls.append([i, song['track']['name'][0:20].strip(),
                        song['track']['album']['name'][0:20].strip(),
                        "%0.2f" % (song['track']['duration_ms'] / 60000),
                        song['track']['popularity']])
    return song_uri_ls, song_ls
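For context, the snippet below is a minimal illustrative sketch, not part of the dataset row itself: it shows how the same spotipy calls used in list_pl_songs above could be exercised directly. The OAuth scope, the user id, and the playlist id are assumptions chosen for illustration.

# Illustrative only: standalone version of the calls wrapped by list_pl_songs above.
# "example_user", the scope, and the playlist id are hypothetical placeholders;
# spotipy reads client credentials from the usual SPOTIPY_* environment variables.
import spotipy
from spotipy.oauth2 import SpotifyOAuth

sp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope="playlist-read-private"))
res = sp.user_playlist_tracks("example_user", "37i9dQZF1DXcBWIGoYBM5M")

song_uris = [item['track']['uri'] for item in res['items']]
for i, item in enumerate(res['items']):
    track = item['track']
    print(i, track['name'][:20].strip(),
          track['album']['name'][:20].strip(),
          "%0.2f" % (track['duration_ms'] / 60000),
          track['popularity'])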
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_playlist_songs(self, playlist_id):\n url = get_playlist_url(playlist_id)\n result = self.get_request(url)\n return result['result']['tracks'], result['result']['name']", "def get_playlist_songs(self, playlist_id):\n values = {'action' : 'playlist_songs',\n 'filter' : playlist_id,\n }\n root = self.__call_api(values)\n songs = root.getElementsByTagName('song')\n if not songs:\n return None\n l= []\n try:\n for song in songs:\n song_id = int(song.getAttribute('id'))\n song_title = song.getElementsByTagName('title')[0].childNodes[0].data\n artist_id = int(song.getElementsByTagName('artist')[0].getAttribute('id'))\n artist_name = song.getElementsByTagName('artist')[0].childNodes[0].data\n album_id = int(song.getElementsByTagName('album')[0].getAttribute('id'))\n album_name = song.getElementsByTagName('album')[0].childNodes[0].data\n\n song_track = int(song.getElementsByTagName('track')[0].childNodes[0].data)\n song_time = int(song.getElementsByTagName('time')[0].childNodes[0].data)\n song_size = int(song.getElementsByTagName('size')[0].childNodes[0].data)\n\n try: # New Ampache puts nothing here...\n precise_rating = int(song.getElementsByTagName('preciserating')[0].childNodes[0].data)\n except:\n precise_rating = 0\n try:\n rating = float(song.getElementsByTagName('rating')[0].childNodes[0].data)\n except:\n rating = 0\n art = song.getElementsByTagName('art')[0].childNodes[0].data\n url = song.getElementsByTagName('url')[0].childNodes[0].data\n song_dict = {\n 'song_id' : song_id,\n 'song_title' : song_title,\n 'artist_id' : artist_id,\n 'artist_name' : artist_name,\n 'album_id' : album_id,\n 'album_name' : album_name,\n 'song_track' : song_track,\n 'song_time' : song_time,\n 'song_size' : song_size,\n 'precise_rating' : precise_rating,\n 'rating' : rating,\n 'art' : art,\n 'url' : url,\n }\n l.append(song_dict)\n except:\n print(\"This playlist failed\", playlist_id)\n traceback.print_exc()\n return None\n return l", "def get_playlist_tracks(playlist_id):\n\n results = spotifyObject.playlist_tracks(playlist_id)\n tracks = results['items']\n while results['next']:\n results = spotifyObject.next(results)\n tracks.extend(results['items'])\n return tracks", "def getTracks(playlist_id):\n\n tracks = crud.getTracks(session, playlist_id)\n\n return tracks", "def playlist(self):\n def iconv(s):\n encoding = self.options[\"id3_encoding\"]\n try:\n if encoding:\n return s.encode('latin1').decode(encoding).encode('utf-8')\n else:\n return s.encode('latin1')\n except UnicodeEncodeError:\n return \"\"\n\n lst = []\n r = self.x.playlist_list_entries()\n r.wait()\n for id in r.get_list():\n r = self.x.medialib_get_info(id)\n r.wait()\n if r.iserror():\n print r.get_error()\n lst.append(' ')\n continue\n song = r.get_propdict()\n try:\n artist = iconv(song[('plugin/id3v2', 'artist')])\n except KeyError:\n try:\n artist = iconv(song[('plugin/mad', 'artist')])\n except KeyError:\n artist = ''\n try:\n title = iconv(song[('plugin/id3v2', 'title')])\n except KeyError:\n try:\n title = iconv(song[('plugin/mad', 'title')])\n except KeyError:\n title = ''\n if artist == \"\" and title == \"\":\n name = os.path.split(song[('server', 'url')])[1]\n name = os.path.splitext(name)[0]\n name = urllib.unquote(name.decode('utf-8').encode('latin1'))\n name = name.replace(\"+\", \" \")\n lst.append(' ' + name)\n else:\n lst.append(' %s - %s' % (artist.ljust(6), title))\n\n return lst", "def get_playlist_by_id(self, request):\n pl = Playlist.find_by_id(request.pid)\n response = PlaylistResponse(pid=pl.key.id(),\n 
name=pl.name,\n songs=[])\n songs = Song.find_by_playlist(pl.key).fetch()\n for song in songs:\n response.songs.append(SongMessage(id=song.key.id(),\n spotify_id=song.spotify_id,\n name=song.name,\n vote_count=song.vote_count))\n return response", "def playlist(self, playlist_id: str, fields: str = None,\n market: str = 'from_token'):\n return self._get('playlists/' + playlist_id,\n fields=fields, market=market)", "def get_playlist(self, object_id):\n return self.get_object(\"playlist\", object_id)", "def get_track_ids_of_playlist(self, playlist_id):\n def get_playlist_data(url):\n req = requests.get(url, headers=self.__header_bearer())\n return req.json() if req.status_code == 200 else False\n\n track_uris = []\n\n endpoint = f'/playlists/{playlist_id}/tracks'\n url = f'{self.api_base_url}{endpoint}'\n\n playlist_data = get_playlist_data(url)\n while True:\n if not playlist_data:\n break\n\n for track in playlist_data['items']:\n track_uris.append(track['track']['uri'])\n\n if not playlist_data['next']:\n break\n else:\n time.sleep(0.5)\n playlist_data = get_playlist_data(playlist_data['next'])\n return track_uris", "def get_album_songs(self, album_id):\n url = get_album_url(album_id)\n result = self.get_request(url)\n\n return result['album']['songs']", "def spotify_playlist_as_json_tracks(playlist_id: int, access_token: str) -> list:\n query_url = \"https://api.spotify.com/v1/playlists/{}/tracks\".format(playlist_id)\n query_headers = {\"Authorization\": \"Bearer {}\".format(access_token)}\n # Get playlist tracks\n tracks_response = requests.get(query_url, headers=query_headers)\n if tracks_response.status_code != 200:\n return tracks_response.reason\n tracks_json = tracks_response.json()\n if \"error_description\" in tracks_json:\n return []\n # Get list of tracks\n tracks = []\n while \"next\" in tracks_json and tracks_json[\"next\"] is not None:\n for t in tracks_json[\"items\"]:\n tracks.append(t[\"track\"])\n tracks_json = requests.get(tracks_json[\"next\"], headers=query_headers).json()\n return tracks", "def playlist_track_ids(playlist_id, authorizer, verbose=False):\n spotify_endpoint = f'https://api.spotify.com/v1/playlists/{playlist_id}/tracks'\n params = {'fields':'items(track(id)),next,total'} # only get id's of tracks, and total number of tracks in playlist\n headers = {\"Accept\":\"application/json\", \"Content-Type\":\"application/json\", \"Authorization\": \"Bearer {bearer}\".format(bearer=authorizer.bearer)}\n\n tracks = None\n index = 0\n \n # stops when no more pages left\n while spotify_endpoint:\n response = requests.get(spotify_endpoint, params=params, headers=headers)\n\n if response.status_code == 200:\n data = response.json()\n \n # allocate array for tracks\n if tracks is None:\n tracks = [''] * data['total']\n \n # add tracks to array\n for track in data['items']:\n i = track['track']['id']\n tracks[index] = i\n index += 1\n\n # move forward in paging\n spotify_endpoint = data['next']\n elif response.status_code == 429:\n limit = int(response.headers['Retry-After'])\n print('Hit rate limit, waiting for {} seconds to continue'.format(limit))\n time.sleep(limit)\n elif response.status_code == 401:\n print('Access token expired, refreshing...')\n authorizer.refresh()\n else:\n print('Error %d' % response.status_code)\n if verbose:\n print(json.loads(response.text))\n return None\n\n return [t for t in tracks if t is not None] # filter out null tracks", "def get_all_songs_in_pl(self, playlist):\n to_send = self.db.get_songs(playlist)\n to_send = 
DOLLAR.join(to_send)\n self.send_message(to_send)", "def playlistid(self, track_id=None):\n track_id = '' if track_id is None else track_id\n lines = yield from self.command('playlistid {}'.format(track_id))\n return parse_playlist(lines)", "async def playlist(self, ctx, *, query):\n # Setup the headers with the token that should be here\n headers = {\"Authorization\": \"Bearer {}\".format(self._token)}\n opts = {\"q\": query, \"type\": \"playlist\"}\n url = \"https://api.spotify.com/v1/search\"\n response = await utils.request(url, headers=headers, payload=opts)\n try:\n await ctx.send(\n response.get(\"playlists\")\n .get(\"items\")[0]\n .get(\"external_urls\")\n .get(\"spotify\")\n )\n except (KeyError, AttributeError, IndexError):\n await ctx.send(\"Couldn't find a song for:\\n{}\".format(query))", "async def get_playlist(self, part=\"snippet\", max_results=7, playlist_id=\"\", playlist_url=\"\"):\n\n url = self.url_api.get_playlist_url(playlist_id, part, max_results, playlist_url)\n\n response = await self.session.get(url)\n search_results = await response.json()\n return search_results", "def get_playlist_items(self, playlist_id):\n return_val = []\n max_window = 50\n if self.youtube is None:\n self.youtube = __get_client()\n count = 1\n response = self.youtube.playlistItems()\\\n .list(part=\"snippet\",\n playlistId=playlist_id,\n maxResults=max_window).execute()\n return_val = return_val + response['items']\n while 'nextPageToken' in response:\n # response has nextPageToken and prevPageToken properties\n response = self.youtube.playlistItems()\\\n .list(part=\"snippet\",\n playlistId=playlist_id,\n maxResults=max_window,\n pageToken=response['nextPageToken'])\\\n .execute()\n return_val = return_val + response['items']\n return return_val", "def get_songs_of_artist(self, artist_id: int):\n\t\tartist = self.db.artists.find_one({'id': artist_id})\n\t\treturn artist['songs']", "def get_songs_by_album(self, album_id):\n return self.__get('song', album_id)", "def get_song(_id):\r\n return [Song.song_json(Song.query.filter_by(id=_id).first())]\r\n # Song.song_json() coverts our output to the json format defined earlier\r\n # the filter_by method filters the query by the id\r\n # since our id is unique we will only get one result\r\n # the .first() method will get that first value returned\r", "def get_playlist_by_id(cls, id):\n try:\n return cls._playlists_by_id[id]\n except KeyError:\n return None", "def get_playlist_items(self):\n results = self.API.playlist(self.playlist_uri)\n return results[\"tracks\"][\"items\"]", "def get_playlist_tracks(user, playlist_id, limit=100):\n info_dict = spotify.user_playlist_tracks(user, playlist_id, limit=limit)\n items = info_dict[\"items\"]\n tracks = []\n for i in range(len(items)):\n album_name = items[i][\"track\"][\"album\"][\"name\"]\n album_type = items[i][\"track\"][\"album\"][\"album_type\"]\n artists_names = ', '.join([\n items[i][\"track\"][\"artists\"][index][\"name\"]\n for index in range(len(items[i][\"track\"][\"artists\"]))\n ])\n track_name = items[i][\"track\"][\"name\"]\n popularity = items[i][\"track\"][\"popularity\"]\n track_id = items[i][\"track\"][\"id\"]\n tracks.append({\"Album Name\": album_name,\n \"Album Type\": album_type,\n \"Artist(s)\": artists_names,\n \"Track Name\": track_name,\n \"Popularity\": popularity,\n \"Track ID\": track_id\n })\n tracks.sort(key=lambda d: d['Popularity'], reverse=True)\n return tracks", "def get_song_list(self):\n return self.song_list", "def get_song_ids(self, playlist_link: str) -> 
List[str]:\n user = self.init_user()\n playlist_id = self.parse_link_to_id(playlist_link)\n res = user.playlist_items(playlist_id,\n offset=0,\n fields='items.track.id',\n additional_types=['track'])['items']\n return [item['track']['id'] for item in res]", "def get_playlists_from(category_id):\n playlist_uris = []\n for item in spotify.category_playlists(category_id)['playlists']['items']:\n playlist_uris.append(item['uri'])\n\n return playlist_uris", "def get_playlists(search_string=None):\n item_type = 'playlists'\n info_dict = spotify.category_playlists(search_string)\n items = info_dict[item_type][\"items\"]\n playlists = []\n for i in range(len(items)):\n playlist_name = items[i][\"name\"]\n owner_name = items[i][\"owner\"][\"display_name\"]\n total_tracks = items[i][\"tracks\"][\"total\"]\n playlist_id = items[i][\"id\"]\n owner_id = items[i][\"owner\"][\"id\"]\n playlists.append({\"Playlist Name\": playlist_name,\n \"Owner Name\": owner_name,\n \"No. of tracks\": total_tracks,\n \"Playlist ID\": playlist_id,\n \"Owner ID\": owner_id\n })\n return playlists", "def playlist(self):\n _LOGGER.debug(\"Fetching Playlist info\")\n parameters = {\n 'cmd': None,\n 'param3': 'playlist.json'\n }\n try:\n res = requests.get(url=self.url, headers=headers, params=parameters, timeout=self.timeout).json()\n except (ConnectionError, OSError) as e:\n _LOGGER.error(\"Fetching playlist info failed: %s\", e)\n res = None\n return res", "def playlist_tracks(self, playlist_id: str, fields: str = None,\n market: str = 'from_token', limit: int = 100,\n offset: int = 0):\n return self._get(f'playlists/{playlist_id}/tracks', limit=limit,\n offset=offset, fields=fields, market=market)", "def playlist_videos(playlist_id):\r\n url = PLAYLIST_ITEMS_URL.format(API_KEY, playlist_id)\r\n response = util.web.http_get(url=url, json=True, referer='https://tinychat.com')\r\n\r\n if response['json'] is not None:\r\n video_list = []\r\n # next_page_token = response['json']['nextPageToken']\r\n try:\r\n if 'items' in response['json']:\r\n for item in response['json']['items']:\r\n video_id = item['snippet']['resourceId']['videoId']\r\n details = video_details(video_id)\r\n if details is not None:\r\n info = {\r\n 'type': 'youTube',\r\n 'video_id': video_id,\r\n 'video_title': details['video_title'],\r\n 'video_time': details['video_time']\r\n }\r\n video_list.append(info)\r\n return video_list\r\n except KeyError as ke:\r\n log.error(ke, exc_info=True)\r\n return None", "def web_archive_import_playlist(id):\n\n db = get_db()\n user_id = flask.session['user']['id']\n\n for item in yt_get_playlist_items(id):\n video_id = item['snippet']['resourceId']['videoId']\n video = yt_get_video(video_id)\n channel_id = video['snippet']['channelId']\n\n archive = None\n for playlist in db_get_archives():\n if playlist['contentDetails']['itemCount'] < 5000:\n archive = playlist\n break\n\n if archive is None:\n archive = yt_create_playlist()\n\n if yt_insert_to_playlist(video_id, archive['id']):\n if channel_id not in db[user_id]:\n db[user_id][channel_id] = {\n 'played': {}, 'archived': {}\n }\n db[user_id][channel_id]['archived'][video_id] = archive['id']\n update_db(db)", "def get_playlists(self):\n values = {\n 'action' : 'playlists',\n }\n root = self.__call_api(values)\n nodes = root.getElementsByTagName('playlist')\n if not nodes: # list is empty, reauth\n return None\n\n l = []\n try:\n for child in nodes:\n id = int(child.getAttribute('id'))\n name = child.getElementsByTagName('name')[0].childNodes[0].data\n owner = 
child.getElementsByTagName('owner')[0].childNodes[0].data\n items = int(child.getElementsByTagName('items')[0].childNodes[0].data)\n type = child.getElementsByTagName('type')[0].childNodes[0].data\n\n d = {\n 'id' : id,\n 'name' : name,\n 'items' : items,\n 'owner' : owner,\n 'type' : type,\n }\n l.append(d)\n except: #something failed\n traceback.print_exc()\n return []\n return l", "def get_yt_playlist(play_list_id):\n request = Request(f\"https://www.youtube.com/playlist?list={play_list_id}&hl=en\", headers=_HEADERS)\n\n with urlopen(request, timeout=_TIMEOUT) as resp:\n data = gzip.decompress(resp.read()).decode(\"utf-8\")\n parser = PlayListParser()\n parser.feed(data)\n return parser.header, parser.playlist", "def search_for_tracks(album_id):\n \n track_results = spotifyObject.album_tracks(album_id)\n track_results = track_results['items']\n ids = [track['id'] for track in track_results]\n\n return ids", "def get_playlist_tracks(playlist):\n track_ids = [id for id in load_from_json(f\"playlist_{playlist}.json\") if id is not None]\n tracks = []\n\n for i in range(0, len(track_ids), 50):\n tracks_info = sp.tracks(track_ids[i: i+50])['tracks']\n for track in tracks_info:\n if track:\n tracks.append({\n 'id': track['id'],\n 'name': track['name'],\n 'popularity': track['popularity']\n })\n df = pd.DataFrame(tracks)\n\n file = f\"playlist_{playlist}_df.csv\"\n df.to_csv(file)\n\n return file", "def get_all_songs():\r\n return [Song.song_json(song) for song in Song.query.all()]", "def get_playlist_data(id, fetch_all_videos=False, feed='playlists'):\n total = 0\n seen = []\n videos = []\n params = {'start-index': 1, 'max-results': (50 if fetch_all_videos else 1)}\n while True:\n youtube_data = _youtube_feed(feed, id, params)['feed']\n total = youtube_data['openSearch$totalResults']['$t']\n limit = min(total, app.config.get('YOUTUBE_IMPORT_LIMIT', 100))\n entries = youtube_data.get('entry', [])\n for entry in entries:\n video = _get_video_data(entry, id)\n if video.source_videoid not in seen and not video.restricted:\n videos.append(video)\n seen.append(video.source_videoid)\n if entries and fetch_all_videos and len(videos) < limit:\n params['start-index'] += params['max-results']\n continue\n break\n links = dict((l['rel'], l['href']) for l in youtube_data['link'])\n if 'hub' in links:\n # strip extraneous query params from topic url\n topic_url = links['self'].split('?', 1)[0] + '?v=2'\n push_config = PushConfig(links['hub'], topic_url)\n else:\n push_config = None\n return Playlist(youtube_data['title']['$t'], total, videos, push_config)", "def pc_get_music_list_by_id(self, device_udn, parent_id, start_index, list_count):\n response = self.get(COMMAND_UIC, 'PCGetMusicListByID', [\n ('device_udn', device_udn),\n ('filter', 'folder'),\n ('parentid', str(parent_id)),\n ('liststartindex', int(start_index)),\n ('listcount', int(list_count)),\n ])\n\n if not int(response['listcount']):\n return []\n\n return response_list(response['musiclist']['music'])", "def selectPlay(id):\n\tsong = music.song()\n\tsql = \"SELECT id, title, path, filename, hash, base FROM songs \" \\\n\t\t+ \"WHERE id = \" + str(id) + \";\"\n\tc, conn = connect()\n\tc.execute(sql)\n\tsinfo = c.fetchone()\n\t\n\tif sinfo[0]:\n\t\tsong.id = sinfo[0]\n\tif sinfo[1]:\n\t\tsong.name = sinfo[1]\n\tif sinfo[2]:\n\t\tsong.path = sinfo[2]\n\tif sinfo[3]:\n\t\tsong.filename = sinfo[3]\n\tif sinfo[4]:\n\t\tsong.hash = sinfo[4]\n\tif sinfo[5]:\n\t\tsong.base = sinfo[5]\n\t\n\treturn song", "def from_id(id):\n response = 
settings.database.get_item(Key={'id': id})\n raise_for_response(response)\n if not \"Item\" in response.keys():\n raise NotFoundException(\"Playlist with id \" + str(id) + \" couldn't be found\")\n playlist = Playlist()\n playlist.init_from_body(response[\"Item\"])\n return playlist", "def simple_songs_list(name_of_album):\r\n songs = []\r\n data1 = dbase()\r\n data1 = data1[name_of_album][0]\r\n for song in data1.keys():\r\n songs += [song]\r\n return songs", "def get_playlist_contents(playlist_id, user_id, limit=100):\n\ttoken = get_token()\n\theaders = {'Authorization': 'Bearer ' + token}\n\tbase_url = SPOTIFY_API_HOST + 'users/{0}/playlists/{1}/tracks?limit={2}'\n\turl = base_url.format(SPOTIFY_USER_ID, SPOTIFY_PLAYLIST_ID, limit)\n\tresponse = requests.get(url, headers=headers).json() # Todo: Handle errors here. Not using this function so ok for now.\n\n\turis = []\n\tfor item in response['items']:\n\t\turi_string = item['track']['uri']\n\t\turis.append(uri_string[uri_string.rfind(':')+1:])\n\treturn uris", "def fetchAlbumIds(artist_id):\n url = 'https://api.spotify.com/v1/artists/' + artist_id + '/albums?market=US&album_type=album'\n req = requests.get(url)\n\n data = req.json()\n\n #checking for bad return value\n if not req.ok:\n print \"error : \" + data['error']['message']\n return \"error : \" + data['error']['message']\n\n albums = []\n for item in data['items']:\n \talbums.append(item['id'])\n\n return albums", "def get_videos_in_playlist(self):\n\n self.ydl = youtube_dl.YoutubeDL()\n # uses the youtube_dl as a context manager\n with self.ydl:\n self.result = self.ydl.extract_info(\n self.url, extra_info={'listformats': True}, download=False)\n for video in (self. result['entries']):\n video_id = video['id']\n self. url = f'https://www.youtube.com/watch?v={video_id}'\n self. 
show_formats()", "def getPlaylists():\n\n allPlaylistData = []\n\n spotifyPlaylistData = crud.getPlaylists(session)\n if 'items' in spotifyPlaylistData:\n allPlaylistData = spotifyPlaylistData['items']\n \n savedPlaylistIDs = crud.getSavedPlaylistIDsByUser(int(session['user_id']))\n\n regPlaylistData = [i for i in allPlaylistData if i['id'] not in savedPlaylistIDs]\n savedPlaylistData = [i for i in allPlaylistData if i['id'] in savedPlaylistIDs]\n\n data = {\n 'regPlaylistData': regPlaylistData,\n 'savedPlaylistData': savedPlaylistData\n }\n \n return data", "def get_video_ids(playlist_id):\n \n #search for all the videos given a playlist id\n search_response = youtube.playlistItems().list(part='contentDetails',maxResults=50,playlistId=playlist_id).execute()\n all_videos = search_response['items']\n video_ids = []\n for vid in all_videos:\n video_id = vid['contentDetails']['videoId']\n video_ids.append(video_id)\n\n return video_ids", "def selectSongs():\n\tsql =\"select songs.title, artist.name, album.name from songs, album, \" \\\n\t+ \"artist join songs_album on songs.id=songs_album.songs_id \" \\\n\t+ \"join songs_artist on songs.id=songs_artist.songs_id \" \\\n\t+ \"where album.id=songs_album.album_id \" \\\n\t+ \"and artist.id=songs_artist.artist_id\"\n\tc, conn = connect()\n\tretr = c.execute(sql)\n\tsongs = []\n\tfor entry in retr:\n\t\tsongs.append(music.song(title=entry[0], artist=entry[1], album=entry[2]))\n\treturn songs", "def user_playlist_tracks(\n self,\n playlist_id,\n fields=None,\n limit=100,\n offset=0,\n market=\"from_token\",\n **kwargs,\n ):\n _id = self._get_playlist_id(playlist_id)\n # pylint: disable=no-member\n return self._get(\n API.PLAYLIST_TRACKS.value.format(playlist_id=_id),\n limit=limit,\n offset=offset,\n fields=fields,\n market=market,\n **kwargs,\n )", "def get_playlist_items(self, youtube, playlist_id, max_results=10, batch=None, callback=None):\n def handle_request(request_id, response, exception):\n if exception:\n raise exception\n else:\n vids = []\n for item in response.get('items', []):\n vids.append(item['contentDetails']['videoId'])\n if callback:\n callback(vids)\n else:\n return vids\n pl_request = youtube.playlistItems().list(part='contentDetails', playlistId=playlist_id, maxResults=max_results)\n if batch:\n batch.add(pl_request, callback=handle_request)\n else:\n return handle_request(1, pl_request.execute(), None)", "def get_all_playlist_videos( playlistURL ):\r\n \r\n request = youtube.playlistItems().list(\r\n part=\"contentDetails,id,snippet\",\r\n maxResults=50,\r\n playlistId=\"PLxgoClQQBFjgTMrhvedWk8Q_CVLWwy3ak\"\r\n )\r\n response = request.execute()", "def get_related_playlists(self, music_id, n):\n data = self.safe_sql_request(\n \"SELECT id FROM raw_playlist JOIN playlist_link ON address = playlist_id WHERE music_id = {}\".format(\n str(music_id)))\n\n if data:\n selection_index = [randint(0, len(data) - 1) for _ in range(n)]\n selection_index = list(set(selection_index))\n return [data[index][0] for index in selection_index]\n return []", "def getplaylisttracks(accesstoken, chosenplaylist):\n\n headers = {}\n headers[\"Authorization\"] = \"Bearer {}\".format(accesstoken)\n\n limit = 100\n\n payload = {}\n payload[\"limit\"] = limit\n payload[\"offset\"] = 0\n\n r = requests.get(\n \"https://api.spotify.com/v1/users/{}/playlists/{}/tracks\".format(chosenplaylist.ownerid, chosenplaylist.playlistid),\n headers=headers,\n params=payload)\n\n response = r.json()\n\n if \"items\" not in response:\n if response[\"error\"]:\n if 
response[\"error\"][\"status\"] == 429:\n # wait for the amount of time specified in response header\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n # try again\n return(getplaylisttracks(accesstoken, chosenplaylist, userid))\n else:\n print(response[\"error\"])\n return(None)\n else:\n print('error: getplaylisttracks request failed')\n return(None)\n\n numberreceived = len(response[\"items\"])\n totalavailable = response[\"total\"]\n\n for track in response[\"items\"]:\n t = Track()\n t.trackid = track[\"track\"][\"id\"]\n t.albumname = track[\"track\"][\"album\"][\"name\"]\n t.trackname = track[\"track\"][\"name\"]\n t.artistname = track[\"track\"][\"artists\"][0][\"name\"]\n t.popularity = track[\"track\"][\"popularity\"]\n # print(t.trackid, t.trackname, t.artistname, t.albumname)\n chosenplaylist.tracks.append(t)\n\n # if we haven't gotten all of the tracks in the playlist, request the next\n # batch\n\n while numberreceived < totalavailable:\n\n payload[\"offset\"] = payload[\"offset\"] + limit\n r = requests.get(\n \"https://api.spotify.com/v1/users/{}/playlists/{}/tracks\".format(chosenplaylist.ownerid, chosenplaylist.playlistid),\n headers=headers,\n params=payload)\n response = r.json()\n\n if \"items\" not in response:\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n # wait for the amount of time specified in response header\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n # try again\n continue\n else:\n print('error: getplaylisttracks request failed')\n print(response[\"error\"])\n return(None)\n else:\n print('error: unknown error')\n return(None)\n\n for track in response[\"items\"]:\n if track[\"is_local\"]:\n # a locally saved song. skip over it, as no way to query audio \n # features without having a spotify track id\n continue\n t = Track()\n t.trackid = track[\"track\"][\"id\"]\n t.albumname = track[\"track\"][\"album\"][\"name\"]\n t.trackname = track[\"track\"][\"name\"]\n t.artistname = track[\"track\"][\"artists\"][0][\"name\"]\n # print(t.trackid, t.trackname, t.artistname, t.albumname)\n chosenplaylist.tracks.append(t)\n\n \n numberreceived = numberreceived + len(response[\"items\"])\n\n # filter out tracks with trackid == None\n chosenplaylist.tracks = [track for track in chosenplaylist.tracks if track.trackid is not None]\n\n # print(chosenplaylist.tracks)\n return(chosenplaylist)", "def get_videos_by_playlist(playlist_id='', parse_video=True):\n videos = api.get_playlist_items(playlist_id=playlist_id, count=None)\n video_ids = []\n for item in videos.items:\n item = item.to_dict()\n video_ids.append(item['contentDetails']['videoId'])\n if parse_video:\n results = []\n for video_id in video_ids:\n results.append(get_video_by_id(video_id))\n return results\n else:\n return video_ids", "def show_playlist(self, playlist_name):\n playlist_id = playlist_name.lower()\n if not playlist_id in self.playlists.keys():\n print(f\"Cannot show playlist {playlist_name}: Playlist does not exist\")\n return\n\n playlist = self.playlists.get(playlist_id)\n videos = playlist.videos\n\n if len(videos) == 0:\n print(f\"Showing playlist: {playlist_name}\")\n print(\"No videos here yet\")\n return\n\n print(f\"Showing playlist: {playlist_name}\")\n for video_id in videos:\n print(self._video_library.get_video(video_id))\n return", "def getAllSongs(self):\n return self.__songDictionary", "def read_album_tracks(id, artist_name, album_name):\n list_a = [x.name for x in dmla.list_tracks(id)]\n list_c = [x['title'] for x in 
dmlc.list_tracks_for_album(artist_name, album_name)\n if x['track'] != -1]\n return list_a, list_c", "def get_songs(library):\n songs = []\n for song in library:\n title, artist, album = song['title'], song['artist'], song['album']\n seconds = int(song['durationMillis']) // 1000\n songs.append({'artist': artist, 'title': title, 'album': album, 'seconds': seconds})\n return songs", "async def get_song(self, song_id: int) -> APIReturn:\n return await self._request(\"GET\", \"/getSong\", extra_query={\"id\": song_id})", "def get_playlist_videos_for_analysis(playlist_id: str) -> List[AnalysisToneTube]:\n db = __mongo_client.get_database(constants.DB_NAME)\n videos_collection = db[constants.COLLECTION_VIDEOS]\n\n projection = dict(_id=0, full_text=0, transcripts=0)\n\n # TODO: if there is mych videos, this should be reworked, same for save\n vids = videos_collection. \\\n find({\"playlist_id\": playlist_id}, projection). \\\n sort([(\"publish_date\", pymongo.ASCENDING)]).limit(5000)\n\n tubes = []\n for vid in vids:\n tbt: AnalysisToneTube = AnalysisToneTube.from_dict(vid)\n tubes.append(tbt)\n return tubes", "def get_hot_songs(self, artist_id):\n url = get_artist_url(artist_id)\n result = self.get_request(url)\n return result['hotSongs']", "def spotify_tracklist():\n sp = credentials()\n chart = chartdata()\n trackid_list = []\n #find a way to get track IDS\n for track in chart:\n searchQuery = track[0]\n searchResults = sp.search(q=searchQuery, limit=1, type='track', market=\"US\")\n trackid_list.append(searchResults['tracks']['items'][0]['uri'])\n return trackid_list", "def add_song_to_playlist(self):\n #populate our songs dictionary\n self.get_liked_videos()\n\n #collect all of uri\n uris = []\n for song,info in self.all_song_info.items():\n uris.append(info[\"spotify_uri\"])\n\n #create a new playlist\n playlist_id = self.create_playlist()\n\n #add all songs into new playlist\n\n #Spotipy can only add 100 songs at a time to a playlist that is why this method is taken\n g = len(uris)\n if g > 100:\n s = 0\n e = 99\n while g > 100:\n self.sp.user_playlist_add_tracks(user=self.username, playlist_id=playlist_id,\n tracks=uris[s:e])\n g -= 100\n s = e + 1\n e += 100\n self.sp.user_playlist_add_tracks(user=self.username, playlist_id=playlist_id,\n tracks=uris[s:])\n else:\n self.sp.user_playlist_add_tracks(user=self.username, playlist_id=playlist_id,\n tracks=uris)", "def get_all_songs() -> Generator[dict, None, None]:\n\n logging.debug(\"Fetching from server\")\n\n api = _get_api()\n\n for song_page in api.get_all_songs(incremental=True):\n for song in song_page:\n yield song", "def getplaylists(accesstoken, userid):\n \n headers = {}\n headers[\"Authorization\"] = \"Bearer {}\".format(accesstoken)\n\n limit = 50\n\n payload = {}\n payload[\"limit\"] = limit\n payload[\"offset\"] = 0\n\n r = requests.get(\"https://api.spotify.com/v1/me/playlists\",\n headers=headers, \n params=payload)\n\n # print('url = \\n\\n {} \\n\\n'.format(r.url))\n\n response = r.json()\n\n # add data to playlist objects\n if \"items\" not in response:\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n # wait for the amount of time specified in response header\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n # try again\n return(getplaylists(accesstoken, userid))\n\n else:\n print(response[\"error\"])\n return(None)\n else:\n print('error: getplaylists request failed')\n return(None)\n\n numberreceived = len(response[\"items\"])\n totalavailable = response[\"total\"]\n\n playlists = 
OrderedDict()\n\n for playlist in response[\"items\"]:\n p = Playlist()\n p.images = playlist[\"images\"]\n p.name = playlist[\"name\"]\n p.playlistid = playlist[\"id\"]\n p.ownerid = playlist[\"owner\"][\"id\"]\n playlists[p.name] = p\n\n # if number received less than total available, request more\n while numberreceived < totalavailable:\n # print(\"received={} available={}\".format(numberreceived, totalavailable))\n payload[\"offset\"] = payload[\"offset\"] + limit\n r = requests.get(\"https://api.spotify.com/v1/me/playlists\",\n headers=headers, \n params=payload)\n response = r.json()\n\n if \"items\" not in response:\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n # wait for the amount of time specified in response header\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n # try again\n continue\n else:\n print('error: getplaylists request failed')\n print(response[\"error\"])\n return(None)\n else:\n return(None)\n\n for playlist in response[\"items\"]:\n p = Playlist()\n p.images = playlist[\"images\"]\n p.name = playlist[\"name\"]\n p.playlistid = playlist[\"id\"]\n playlists[p.name] = p\n\n numberreceived = numberreceived + len(response[\"items\"])\n\n return(playlists)", "def play_music(sid):\n # Get the parameters for the get_song_id request\n artist = None\n album = None\n title = None\n if not request.json:\n # If no JSON parameters were given, just resume playing the song\n db = sqlite3.connect('/home/tropius/TROPIUS/TROPIUS.db')\n host = hosts.get_detail(db, sid)\n spotify.resume(host['ip'])\n return jsonify({})\n else:\n try:\n # Get the host data from the database\n db = sqlite3.connect('/home/tropius/TROPIUS/TROPIUS.db')\n host = hosts.get_detail(db, sid)\n artist = None\n album = None\n track = None\n if request.json.has_key('track') and request.json.get('track'):\n track = request.json.get('track')\n elif request.json.has_key('album') and request.json.get('album'):\n album = request.json.get('album')\n elif request.json.has_key('artist') and request.json.get('artist'):\n artist = request.json.get('artist')\n else:\n spotify.resume(host['ip'])\n return jsonify({})\n spotify.compound_play(host['ip'], artist=artist, album=album, song=track)\n return jsonify({})\n except:\n abort(400)", "def playlist_items(self):\r\n return v3.PlaylistItems(self)", "def playlist_search(search_term, results=5):\r\n if search_term:\r\n url = PLAYLIST_SEARCH_URL.format(API_KEY, util.web.quote(search_term.encode('ascii', 'ignore')))\r\n response = util.web.http_get(url=url, json=True, referer='https://tinychat.com')\r\n\r\n if response['json'] is not None:\r\n play_lists = []\r\n try:\r\n if 'items' in response['json']:\r\n for i, item in enumerate(response['json']['items']):\r\n if i == results:\r\n return play_lists\r\n playlist_id = item['id']['playlistId']\r\n playlist_title = item['snippet']['title'].encode('ascii', 'ignore')\r\n play_list_info = {\r\n 'playlist_title': playlist_title,\r\n 'playlist_id': playlist_id\r\n }\r\n play_lists.append(play_list_info)\r\n except KeyError as ke:\r\n log.error(ke, exc_info=True)\r\n return None", "def download_songs(playlist_url):\n command_string = 'youtube-dl -x --audio-format wav --postprocessor-args \"-ar 44100 -ac 1\" --output \"Songs/%(' \\\n 'title)s_%(id)s.%(ext)s\" ' + \\\n playlist_url\n args = shlex.split(command_string)\n subprocess.call(args)", "def Playlist(self, type='audio'):\n self.logger.debug(\"Loading Playlist of type \" + type)\n xbmc = Server(self.url('/jsonrpc', True))\n if type == 'video':\n return 
xbmc.Playlist.GetItems(playlistid=1, properties=['year', 'showtitle', 'season', 'episode', 'runtime'])\n\n return xbmc.Playlist.GetItems(playlistid=0, properties=['artist', 'title', 'album', 'duration'])", "def songs_list(name_of_album):\r\n songs = \"\"\r\n data = dbase()\r\n data = data[name_of_album][0]\r\n for song in data.keys():\r\n songs += song\r\n songs += \", \"\r\n return songs[:-2]", "def show_playlist(self, playlist_name):\n if playlist_name.lower() not in self._playlists:\n print(f\"Cannot show playlist {playlist_name}: Playlist does not exist\")\n return\n playlist = self._playlists[playlist_name.lower()]\n print(f\"Showing playlist: {playlist_name}\")\n if not playlist.videos:\n print(\"No videos here yet\")\n for video in playlist.videos:\n print(video)", "def get_playlist(self) -> List[Dict[str, Any]]:\n return copy.deepcopy(self._playlist)", "def get_songs(self):\n search_object = {\"size\":25000,\n 'query': {'term': {FIELD_FINGERPRINTED: True}}, \"fields\": [FIELD_SONGNAME, FIELD_FILE_SHA1,\n FIELD_TOTAL_HASHES]}\n response = self.cursor.search(index = SONGS_INDEXNAME, body=search_object)\n #print(\"get_songs response: \",response)\n arr = []\n for hit in response[\"hits\"][\"hits\"]:\n dct = {\"song_name\":hit['_source'][FIELD_SONGNAME],\"total_hashes\":hit['_source'][FIELD_TOTAL_HASHES],\n \"file_sha1\":hit['_source'][FIELD_FILE_SHA1]}\n arr.append(dct)\n return arr", "def get_songs(self, offset=None):\n return self.__get('songs')", "def get_playlist_tracks_id(self, username, playlist_name):\n track_list = []\n playlist_id = self.get_playlist_id(username, playlist_name)\n tracks = self.spotify.playlist_tracks(playlist_id)\n for i in range(len(tracks['items'])):\n track_list.append(tracks['items'][i]['track']['id'])\n while tracks['next']: # If there are more tracks\n tracks = self.spotify.next(tracks)\n for i in range(len(tracks['items'])):\n track_list.append(tracks['items'][i]['track']['id'])\n return track_list", "def song_view(song_id):\r\n querystring = apiurl_musixmatch + \"track.lyrics.get?track_id=\" + urllib2.quote(\r\n song_id) + \"&apikey=\" + apikey_musixmatch + \"&format=plain\"\r\n try:\r\n request = urllib2.Request(querystring)\r\n # timeout set to 4 seconds; automatically retries\r\n response = urllib2.urlopen(request, timeout=4)\r\n # raw = response.read()\r\n print colored.green(\"Starting\", bold=12)\r\n all_data = ''\r\n while True:\r\n do_task()\r\n print '\\b.'\r\n sys.stdout.flush()\r\n data = response.read(2048)\r\n if not data:\r\n break\r\n all_data += data\r\n time.sleep(0.4)\r\n print \"\\n\"\r\n json_obj = json.loads(all_data.decode(\"utf-8\"))\r\n body = len(json_obj[\"message\"][\"body\"])\r\n if body == 0:\r\n print colored.red(\"No lyrics found\", bold=12)\r\n else:\r\n print colored.cyan(json_obj[\"message\"][\"body\"][\"lyrics\"][\"lyrics_body\"], bold=12)\r\n except socket.timeout:\r\n print \"Timeout raised and caught\"", "def get_liked_songs(self, station_id):\n\n feedbacks = self.get_station_feedbacks(station_id)\n songs = []\n for feedback in feedbacks:\n songs.append({\n \"name\": feedback[\"songTitle\"],\n \"album\": feedback[\"albumTitle\"],\n \"artist\": feedback[\"artistName\"]\n })\n return songs", "def get_song_data(self, song_name=None, song_id=None):\n if song_name is None and song_id is None:\n print(\"ERROR: Require one of song name and song ID to retrieve song data.\")\n return []\n elif song_name is None:\n song_name = \"%\" # match any string\n\n try:\n # Auto-close.\n with closing(self.connection) as con:\n # 
Auto-commit\n with con:\n # Auto-close.\n with closing(con.cursor()) as cursor:\n cursor.execute(\"\"\"\n SELECT\n song.name, artist.name, song.duration_ms, song.popularity,\n song.id, song.spotify_uri, song.acousticness, song.danceability,\n song.energy, song.instrumentalness, song.liveness, song.loudness,\n song.speechiness, song.valence, song.tempo, song.mode,\n song.musical_key, song.time_signature\n\n FROM (\n SELECT *\n FROM songs JOIN nodes ON node_id == id\n WHERE name LIKE (?)\n ) AS song JOIN nodes AS artist ON main_artist_id == artist.id;\n \"\"\", (song_name,))\n return [\n dict(\n song_name=x[0], artist_name=x[1], duration_ms=x[2], popularity=x[3],\n id=x[4], spotify_uri=x[5], acousticness=x[6], danceability=x[7],\n energy=x[8], instrumentalness=x[9], liveness=x[10], loudness=x[11],\n speechiness=x[12], valence=x[13], tempo=x[14], mode=x[15],\n musical_key=x[16], time_signature=x[17],\n ) for x in cursor.fetchall()\n if song_id is None or song_id == x[4]\n ]\n\n except sqlite3.OperationalError as e:\n print(\"ERROR: Could not retrieve data for song with name '{}': {}\".format(song_name, str(e)))\n return []", "def add_from_playlist(self, params):\n lists = params\n\n # Lists to load\n names = []\n for n in self.listIDs.keys():\n for l in lists:\n if 'playlist:' + l in n:\n names.append(n)\n\n self.add_playlist(names)", "def show_playlist(self, playlist_name):\n if self.playlists[playlist_name.lower()]!=[]:\n print(f\"Showing playlist: {playlist_name}\")\n for i in self.playlists[playlist_name.lower()]:\n videos = self._video_library.get_all_videos()\n templist = []\n\n def converttostr(input_seq, seperator):\n # Join all the strings in list\n final_str = seperator.join(input_seq)\n return final_str\n\n for vid in videos:\n if i == vid.video_id:\n templist.append([vid.title,vid.video_id,vid.tags])\n\n print(f\" {templist[0][0]} ({templist[0][1]}) [{converttostr(list(templist[0][2]), ' ')}]\")\n else:\n print(f\"Showing playlist: {playlist_name}\")\n print(\" No videos here yet\")\n #print(\"show_playlist needs implementation\")", "def get_song(self, song_id):\n url = get_song_url(song_id)\n result = self.common_get_request(url,headers)\n\n return result['songs'][0]", "def get_song(self, song_id):\n url = get_song_url(song_id)\n result = self.get_request(url)\n\n return result['songs'][0]", "def get_playlist_info(self, username, playlist_name):\n playlist_info = []\n playlist_id = self.get_playlist_id(username, playlist_name)\n playlist_items = self.spotify.playlist_tracks(playlist_id)\n for i in range(len(playlist_items['items'])):\n print(playlist_items['items'][i])\n playlist_info.append([playlist_items['items'][i]['track']['name'], \n playlist_items['items'][i]['track']['artists'][0]['name'],\n playlist_items['items'][i]['track']['album']['name']])\n while playlist_items['next']: # If there are more tracks\n playlist_items = self.spotify.next(playlist_items)\n for i in range(len(playlist_items['items'])):\n playlist_info.append([playlist_items['items'][i]['track']['name'], \n playlist_items['items'][i]['track']['artists'][0]['name'],\n playlist_items['items'][i]['track']['album']['name']])\n return playlist_info", "def getAllPlaylists(self,name):\n return [p for p in self.playlists if p.title == name]", "def get_song_list(self, artist: str) -> List[str]:\n artist = self.just_replace_strings_with_nothing(artist)\n\n url = self.list_url + artist + '/songs'\n\n resp = requests.get(url)\n\n content = bs4.BeautifulSoup(resp.content)\n\n song_list = 
content.text[content.text.index(\n 'MP3s') + 5:content.text.index('About Song List')]\n\n song_list = re.sub('\\n', ',', song_list)\n song_list = re.sub(',+', ',', song_list)\n song_list = re.sub(', ,', ', ', song_list)\n\n song_list = re.split(',', song_list)\n for i in range(len(song_list)):\n song_list[i] = song_list[i].lstrip(' ')\n song_list[i] = re.sub(\"[.,']\", '', song_list[i])\n song_list[i] = re.sub(\"&\", 'and', song_list[i])\n song_list[i] = re.sub('\\s+', ' ', song_list[i])\n\n song_list = [i for i in song_list if i != '']\n\n return song_list", "def get(self, playlist_id):\n playlist_model = Playlist.get_by_id(int(playlist_id))\n json = []\n\n for key in playlist_model.followers:\n youtify_user_model = db.get(key)\n json.append(get_youtify_user_struct(youtify_user_model))\n\n self.response.headers['Content-Type'] = 'application/json'\n self.response.out.write(simplejson.dumps(json))", "def get_playlist(self, playlist_uuid, *args):\n\n if playlist_uuid not in self.playlists:\n return rsp_codes[2]\n\n rsp = rsp_codes[0]\n rsp['playlist'] = self.playlists[playlist_uuid]\n return rsp", "def get_yt_playlist(self, list_id, url=None):\n if self._settings.enable_yt_dl and url:\n try:\n if not self._yt_dl:\n raise YouTubeException(\"yt-dlp is not initialized!\")\n\n self._yt_dl.update_options({\"noplaylist\": False, \"extract_flat\": True})\n info = self._yt_dl.get_info(url, skip_errors=False)\n if \"url\" in info:\n info = self._yt_dl.get_info(info.get(\"url\"), skip_errors=False)\n\n return info.get(\"title\", \"\"), [(e.get(\"title\", \"\"), e.get(\"id\", \"\")) for e in info.get(\"entries\", [])]\n finally:\n # Restoring default options\n if self._yt_dl:\n self._yt_dl.update_options({\"noplaylist\": True, \"extract_flat\": False})\n\n return PlayListParser.get_yt_playlist(list_id)", "def create_playlist(access_token):\n request_body = json.dumps({\n \"name\": \"SpotiAdd\",\n \"description\": \"All Liked Youtube Videos\",\n \"public\": True\n })\n userId = getUserId(access_token)\n query = \"https://api.spotify.com/v1/users/{}/playlists\".format(\n userId)\n response = requests.post(\n query,\n data=request_body,\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {}\".format(access_token)\n }\n )\n response_json = response.json()\n # print(\"create_playlist_id : {}\".format(response_json),file = sys.stdout)\n return response_json[\"id\"]", "def list(ctx):\n\tfor songName in songs:\n\t\tyield from bot.send_message(ctx.message.author,songName)", "def list_playlists(self):\n endpoint = '/me/playlists'\n url = f'{self.api_base_url}{endpoint}'\n\n req = requests.get(url, headers=self.__header_bearer())\n if req.status_code == 200:\n items = req.json()\n if 'items' in items:\n return [[item['name'], item['id']] for item in items['items']]\n return False", "def getSonglist(self, songlistID, limit = 1000, offset = 0, total = False):\n currAPIVersion = self.config['apiVersion']\n currAPIURL = URL_NEAPIS[sys._getframe().f_code.co_name]\n currAPIURL = currAPIURL[min(currAPIVersion, len(currAPIURL) - 1)]\n currDict = {\n 'id' : songlistID,\n 'limit': limit,\n 'n' : limit,\n 'offset' : offset,\n 'total' : total,\n }\n\n currC, currR = self._mySubmit(currAPIURL, currDict)\n self.apiLog.info(\"%s Json Loads Begin\", sys._getframe().f_code.co_name)\n currR = json.loads(currR)\n self.apiLog.info(\"%s Json Loads End\", sys._getframe().f_code.co_name)\n self.updateCookie(currC)\n self.checkCode(currR['code'])\n\n return currR, currAPIURL[2]", "def 
fetch_song_data(self, song_ids):\n\t\ttracks_base_url = \"https://api.spotify.com/v1/tracks\"\n\t\theaders = {}\n\t\ttrack_ids = ','.join(song_ids)\n\t\tquery_params = \"/?ids=\"+track_ids\n\t\ttracks_url = tracks_base_url + query_params\n\t\ttracks={}\n\t\theaders['Authorization'] = f\"Bearer {self.token}\"\n\n\t\ttry:\n\t\t\treq = request.Request(url=tracks_url,data=None, headers=headers)\n\t\t\tresponse = request.urlopen(req).read().decode()\n\t\t\ttracks = json.loads(response)\n\t\t\tlogging.info(\"Successfully fetched songs from Spotify!\")\n\t\texcept error.URLError as e:\n\t\t\tresponse = e.read().decode(\"utf8\", 'ignore')\n\t\t\tlogging.error(response)\n\t\treturn tracks", "def playlist(self, channel_list, limit, part='contentDetails', only_id=1):\n playlist_details = {}\n key = self.keylist[self.keyindex]\n url_pi = 'https://www.googleapis.com/youtube/v3/playlistItems/'\n\n if limit <= 50 and limit > 0:\n maxResults = limit\n else:\n maxResults = 50\n\n for chnlid in channel_list:\n vidcount = initial = 0\n nextPageToken = ''\n results = []\n # print('UU'+chnlid[2:])\n try:\n while nextPageToken or initial == 0:\n querystring = {\n 'playlistId': 'UU' + chnlid[2:],\n 'part': part,\n 'key': key,\n 'pageToken': nextPageToken,\n 'maxResults': maxResults\n }\n\n\n response = request_handler(self, url_pi, params=querystring, wait=5) #ids=chnlid)\n # print(\"#\"*5, response.json())\n # print(response.json())\n if response.get('error'):\n while response['error']['errors'][0]['reason'] == 'quotaExceeded' or \\\n response['error']['errors'][0]['reason'] == 'dailyLimitExceeded':\n key = keychange(self)\n querystring = {\n 'playlistId': 'UU' + chnlid[2:],\n 'part': part,\n 'key': key,\n 'pageToken': nextPageToken,\n 'maxResults': maxResults\n }\n\n response = request_handler(self, url_pi, params=querystring, wait=5, ids=chnlid)\n\n if response.get('error'):\n playlist_details.update({chnlid: 'error'})\n if response['error']['errors'][0]['reason'] == 'keyInvalid':\n return [{chnlid:'error'}]\n break\n\n if response.get('Interneterror'):\n results.append(response)\n #print(playlist_details)\n break\n\n if limit == -1:\n limit = response['pageInfo']['totalResults']\n # print(response,response.text)\n \n if only_id == 1:\n for i in range(response['pageInfo']['resultsPerPage']):\n try:\n results.append(response['items'][i]['contentDetails']['videoId'])\n except:\n pass\n else:\n results.append(response['items'])\n nextPageToken = response.get('nextPageToken')\n vidcount += len(response['items'])\n if vidcount >= limit:\n break\n print(\"Video id found: \", chnlid, \" : \", vidcount)\n #{'error':[]}\n \n initial += 1\n \n playlist_details.update({chnlid:results})\n\n except Exception as e:\n print(\"Error: \", e, \" : \", traceback.print_exc())\n playlist_details[chnlid] = 'error'\n break\n\n return playlist_details", "def get_song_by_id(self, song_id: int):\n #print(\"song_id: \",song_id)\n search_object = {'query': {'term': {\"_id\": song_id}}, \"fields\": [FIELD_SONGNAME, FIELD_FILE_SHA1, FIELD_TOTAL_HASHES]}\n response = self.cursor.search(index=SONGS_INDEXNAME, body=search_object)\n #print(\"response: \",response)\n dct = {\"song_name\":response[\"hits\"][\"hits\"][0]['_source'][FIELD_SONGNAME],\n \"total_hashes\":response[\"hits\"][\"hits\"][0]['_source'][FIELD_TOTAL_HASHES],\n \"file_sha1\":response[\"hits\"][\"hits\"][0]['_source'][FIELD_FILE_SHA1]}\n #print(\"dct: \",dct)\n return dct", "def get_playlist_tracks_adapter(json_response):\n\n ret = {\"result\": []}\n for item in 
json_response['items']:\n ret[\"result\"].append(json_to_track_info(item[\"track\"]))\n return ret", "def readPlaylistData(self):\n return gatherPlaylistData(10)", "def song(song_id):\n return process_input(song_id) #jsonify(recomendations)", "def search_song(self, name):\n self.logger.debug('Searched for Song: {}'.format(name))\n results = self.sp.search(q='track:' + name, type='track')\n songs = [song for song in results['tracks']['items']]\n i = 1\n songs_ls = []\n table_ls = []\n for song in songs:\n table_ls.append([i,\n song['name'][0:20].strip(),\n song['album']['name'][0:20].strip(),\n \"%0.2f\" % (song['duration_ms'] / 60000),\n song['popularity']])\n songs_ls.append(song['uri'])\n i = i + 1\n return songs_ls, table_ls", "def show_playlist(self, playlist_name):\n playlist_exists = False\n for playlist in list(self.playlists.keys()):\n if playlist_name.upper() == playlist.upper():\n playlist_exists = True\n real_playlist_name = playlist\n break\n if playlist_exists:\n print(f\"Showing playlist: {playlist_name}\")\n if len(self.playlists[real_playlist_name]) == 0:\n print(\"\\tNo videos here yet\")\n else:\n for song in self.playlists[real_playlist_name]:\n video = self._video_library.get_video(song)\n tags = str(video.tags)\n tags=tags.replace(\"'\",\"\")\n tags=tags.replace(\",\", \"\") \n tags=tags.replace(\")\", \"\") \n tags=tags.replace(\"(\", \"\") \n print(f\"{video.title} ({video.video_id}) [{tags}]\")\n\n else:\n print(f\"\\tCannot show playlist {playlist_name}: Playlist does not exist\")\n\n # print(\"show_playlist needs implementation\")" ]
[ "0.81859416", "0.8030631", "0.79889697", "0.75973374", "0.75427765", "0.73269945", "0.73071337", "0.7261857", "0.713018", "0.7062886", "0.70463115", "0.6993529", "0.6951676", "0.69032377", "0.6889015", "0.6864356", "0.68641925", "0.68147", "0.68029433", "0.6800521", "0.6796981", "0.67931414", "0.67633337", "0.6759192", "0.67575365", "0.67147774", "0.6627006", "0.66152114", "0.66038084", "0.6555001", "0.6538752", "0.6526555", "0.6522728", "0.65146565", "0.65063703", "0.64825314", "0.6452483", "0.644309", "0.6428663", "0.6420069", "0.63960475", "0.63870984", "0.6368436", "0.63542575", "0.63196254", "0.63177407", "0.63059413", "0.6302301", "0.6290882", "0.62888837", "0.6287739", "0.6287084", "0.62802047", "0.6268577", "0.6251071", "0.6247147", "0.62178195", "0.62145156", "0.62056065", "0.6188467", "0.6167734", "0.6165521", "0.615716", "0.6153262", "0.6147104", "0.6140297", "0.6130998", "0.61267024", "0.61250323", "0.6118866", "0.6106836", "0.6101899", "0.6101614", "0.6097669", "0.60929877", "0.6085724", "0.60857177", "0.607802", "0.6073692", "0.60652995", "0.6062365", "0.6027481", "0.60266584", "0.60231763", "0.6015176", "0.6010854", "0.6000026", "0.59924436", "0.5989881", "0.59871906", "0.59838283", "0.59582657", "0.59566563", "0.595658", "0.59534466", "0.59530586", "0.59477085", "0.5939984", "0.59364533", "0.59345585" ]
0.6986906
12
[adds a song to a playlist]
def add_song_to_playlist(self, song_uri, playlist_id, user=None): if song_uri[0] in self.list_pl_songs(playlist_id, user=None): logging.debug('Song already in playlist') else: if user: self.sp.user_playlist_add_tracks(user, playlist_id, song_uri) else: self.sp.user_playlist_add_tracks( self.user, playlist_id, song_uri)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_song(self, song: Song):\n self.playlist.append(song)", "def add_the_song_to_playlist(self):\n com_util.tap_on(self.driver, element['AddToPlaylist'])\n # com_util.send_to(self.driver, element['EnterThePlaylist'], 'My Songs')\n com_util.tap_on(self.driver, element['ClickMySongs'])\n # com_util.tap_on(self.driver, element['SaveBtn'])\n com_util.tap_on(self.driver, element['CancelBtn'])\n com_util.tap_on(self.driver, element['DownArrow'])", "def add_song_to_playlist(self):\n #populate our songs dictionary\n self.get_liked_videos()\n\n #collect all of uri\n uris = []\n for song,info in self.all_song_info.items():\n uris.append(info[\"spotify_uri\"])\n\n #create a new playlist\n playlist_id = self.create_playlist()\n\n #add all songs into new playlist\n\n #Spotipy can only add 100 songs at a time to a playlist that is why this method is taken\n g = len(uris)\n if g > 100:\n s = 0\n e = 99\n while g > 100:\n self.sp.user_playlist_add_tracks(user=self.username, playlist_id=playlist_id,\n tracks=uris[s:e])\n g -= 100\n s = e + 1\n e += 100\n self.sp.user_playlist_add_tracks(user=self.username, playlist_id=playlist_id,\n tracks=uris[s:])\n else:\n self.sp.user_playlist_add_tracks(user=self.username, playlist_id=playlist_id,\n tracks=uris)", "def add_song(self, song):\n self.songs.append(song)", "def add_song(self, song, position=None):\n if position:\n self.tracks.insert(position, song)\n else:\n self.tracks.append(song)", "def add_song(self, song, position=None):\n if position is None:\n self.tracks.append(song)\n else:\n self.tracks.insert(position, song)", "def add_song(self):\n settings = dict(initialdir=pathlib.Path().absolute(), title=\"Choose songs\", filetypes=(\n (\"flac files\", \"*.flac\"),\n (\"mp3 files\", \"*.mp3\"),\n (\"all files\", \"*\")))\n\n song = filedialog.askopenfilename(**settings)\n\n self.update_playlist(song)\n self.listbox.insert(\"end\", self.song_list[-1]['name'])", "def playlist_add(nums, playlist):\n nums = _parse_multi(nums)\n\n if not g.userpl.get(playlist):\n playlist = playlist.replace(\" \", \"-\")\n g.userpl[playlist] = Playlist(playlist)\n\n for songnum in nums:\n g.userpl[playlist].songs.append(g.model.songs[songnum - 1])\n dur = g.userpl[playlist].duration\n f = (len(nums), playlist, g.userpl[playlist].size, dur)\n g.message = F('added to saved pl') % f\n\n if nums:\n save_to_file()\n\n g.content = generate_songlist_display()", "def add_to_playlist(self, playlist_uri=\"spotify:playlist:3VUBchphbcLwE5WdqBW3gv\", user=\"robbo1992\"):\n if playlist_uri is None or self.song_uri is None:\n log.warn(\"Object attributes are None, cannot add to playlist.\")\n return\n else:\n log.debug(\"Adding song %s to playlist.\" %str(self.song_uri))\n results = self.sp.user_playlist_add_tracks(user, playlist_uri, [self.song_uri])\n log.debug(\"Adding to playlist results: %s\" % results)", "def import_song(self, song, playlist):\n\n try:\n song_uri = self.find_song_uri(song)\n except SongNotFoundError as e:\n print(f\"could not find song {song} to add to playlist '{playlist['name']}'\")\n else:\n self.add_song_to_playlist(song_uri, playlist[\"id\"])", "def associate_song(self, song):\n self.songs.append(song)", "def add_song_to_playlist(self, song_uri, playlist_id):\n\n endpoint = f\"/playlists/{playlist_id}/tracks\"\n self._send(endpoint, \"POST\", params={\"uris\": song_uri})", "def add_song(self, song, position=None):\n\n song_found = find_object(song, self.tracks)\n if song_found is None:\n song_found = Song(song, self.artist)\n if position is None:\n 
self.tracks.append(song_found)\n else:\n self.tracks.insert(position, song_found)", "def add_song(self, song: Song) -> None:\n\n self.songs.append(song)\n self.set_song_count(len(self.songs))", "def add_song(self, song, position=None):\n\n # Use find_object to see if the song exist already.\n song_found = find_object(song, self.tracks) # look for song.tracks to see if it exist in the list\n if song_found is None: # if song is not found\n song_found = Song(song, self.artist) # We create new song using \"Song\" function and assign it to song_found\n if position is None: # If there are no songs in this track\n self.tracks.append(song_found) # Add this song_found in the first position\n else: # else if there are already some songs in the track\n self.tracks.insert(position, song_found) # inserts the position and song in self.tracks list", "def new_song():\n song_id = int(request.args['song_id'])\n track_info = shiva.get_tracks([song_id])[song_id]\n vlc.add_song(track_info['path'])\n return 'ok'", "def _http_add(self, mrl: MRL):\n self._http_request(\"in_play&input=%s\" % mrl)\n self.get_playlist()", "def add_song():\n options = queue.instantiate_options()\n raw_queue = queue.instantiate_queue()\n track_id = request.args.get('song')\n\n for song in raw_queue:\n if song['track_id'] == track_id[14:]:\n return json.dumps({'error': 'Cannot add a song already in the queue'})\n\n num_songs_added = 0\n for song in raw_queue:\n if song['added_by'] == session['id']:\n num_songs_added += 1\n\n if num_songs_added >= int(options['max_individual_songs']):\n print('user reached max songs')\n return json.dumps({'error': \"You are not allowed to add any more songs until one plays\"})\n\n song_obj = create_song(track_id, added_by=session['id'])\n queue.addSong(song_obj)\n queue_change()\n return json.dumps({'success': 'added ' + track_id})", "def add_music(request, music_id: int) -> HttpResponse:\n music_item = get_object_or_404(Music, id=music_id)\n\n if music_item in request.user.profile.playlist.all():\n return HttpResponse('Success')\n\n playpos = PlayPosition(\n position=music_item,\n plist=request.user.profile\n )\n playpos.add_order()\n playpos.save()\n\n return HttpResponse('Success')", "def add_to_playlist(track_ids, playlist_name):\n \n playlist_id = find_playlist(playlist_name)\n \n spotifyObject.user_playlist_add_tracks(config.USERNAME, playlist_id,\n track_ids, position=None)", "def add_tracks_to_lib(title, gm_api):\r\n # Extract single playlist\r\n if not (gm_api.is_authenticated):\r\n sys.stderr.write('Error: api not authenticated')\r\n return None\r\n allPLs = gm_api.get_all_user_playlist_contents()\r\n\r\n pl= next((p for p in allPLs if p['name'] == title), None)\r\n if pl == None:\r\n sys.stderr.write('Error: could not find desired playlist')\r\n return None\r\n # add playlist's tracks to library\r\n # to_add = []\r\n num_added = 0\r\n num_bad_data = 0\r\n for t in pl['tracks']:\r\n metadata = t.get('track', None)\r\n if metadata != None:\r\n #to_add.append(metadata['storeId'])\r\n gm_api.add_store_tracks([metadata['storeId']])\r\n num_added += 1\r\n else:\r\n num_bad_data += 1\r\n # Gmusicapi call\r\n #gm_api.add_store_tracks(to_add)\r\n #print(\"Added \", len(to_add), \" tracks to library.\\n\")\r\n print(\"Added \", num_added, \" tracks to library.\\n\")\r\n print(\"Unable to add \", num_bad_data, \" tracks.\\n\")", "async def add(self, ctx, query):\n if ctx.guild is None:\n await ctx.reply(\"This command can only be used in a server, not in DMs.\")\n raise commands.CommandError(\"Invoker 
not in a guild.\")\n\n if ctx.voice_client is None or ctx.voice_client.channel is None:\n await ctx.reply(f\"I am not in a voice channel, invite me first with `{self.bot_config['prefix']}join`.\")\n raise commands.CommandError(\"Bot not connected to a voice channel.\")\n\n if ctx.author.voice is None or ctx.author.voice.channel is None:\n await ctx.reply(\"You need to be in a voice channel to use this command.\")\n raise commands.CommandError(\"Invoker not connected to a voice channel.\")\n\n if ctx.voice_client is not None and ctx.author.voice.channel != ctx.voice_client.channel:\n await ctx.reply(\"You need to be in the same voice channel as the bot to use this command.\")\n raise commands.CommandError(\"Invoker not in same voice channel as bot.\")\n\n if ctx.voice_client is not None and ctx.voice_client.channel is not None:\n controller = SpotifyController.get_instance(ctx.voice_client.channel.id)\n if controller is None:\n await ctx.reply(f\"I'm not playing anything at the moment.\")\n raise commands.CommandError(\"Bot not connected to active spotify session.\")\n else:\n await ctx.reply(f\"I am not in a voice channel, invite me first with `{self.bot_config['prefix']}join`.\")\n raise commands.CommandError(\"Bot not connected to a voice channel.\")\n\n print(f\"Adding {query} to playlist\")\n controller = SpotifyController.get_instance(ctx.voice_client.channel.id)\n sp = controller.get_playlist_api()\n\n uri = None\n item_info = None\n item_type = None\n\n # If link, queue by link\n if query.startswith(\"http://\") or query.startswith(\"https://\"):\n m = SPOTIFY_LINK_REGEX.match(query)\n if m:\n uri = f\"spotify:{m.group('type')}:{m.group('id')}\"\n item_type = m.group('type')\n if item_type == \"track\":\n try:\n item_info = sp.track(m.group('id'))\n except SpotifyException:\n await ctx.send(f\"Cannot add! Invalid track!\")\n return\n elif item_type == \"album\":\n try:\n item_info = sp.album(m.group('id'))\n except SpotifyException:\n await ctx.send(f\"Cannot add! Invalid album!\")\n return\n elif item_type == \"playlist\":\n try:\n item_info = sp.playlist(m.group('id'))\n except SpotifyException:\n await ctx.send(f\"Cannot add! Invalid or private playlist!\")\n return\n else:\n await ctx.send(f\"Type {item_type} not supported!\")\n return\n\n print(f\"Converted link to ID '{uri}'\")\n else:\n await ctx.send(f\"Only spotify links are supported!\")\n return\n\n # If spotify uri, queue by link\n if uri is None:\n m = SPOTIFY_URI_REGEX.match(query)\n if m:\n uri = f\"spotify:{m.group('type')}:{m.group('id')}\"\n item_type = m.group('type')\n if item_type == \"track\":\n try:\n item_info = sp.track(m.group('id'))\n except SpotifyException:\n await ctx.send(f\"Cannot add! Invalid track!\")\n return\n elif item_type == \"album\":\n try:\n item_info = sp.album(m.group('id'))\n except SpotifyException:\n await ctx.send(f\"Cannot add! Invalid album!\")\n return\n elif item_type == \"playlist\":\n try:\n item_info = sp.playlist(m.group('id'))\n except SpotifyException:\n await ctx.send(f\"Cannot add! 
Invalid or private playlist!\")\n return\n else:\n await ctx.send(f\"Type {item_type} not supported!\")\n return\n print(f\"Converted URI to ID '{uri}'\")\n\n # Else, try to search\n if uri is None:\n await ctx.send(f'Searching not supported yet.')\n return\n\n # Add URI\n if uri is not None:\n if item_type == \"track\":\n sp.playlist_add_items(controller.playlist[\"id\"], items=[uri])\n elif item_type == \"album\":\n album_tracks = controller.get_album_tracks(item_info['id'])\n i, max_tracks = 0, 50\n while i < len(album_tracks):\n block = [t['uri'] for t in album_tracks[i:i+max_tracks]]\n sp.playlist_add_items(controller.playlist[\"id\"], items=block)\n i += max_tracks\n elif item_type == \"playlist\":\n playlist_tracks = controller.get_playlist_tracks(item_info['id'])\n i, max_tracks = 0, 50\n while i < len(playlist_tracks):\n block = [t['uri'] for t in playlist_tracks[i:i+max_tracks]]\n sp.playlist_add_items(controller.playlist[\"id\"], items=block)\n i += max_tracks\n else:\n await ctx.send(f\"Cannot add! Type {item_type} not supported!\")\n return\n\n try:\n controller.update_playlist()\n except IndexError as e:\n print(e, file=sys.stderr)\n\n msg_embed = Embed()\n if item_type == \"track\":\n full_title = SpotifyController.format_full_title(item_info)\n try:\n thumbnail = item_info['album']['images'][0]['url']\n except IndexError:\n thumbnail = None\n msg_embed.description = f\"Added [{full_title}]({item_info['external_urls']['spotify']}) to queue!\"\n msg_embed.set_thumbnail(url=thumbnail)\n elif item_type == \"album\":\n full_title = SpotifyController.format_full_title(item_info)\n try:\n thumbnail = item_info['images'][0]['url']\n except IndexError:\n thumbnail = None\n num_tracks = item_info['tracks']['total']\n msg_embed.description = f\"Added album [{full_title}]({item_info['external_urls']['spotify']}) \" \\\n f\"({num_tracks} tracks) to queue!\"\n msg_embed.set_thumbnail(url=thumbnail)\n elif item_type == \"playlist\":\n title = item_info['name']\n try:\n thumbnail = item_info['images'][0]['url']\n except IndexError:\n thumbnail = None\n num_tracks = item_info['tracks']['total']\n msg_embed.description = f\"Added playlist [{title}]({item_info['external_urls']['spotify']}) \" \\\n f\"({num_tracks} tracks) to queue!\"\n msg_embed.set_thumbnail(url=thumbnail)\n else:\n # Shouldn't happen, but lets add a message anyway...\n msg_embed.description = f\"Unknown {item_type} item added to queue!\"\n await ctx.reply(embed=msg_embed)", "def add_from_playlist(self, params):\n lists = params\n\n # Lists to load\n names = []\n for n in self.listIDs.keys():\n for l in lists:\n if 'playlist:' + l in n:\n names.append(n)\n\n self.add_playlist(names)", "def add_song(self, name, year, title):\n album_found = find_object(name, self.albums)\n if album_found is None:\n album_found = Album(name, year, self.name)\n self.add_album(album_found)\n album_found.add_song(title)", "def add_songs(self, song, position=None):\n song_found = find_object(song, self.tracks)\n if song_found is None:\n song_found = Song(song, self.artist)\n if position is None:\n self.tracks.append(song_found)\n else:\n self.tracks.insert(position, song_found)", "def add_songs(playlist_id, user_id, uris):\n\t# TODO: ensure duplicates not added or else they'll pop to the top of the playlist\n\t# Not going to do this right now. 
If you want the playlist to be a record of daily tracks, \n\t# doesn't make sense to get rid of duplicates.\n\n\tfor uri in uris:\n\t\tlogging.debug('Adding uri {0}'.format(uri))\n\ttoken = get_token()\n\theaders = {'Authorization': 'Bearer ' + token}\n\tbase_url = SPOTIFY_API_HOST + 'users/{0}/playlists/{1}/tracks?position=0&uris={2}'\n\n\tformatted_uris = [quote('spotify:track:{0}'.format(uri), safe='') for uri in uris if uri] # Probably shouldn't quote\n\turi_string = ','.join(formatted_uris)\n\n\turl = base_url.format(SPOTIFY_USER_ID, SPOTIFY_PLAYLIST_ID, uri_string)\n\tresponse = requests.post(url, headers=headers)\n\tlogging.debug('Called add url {0}'.format(url))\n\tlogging.debug('Got response {0}'.format(response.text))\n\tif response.status_code == 429:\n\t\tlogging.warning('!!!!!!!!!!!!!!!!!!!!!GOT STATUS CODE 429; RATE LIMITING FROM SPOTIFY!!!!!!!!!!!!!!!!!!')", "def addSong(self, song):\n queue = self.instantiate_queue()\n history = self.instantiate_history()\n options = self.instantiate_options()\n\n queue = [song for song in queue if song['explicit']]\n queue.append(song.to_dict())\n\n if len(queue) < 5:\n self.addImplicit(queue, history, fallback_song=song.to_dict())\n \n queue = self.sortSongs(queue)\n self.cache.set('queue', queue)", "def add_playlist(self, names, printQueue=False):\n idtoadd = [self.listIDs[n] for n in names]\n self.spotify.add_playlist_to_queue(idtoadd)\n\n if printQueue:\n self.console.print('This is your current queue: ')\n self.console.print(self.spotify.queue.loc[:10, ['name', 'album', 'artist']])", "async def queue(self, msg, song):\n title1 = await Downloader.get_info(self, url=song)\n title = title1[0]\n data = title1[1]\n # NOTE:needs fix here\n if data['queue']:\n await self.playlist(data, msg)\n # NOTE: needs to be embeded to make it better output\n return await msg.send(f\"Added playlist {data['title']} to queue\")\n self.player[msg.guild.id]['queue'].append(\n {'title': title, 'author': msg})\n return await msg.send(f\"**{title} added to queue**\".title())", "def add_to_playlist(file, list, data = None):\n\n if not list:\n return\n\n exists = os.path.isfile(list)\n playlist = open(list, 'a')\n if not exists:\n playlist.write(\"#EXTM3U\\n\")\n\n if data:\n metadata = u\"#EXTINF: {}, {} - {} \\n\".format(data['time'], data['artist'], data['title'])\n playlist.write(metadata.encode('utf8'))\n\n playlist.write(file + \"\\n\")\n playlist.close()\n try:\n print 'Added to {}'.format(os.path.basename(list))\n except:\n pass", "def add_to_playlist(self, playlist_name, video_id):\n if playlist_name.lower() not in self.playlists:\n print(\"Cannot add video to\", playlist_name, end=\"\")\n print(\": Playlist does not exist\")\n elif self._video_library.get_video(video_id) is None:\n print(\"Cannot add video to\", playlist_name, end=\"\") \n print(\": Video does not exist\")\n elif self._video_library.get_video(video_id).flagged:\n print(f\"Cannot add video to {playlist_name}: Video is currently flagged (reason: {self._video_library.get_video(video_id).flag_reason})\")\n elif self._video_library.get_video(video_id) in self.playlists[playlist_name.lower()]:\n print(\"Cannot add video to\", playlist_name, end=\"\") \n print(\": Video already added\")\n else:\n print(\"Added video to\", playlist_name, end=\"\") \n print(\":\",self._video_library.get_video(video_id).title)\n self.playlists[playlist_name.lower()].append(self._video_library.get_video(video_id))", "def add_song_to_pl(self, song, pl):\n to_send = self.db.add_song_to_pl(song, pl)\n if not 
to_send:\n to_send = SUCCESS\n self.send_message(to_send)", "def add_tracks():\n sp = credentials()\n tracks = spotify_tracklist()\n playlist_id = grab_playlist()\n sp.user_playlist_add_tracks('truetiming', playlist_id, tracks)", "async def playlist(self, ctx, *, query):\n # Setup the headers with the token that should be here\n headers = {\"Authorization\": \"Bearer {}\".format(self._token)}\n opts = {\"q\": query, \"type\": \"playlist\"}\n url = \"https://api.spotify.com/v1/search\"\n response = await utils.request(url, headers=headers, payload=opts)\n try:\n await ctx.send(\n response.get(\"playlists\")\n .get(\"items\")[0]\n .get(\"external_urls\")\n .get(\"spotify\")\n )\n except (KeyError, AttributeError, IndexError):\n await ctx.send(\"Couldn't find a song for:\\n{}\".format(query))", "def add_songs(self, name, year, title):\n\n album_found = find_object(name, self.album)\n if album_found is None:\n print(\"Not Found \" + name)\n album_found = Album(name, year, self.name)\n self.add_album(album_found)\n else:\n print(\"Found album \"+name)\n\n album_found.add_songs(title)", "def add_music_from_search(request, music_id: int) -> HttpResponse:\n music_item = get_object_or_404(Music, id=music_id)\n\n if music_item in request.user.profile.playlist.all():\n return HttpResponse('Success')\n\n playpos = PlayPosition(\n position=music_item,\n plist=request.user.profile\n )\n\n playpos.add_order()\n playpos.save()\n\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))", "def add_to_playlist(self, playlist_name, video_id):\n video = self._video_library.get_video(video_id)\n if playlist_name.lower() not in self._playlists:\n print(f\"Cannot add video to {playlist_name}: Playlist does not exist\")\n return\n if not video:\n print(f\"Cannot add video to {playlist_name}: Video does not exist\")\n return\n if video.flag is not None:\n print(f\"Cannot add video to {playlist_name}: Video is currently flagged (reason: {video.flag})\")\n return\n playlist = self._playlists[playlist_name.lower()]\n if video in playlist.videos:\n print(f\"Cannot add video to {playlist_name}: Video already added\")\n return\n playlist.videos.append(self._video_library.get_video(video_id))\n print(f\"Added video to {playlist_name}: {video.title}\")", "def add_song(self, name, year, title):\n\n # Here we check if album exist under artist.\n album_found = find_object(name, self.albums)\n if album_found is None: # If there is no album found\n print(name + \"not found\") # we print \"Album name not found\n album_found = Album(name, year, self.name) # Change_3: Pass \"self.name\" instead of \"self\"\n self.add_album(album_found) # We add new_album to song.\n else: # if we found an existing album with same name\n print(\"found album\" + name) # we print found album name\n\n # so we add song to album_found\n album_found.add_song(title)", "def create_playlist(self, data):\n pass", "def add_to_playlist(self, playlist_name, video_id):\n playlist_id = playlist_name.lower()\n if not playlist_id in self.playlists.keys():\n print(\"Cannot add video to another_playlist: Playlist does not exist\")\n return\n\n if not self._video_library.get_video(video_id):\n print(f\"Cannot add video to {playlist_name}: Video does not exist\")\n return\n\n if video_id in self.playlists[playlist_id].videos:\n print(f\"Cannot add video to {playlist_name}: Video already added\")\n return\n\n video = self._video_library.get_video(video_id)\n self.playlists[playlist_id].videos.append(video_id)\n print(f\"Added video to {playlist_name}: 
{video.title}\")\n return", "def insert_tracks(event=None):\n # playlist_items = playlist_box.get(0, len(playlist))\n # Fetching tracks\n tracks_items = filedialog.askopenfilenames(filetypes=[(\n \"Audio files\", ('*.flac', '*.wav', '*.mp3', '.ogg'))], title=\"Select tracks\")\n\n # Force insertion of at least one track\n # if (not tracks_list) and (not playlist_items):\n # insert_tracks()\n\n # Inserting into Playlist\n for track_path in tracks_items:\n # Extract file name from full path\n track = os.path.basename(track_path)\n if track not in playlist_box.get(0, len(playlist)): # Avoid duplicates\n playlist_box.insert(END, track)\n playlist.append(track_path)", "async def add(self, ctx, url_string : str):\n logger.info(\"add command issued by {0} with {1}\".format(ctx.message.author.name, url_string))\n if self.spotify_device is None:\n await ctx.send(\"No device playing\")\n elif ctx.voice_client is None:\n await ctx.send(\"No voice to skip\")\n else:\n try:\n url_parsed = urllib.parse.urlparse(url_string)\n except:\n await ctx.send(\"invalid spotify url\")\n return\n url_split = url_parsed.path\n url_split, url_id = os.path.split(url_split)\n url_split, url_type = os.path.split(url_split)\n logger.info(\"type is {0} and id is {1}\".format(url_type, url_id))\n if url_type == 'track':\n self.song_list.append(url_id)\n await ctx.send(\"Added song\")\n else:\n await ctx.send(\"Only single tracks for now\")", "def add_song(_name_of_the_song, _duration_in_number_of_seconds):\r\n # creating an instance of our Song constructor\r\n new_song = Song(name_of_the_song=_name_of_the_song,\r\n duration_in_number_of_seconds=_duration_in_number_of_seconds)\r\n db.session.add(new_song) # add new song to database session\r\n db.session.commit() # commit changes to session\r", "def addSong(self, title, filename):\n #make sure that the filename is valid? or does this happen outside?\n self.__songDictionary[title]=filename\n return True", "def add_to_playlist(self, playlist_name, video_id):\n vid = self._video_library.get_video(video_id)\n if vid and (playlist_name.lower() in self.playlists):\n if video_id not in self.playlists[playlist_name.lower()]:\n print(\"Added video to {0}: {1}\".format(playlist_name, vid.title))\n self.playlists[playlist_name.lower()].append(video_id)\n else:\n print(\"Cannot add video to {0}: Video already added\".format(playlist_name))\n elif playlist_name not in self.playlists:\n print(\"Cannot add video to {0}: Playlist does not exist\".format(playlist_name))\n elif not vid:\n print(\"Cannot add video to {0}: Video does not exist\".format(playlist_name))\n #print(f\"Added video to {self._video_playlist.name}: {video_id}\")\n\n #print(f'Added video to {playlist.name}: {playlist.videos}, {video_id_list}')\n #else:\n #print(f'Cannot add video to [: Video does not exist')", "def insert_playlist(self, playlist_contents):\n\n # Just make sure we don't overwrite an existing playlist! 
Silly python not having do-while..\n while True:\n playlist_uuid = str(uuid4())\n if playlist_uuid not in self.playlists:\n break\n\n try:\n playlist = Playlist(playlist_contents)\n except PlaylistValidationError as e:\n rsp = rsp_codes[8]\n rsp['trace'] = traceback.format_exc()\n return rsp\n\n self.playlists[playlist_uuid] = playlist\n\n rsp = rsp_codes[0]\n rsp['playlist_uuid'] = playlist_uuid\n return rsp", "def AddPlays(self, plays):\n self.persistant['plays'] += plays", "def add_to_playlist(self, playlist_name, video_id):\n video = self._video_library.get_video(video_id)\n for i in playlist_name:\n if i.title == video.title:\n print(f\"Cannot add video to {playlist_name}: Video already added\")\n else:\n print(f\"Added video to {playlist_name}: {video.title}\")", "def add_song(self):\n # Error check for blank inputs\n if \"\" in (self.root.ids.input_title.text, self.root.ids.input_artist.text, self.root.ids.input_year.text):\n self.root.ids.status_text.text = \"All fields must be completed\"\n return\n # Error check for negative numbers\n try:\n if int(self.root.ids.input_year.text) < 0:\n self.root.ids.status_text.text = \"Year must be >= 0\"\n return\n # Error check for invalid numbers\n except ValueError:\n self.root.ids.status_text.text = \"Please enter a valid number\"\n return\n # Song add, clear inputs, sort songlist\n song_to_add = Song(self.root.ids.input_title.text, self.root.ids.input_artist.text,\n int(self.root.ids.input_year.text))\n self.songs.add_song(song_to_add)\n SongsToLearnApp.clear_inputs(self)\n self.sort_songs(self.root.ids.sort_options.text)", "def _rc_add(self, mrl: MRL):\n self._rc_send('add %s' % mrl)\n # recache playlist\n self.get_playlist()", "def add_video_to_playlist(youtube, args, privacy=\"public\"):\n video_id = args['video_id']\n playlist_id = args['playlist_id']\n \n print(video_id)\n #print(type(args))\n \n if playlist_id:\n return add_video_to_existing_playlist(youtube, playlist_id, video_id)\n else:\n lib.debug(\"Error adding video to playlist\")", "async def add_playlist(\n self, user: discord.User, url: str\n ) -> Optional[UserPlaylist]:\n\n playlist = await get_playlist(self.spotify, self.youtube, url)\n\n if not playlist:\n return\n\n generated_id = str(uuid.uuid4())\n await self.database.insertifnotexists(\n self.tables[\"playlists\"],\n {\"user\": user.id, \"playlist_url\": url, \"id\": generated_id},\n {\"user\": user.id, \"playlist_url\": url},\n )\n\n return UserPlaylist(self, user, generated_id, playlist)", "def add_songs(self):\n settings = dict(initialdir=pathlib.Path().absolute(), title=\"Choose songs\", filetypes=(\n (\"flac files\", \"*.flac\"),\n (\"mp3 files\", \"*.mp3\"),\n (\"all files\", \"*\")))\n\n songs = filedialog.askopenfilenames(**settings)\n\n for song in songs:\n self.update_playlist(song)\n\n self.listbox.insert(\"end\", *[song['name'] for song in self.song_list])\n with open('last_list.pkl', 'wb') as f:\n pickle.dump(self.song_list, f)", "def set_playlist(self, playlist):\n self._playlist = playlist", "def add_tracks_to_spotify_playlist(\n tracks: list, playlist_spotify_id: str, access_token: str\n) -> Optional[str]:\n headers = {\n \"Authorization\": \"Bearer {}\".format(access_token),\n \"Content-Type\": \"application/json\",\n }\n # Add tracks 100 at a time per Spotify API docs\n for i in range(0, len(tracks), 100):\n last = min(i + 100, len(tracks))\n uris = []\n for t in tracks[i:last]:\n if t.spotify_id:\n uris.append(\"spotify:track:{}\".format(t.spotify_id))\n elif match_track_spotify(t, access_token):\n 
uris.append(\"spotify:track:{}\".format(t.spotify_id))\n response = requests.post(\n \"https://api.spotify.com/v1/playlists/{}/tracks\".format(\n playlist_spotify_id\n ),\n headers=headers,\n json={\"uris\": uris},\n )\n if response.status_code != 200 and response.status_code != 201:\n return \"Error: {}\".format(response.text)\n if last == len(tracks):\n break\n return None", "def create_playlist(self, playlist_name):\n new_playlist_id = playlist_name.lower()\n if new_playlist_id in self.playlists.keys():\n print(\"Cannot create playlist: A playlist with the same name already exists\")\n return\n\n new_playlist = Playlist(playlist_name)\n self.playlists[new_playlist_id] = new_playlist\n print(f\"Successfully created new playlist: {playlist_name}\")", "def songs_added(self, songs, library=True, playlist=None):\n message = {\n \"timestamp\": self._get_time(),\n \"level\": \"INFO\",\n \"type\": \"SONGS_ADDED\",\n \"songs\": songs,\n }\n\n if library:\n message[\"library\"] = library\n else:\n message[\"playlist\"] = playlist\n\n self._log_queue.put(json.dumps(message))", "def create_playlist(access_token):\n request_body = json.dumps({\n \"name\": \"SpotiAdd\",\n \"description\": \"All Liked Youtube Videos\",\n \"public\": True\n })\n userId = getUserId(access_token)\n query = \"https://api.spotify.com/v1/users/{}/playlists\".format(\n userId)\n response = requests.post(\n query,\n data=request_body,\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {}\".format(access_token)\n }\n )\n response_json = response.json()\n # print(\"create_playlist_id : {}\".format(response_json),file = sys.stdout)\n return response_json[\"id\"]", "def create_playlist(self, playlist_name):\n if playlist_name.lower() in self._playlists:\n print(\"Cannot create playlist: A playlist with the same name already exists\")\n return\n print(f\"Successfully created new playlist: {playlist_name}\")\n self._playlists[playlist_name.lower()] = Playlist(playlist_name)", "def create_playlist(self, playlist_name):\n if playlist_name.upper() in self.playlist.keys():\n print(\"Cannot create playlist: A playlist with the same name already exists\")\n else:\n self.playlist[playlist_name.upper()] = []\n self.playlist_list.append(playlist_name)\n print(f\"Successfully created new playlist: {playlist_name}\")", "def add_to_playlist(self, playlist_name, video_id):\n playlist_exists = False\n video_id_exists = False\n for playlist in list(self.playlists.keys()):\n if playlist_name.upper() == playlist.upper():\n playlist_exists = True\n real_playlist_name = playlist\n break\n \n videos = self._video_library.get_all_videos()\n for v in videos:\n if v.video_id.lower() == video_id.lower():\n video_id_exists = True\n video_title = v.title\n break\n video_flagged = False\n if self.flagged:\n for videos_f in self.flagged:\n if video_id.lower() in videos_f:\n video_flagged = True\n reason = videos_f[1]\n break\n if video_flagged:\n print(f\"Cannot add video to {playlist_name}: Video is currently flagged (reason:{reason})\")\n elif playlist_exists == False:\n print(f\"Cannot add video to {playlist_name}: Playlist does not exist\")\n \n elif video_id_exists == False:\n print(f\"Cannot add video to {playlist_name}: Video does not exist\")\n\n elif video_id.lower() in self.playlists[real_playlist_name]:\n print(f\"Cannot add video to {playlist_name}: Video already added\")\n else:\n self.playlists[real_playlist_name].append(video_id.lower())\n print(f\"Added video to {playlist_name}: {video_title}\")\n\n # 
print(\"add_to_playlist needs implementation\")", "def _mpd_add_track(uri, position = None):\n \n if position != None:\n _mpd_client.addid(uri, position)\n else:\n _mpd_client.addid(uri)", "def add_song(self):\r\n path = input(\"Give file path:\\t\") # Request file path\r\n path = path.replace('\\\\', '/')\r\n if self.path_song_re.match(path) and not self.path_storage_re.match(\r\n path): # Check that the path leads to a song that is not already found in Storage\r\n copy(path, self.p_storage) # Copy the song to the storage directory\r\n file_title, form = path.split(\"/\")[-1].split(\".\") # Save file title and format from the path\r\n sql = \"SELECT COUNT(*) FROM songs WHERE file_title = %s AND form = %s\" # Check the existence of a song\r\n # with the same title and format in the database\r\n self.cursor.execute(sql, (file_title, form))\r\n r = self.cursor.fetchall()\r\n if r[0][0] != 0:\r\n return \"A song with this file name and format already exists!\"\r\n song_title = input(\"Song title:\\t\")\r\n artist = input(\"Artist:\\t\")\r\n data = input(\"Release date:\\t\")\r\n tags = input(\"Associated tags:\\t\")\r\n sql = \"INSERT INTO songs (file_title, song_title, artist, form, data, tag) VALUES (%s, %s, %s, %s, %s, \" \\\r\n \"%s) \" # Insert song into database\r\n columns = (file_title, song_title, artist, form, data, tags)\r\n self.cursor.execute(sql, columns)\r\n self.cnx.commit()\r\n self.cursor.execute(\r\n \"SELECT MAX(ID) FROM songs\")\r\n result = self.cursor.fetchall()\r\n return \"New song ID: \" + str(result[0][0])\r\n else:\r\n return \"Give valid path\"", "def add_song(name, duration):\n song = Song(\n name=name,\n duration=duration,\n )\n db.session.add(song)\n db.session.commit()\n\n return song", "def create_playlist(self, playlist_name):\n if playlist_name.lower() in self.playlists:\n print(\"Cannot create playlist: A playlist with the same name already exists\")\n else:\n self.playlist_names[playlist_name.lower()] = playlist_name\n self.playlists[playlist_name.lower()] = []\n print(\"Successfully created new playlist:\", playlist_name)", "def update_playlist(self, playlist_uuid, playlist_contents, *args):\n\n if playlist_uuid not in self.playlists:\n return rsp_codes[2]\n\n try:\n playlist = Playlist(playlist_contents)\n except PlaylistValidationError:\n rsp = rsp_codes[8]\n rsp['trace'] = traceback.format_exc()\n return rsp\n\n self.playlists[playlist_uuid] = playlist\n\n return rsp_codes[0]", "def adds_new_songs_to_db_by_en_id(yt_playlist_query):\n # yt_playlist_query returned by gets_playlist_history(en_playlist), api_helper.py\n\n for item in yt_playlist_query:\n en_song_id = item['en_song_id']\n is_en_song_id_in_db = db.session.query(exists().where(Song.en_song_id == en_song_id)).scalar()\n if is_en_song_id_in_db == False:\n en_artist_id = item['en_artist_id']\n artist_id = db.session.query(Artist.artist_id).filter(Artist.en_artist_id == en_artist_id).one()\n song_info = Song(en_song_id=en_song_id,\n song_title=item['song_title'],\n artist_id=artist_id)\n db.session.add(song_info)\n db.session.flush", "async def play(self, ctx, *, query):\n player = self.bot.lavalink.players.get(ctx.guild.id)\n query = query.strip('<>')\n if player.is_connected:\n if not ctx.author.voice or not ctx.author.voice.channel or player.connected_channel.id != ctx.author.voice.channel.id:\n return await ctx.send(\"You have to be in my voice channel to queue a song :no_entry:\")\n else:\n if not ctx.author.voice or not ctx.author.voice.channel:\n return await ctx.send(\"Join a voice 
channel :no_entry:\")\n else:\n player.store('sessionowner', ctx.author.id)\n player.store('channel', ctx.channel.id)\n await player.connect(ctx.author.voice.channel.id)\n if not url_re.match(query):\n query = \"ytsearch:{}\".format(query)\n results = await self.bot.lavalink.get_tracks(query)\n if not results or not results['tracks']:\n return await ctx.send(\"I could not find any songs matching that query :no_entry:\")\n s=discord.Embed()\n if results[\"loadType\"] == \"PLAYLIST_LOADED\":\n tracks = results[\"tracks\"]\n for track in tracks:\n player.add(requester=ctx.author.id, track=track)\n s.description = \"Enqueued {} with **{}** tracks <:done:403285928233402378>\".format(results['playlistInfo']['name'], len(tracks))\n await self.bot.get_channel(player.fetch('channel')).send(embed=s)\n else:\n track = results[\"tracks\"][0]\n player.add(requester=ctx.author.id, track=track)\n timetill = 0\n for x in player.queue:\n timetill += x.duration\n if player.current:\n timetill += player.current.duration - player.position\n else:\n timetill = 0 \n index = [x.track for x in player.queue].index(track[\"track\"]) + 1\n s.set_author(name=\"Added to Queue\", icon_url=ctx.author.avatar_url)\n s.set_thumbnail(url=\"https://img.youtube.com/vi/{}/default.jpg\".format(track[\"info\"][\"identifier\"]))\n s.add_field(name=\"Song\", value=\"[{}]({})\".format(track[\"info\"][\"title\"], track[\"info\"][\"uri\"]), inline=False)\n s.add_field(name=\"Duration\", value=self.format_time(track[\"info\"][\"length\"]), inline=True)\n s.add_field(name=\"Position in Queue\", value=index)\n if timetill != 0:\n s.add_field(name=\"Estimated time till playing\", value=self.format_time(timetill-track[\"info\"][\"length\"]))\n else:\n s.add_field(name=\"Estimated time till playing\", value=\"Next\")\n await self.bot.get_channel(player.fetch('channel')).send(embed=s)\n if not player.is_playing:\n await player.play()", "def add_track(self, track, show_artist=False):\n\n url = self.connection.streamUrl(\n sid=track[\"id\"], maxBitRate=self.bitrate,\n tformat=self.transcode_format)\n\n # Create list item\n if show_artist:\n title = \"%s - %s\" % (\n track.get(\"artist\", \"<Unknown>\"),\n track.get(\"title\", \"<Unknown>\"))\n else:\n title = track.get(\"title\", \"<Unknown>\")\n\n # Create item\n li = xbmcgui.ListItem(title)\n\n # Handle cover art\n if \"coverArt\" in track:\n cover_art_url = self.connection.getCoverArtUrl(track[\"coverArt\"])\n\n li.setIconImage(cover_art_url)\n li.setThumbnailImage(cover_art_url)\n li.setProperty(\"fanart_image\", cover_art_url)\n\n # Handle metadata\n li.setProperty(\"IsPlayable\", \"true\")\n li.setMimeType(track.get(\"contentType\"))\n li.setInfo(type=\"Music\", infoLabels={\n \"Artist\": track.get(\"artist\"),\n \"Title\": track.get(\"title\"),\n \"Year\": track.get(\"year\"),\n \"Duration\": track.get(\"duration\"),\n \"Genre\": track.get(\"genre\"),\n \"TrackNumber\": track.get(\"track\")})\n\n xbmcplugin.addDirectoryItem(\n handle=self.addon_handle, url=url, listitem=li)", "def song_already_exists(song, playlist_id):\n print('Song {title} already in playlist {playlist_id}, adding has been skipped.'\n .format(title=song.title,\n playlist_id=playlist_id))\n pass", "async def playlist(self, data, msg):\n for i in data['queue']:\n print(i)\n self.player[msg.guild.id]['queue'].append(\n {'title': i, 'author': msg})", "def add_track_to_playlist(self, track_uri, playlist_id):\n endpoint = f'/playlists/{playlist_id}/tracks?uris={track_uri}'\n url = f'{self.api_base_url}{endpoint}'\n\n req = 
requests.post(url, headers=self.__header_bearer())\n\n return True if req.status_code == 201 else False", "def create_playlist(self, playlist_name):\n print(\"create_playlist needs implementation\")", "def create_playlist(self, playlist_name):\n #self._video_playlist.name=playlist_name\n #self._video_playlist.caseless=playlist_name.lower()\n #print(f\"Successfully created new playlist: {self._video_playlist.name}\")\n if playlist_name.lower() not in self.playlists:\n self.playlists[playlist_name.lower()]=[]\n print(\"Successfully created new playlist: {0}\".format(playlist_name))\n else:\n print(\"Cannot create playlist: A playlist with the same name already exists\")", "def add_playlist_tracks(self, username, playlist_name, track_list):\n playlist_id = self.get_playlist_id(username, playlist_name)\n request_chunks = [track_list[i:i + 100] for i in range(0, len(track_list), 100)] # Blocks of 100 songs\n for track_chunk in request_chunks:\n self.spotify.user_playlist_add_tracks(username, playlist_id, track_chunk)", "def song_added(song, playlist_id):\n if song.added_by == 'cedmunds90':\n print('Ruhpushuh {song_id} ({title}) ruhpush a shuh {playlist_id} rhup {added_by}.'\n .format(song_id=song.id,\n title=song.title,\n playlist_id=playlist_id,\n added_by=song.added_by))\n pass\n else:\n print('Song {song_id} ({title}) added to playlist {playlist_id} by {added_by}.'\n .format(song_id=song.id,\n title=song.title,\n playlist_id=playlist_id,\n added_by=song.added_by))\n\n pass", "def on_playlist_command(self, event, to_shuffle, url=\"\"):\n self.pre_check(event)\n if to_shuffle != \"shuffle\" and to_shuffle != \"Shuffle\":\n url = \"{} {}\".format(to_shuffle, url)\n to_shuffle = \"no\"\n url_not_found = False\n for url_format in (\"https://www.youtube.com/playlist?list=\",\n \"https://youtube.com/playlist?list=\", \"https://youtu.be\"):\n if url_format in url:\n url_not_found = True\n if not url_not_found:\n return api_loop(\n event.channel.send_message,\n \"Invalid youtube playlist link.\",\n )\n if event.guild.get_member(event.author).get_voice_state():\n self.on_join(event)\n self.same_channel_check(event)\n if (event.author.id not in self.cool_down[\"general\"] or\n time() - self.cool_down[\"general\"][event.author.id] >= 1):\n if (event.guild.id not in self.cool_down[\"playlist\"] or\n not self.cool_down[\"playlist\"][event.guild.id]):\n self.cool_down[\"playlist\"][event.guild.id] = True\n self.cool_down[\"general\"][event.author.id] = time()\n videos_added = 0\n many_object = YoutubeDLInput.many(url, command=\"ffmpeg\")\n try:\n many_object = list(many_object)\n except Exception as e:\n return api_loop(\n event.channel.send_message,\n \"Playlist not found: {}\".format(e),\n )\n if to_shuffle == \"shuffle\" or to_shuffle == \"Shuffle\":\n shuffle(many_object)\n message = api_loop(\n event.channel.send_message,\n \"Adding music from playlist.\",\n )\n for youtubedl_object in many_object:\n try:\n yt_data = self.get_ytdl_values(youtubedl_object.info)\n except DownloadError as e:\n continue\n if yt_data[\"is_live\"]:\n continue\n elif yt_data is None or yt_data[\"duration\"] > 3620:\n continue\n try:\n self.get_player(event.guild.id).append(youtubedl_object)\n except CommandError as e:\n self.cool_down[\"playlist\"][event.guild.id] = False\n raise e\n videos_added += 1\n message.edit(\n \"Successfully added {} videos to queue from playlist and dropped {} videos.\".format(\n videos_added,\n len(many_object) - videos_added,\n ),\n )\n self.cool_down[\"playlist\"][event.guild.id] = False\n 
else:\n api_loop(\n event.channel.send_message,\n \"Still adding previous playlist, please wait.\",\n )\n else:\n cool = round(\n Decimal(\n 1 - (time() - self.cool_down[\"general\"][event.author.id]),\n ),\n )\n api_loop(\n event.channel.send_message,\n \"Cool down: {} seconds left.\".format(cool),\n )", "def playlist_tracks_add(self, playlist_id: str, track_ids: list,\n position: int = None):\n payload = {'uris': [to_uri('track', t) for t in track_ids]}\n return self._post(f'playlists/{playlist_id}/tracks', payload=payload,\n position=position)", "def create_playlist(self, playlist_name):\n playlist_name = Playlist()\n if self != playlist_name:\n print(f\"successfully created new playlist: {playlist_name}\")\n else:\n print(\"Cannot create playlist: A playlist with the same name already exists\")", "def new_playlist_command(self):\n self.parent.song_object_list.clear()\n self.display_data(self.parent.song_object_list)\n self.playlist_select.set(\"Working Playlist\")", "def add_entry():\n username = util.remove_commas_from_string(request.form[\"name\"])\n link = util.remove_commas_from_string(request.form[\"ytLink\"])\n song = util.remove_commas_from_string(request.form[\"songName\"])\n\n festive = CHRISTMAS_MODE and \"christmasSong\" in request.form\n\n with database.connect_to_database() as db:\n user_id = database.get_userid(db, username)\n database.add_song(db, link, song, user_id, month=12 if festive else None)\n\n return redirect(url_for('main'))", "def __add_song(self, song, genius_api):\n\t\tentry = {\n\t\t\t'id' : int(song['id']),\n\t\t\t'title' : song['title'],\n\t\t\t'primary_artist' : {\n\t\t\t\t'id' : song['primary_artist']['id'],\n\t\t\t\t'name' : str(song['primary_artist']['name']).lower(),\n\t\t\t\t'url' : song['primary_artist']['url'],\n\t\t\t\t'is_verified' : song['primary_artist']['is_verified'],\n\t\t\t\t},\n\t\t\t'url' : song['url'],\n\t\t\t'lyrics' : genius_api.get_lyrics(song['id'], song['url'])\n\t\t\t}\n\t\tif song['album']:\n\t\t\tentry['album'] = {\n\t\t\t\t'id': song['album']['id'], \n\t\t\t\t'full_title': song['album']['full_title'], \n\t\t\t\t'name': song['album']['name'], \n\t\t\t\t'artist': song['album']['artist']['id']\n\t\t\t\t}\n\t\tif song['release_date']:\n\t\t\tentry['release_date'] = song['release_date']\n\t\tif len(song['featured_artists']) > 0:\n\t\t\tfeatured_artists = list()\n\t\t\tfor artist in song['featured_artists']:\n\t\t\t\tart = {\n\t\t\t\t\t'id' : artist['id'],\n\t\t\t\t\t'name' : artist['name'].lower()\n\t\t\t\t\t}\n\t\t\t\tfeatured_artists.append(art)\n\t\t\tentry['featured_artists'] = featured_artists\n\t\t\t#Step 3: Insert Artist into MongoDB via isnert_one\n\t\tself.db.songs.insert_one(entry)", "def set_playlist(self, playlist: List[Dict[str, Any]]) -> None:\n self._playlist = copy.deepcopy(playlist)", "def add_tracks_to_playlist(self, track_ids):\n endpoint = f\"playlists/{self.playlist_id}/tracks\"\n self.spotify_client._headers[\"Content-Type\"] = \"application/json\"\n self.spotify_client._data = json.dumps(\n [f\"spotify:track:{track_id}\" for track_id in track_ids]\n )\n response = self.spotify_client._post_api_data(endpoint)\n return response", "def create_playlist():\n sp = credentials()\n sp.user_playlist_create('truetiming', name='Billboard Hot 100')", "def cmd_pasetplaylist(self, data, client, cmd):\n if not self._isplaylist_enabled and not self._isranked:\n client.message('Playlists are not enabled in this server. 
You can\\'t set a playlist!')\n return\n\n _number_of_playlists = len(self._playlists)\n\n if not data:\n client.message('missing parameter, try !help pasetplaylist')\n return\n\n try:\n float(data)\n except ValueError:\n client.message('Please use a playlist number, %s is not a numeric value' % data)\n return\n\n data = int(data)\n if data not in range(1, _number_of_playlists + 1):\n client.message('Playlist number %s out of range! Please enter a valid number' % data)\n else:\n self.console.write('setadmindvar playlist %s' % data, maxRetries=5)\n client.message('Changing playlist to ^3%s - %s' % (data, self._playlists[data]))", "def create_playlist(self, playlist_name):\n for playlist in self.playlists.keys():\n if playlist_name.upper() == playlist.upper():\n print(\"Cannot create playlist: A playlist with the same name already exists\")\n break\n else:\n self.playlists[playlist_name]=[]\n print(\"Successfully created new playlist: \" + playlist_name)\n # print(\"create_playlist needs implementation\")", "def add_song():\n return render_template('pong!')", "def add_album(self, album, show_artist=False):\n\n url = self.build_url({\n \"mode\": \"track_list\",\n \"album_id\": album[\"id\"]})\n\n # Create list item\n if show_artist:\n title = \"%s - %s\" % (\n album.get(\"artist\", \"<Unknown>\"),\n album.get(\"name\", \"<Unknown>\"))\n else:\n title = album.get(\"name\", \"<Unknown>\")\n\n # Add year if applicable\n if album.get(\"year\"):\n title = \"%s [%d]\" % (title, album.get(\"year\"))\n\n # Create item\n li = xbmcgui.ListItem()\n li.setLabel(title)\n\n # Handle cover art\n if \"coverArt\" in album:\n cover_art_url = self.connection.getCoverArtUrl(album[\"coverArt\"])\n\n li.setIconImage(cover_art_url)\n li.setThumbnailImage(cover_art_url)\n li.setProperty(\"fanart_image\", cover_art_url)\n\n # Handle metadata\n li.setInfo(type=\"music\", infoLabels={\n \"Artist\": album.get(\"artist\"),\n \"Album\": album.get(\"name\"),\n \"Year\": album.get(\"year\")})\n\n xbmcplugin.addDirectoryItem(\n handle=self.addon_handle, url=url, listitem=li, isFolder=True)", "def add_new_song(self):\n return \"New Song Added\"", "def remove_song(self, song):\n # code omitted\n self.playlist.remove(song)", "def savePlaylist():\n\n # get user form info\n title = request.json.get('title')\n interval = request.json.get('interval')\n orig_playlist_id = request.json.get('playlist_id')\n\n # create a new playlist\n new_playlist = crud.createPlaylist(session, title)\n\n new_playlist_id = new_playlist['id']\n\n user_id = session['user_id']\n\n # store playlist in DB\n savedPlaylist = crud.storeSavedPlaylist(user_id, orig_playlist_id, \n new_playlist_id, interval, title)\n print(savedPlaylist)\n \n # copy over tracks in original playlist to the new playlist\n snapshot_id = crud.updatePlaylist(session, orig_playlist_id, new_playlist_id)\n\n return snapshot_id", "def create_playlist(self, request):\n # TODO: Max amount of playlists at 20 for a user\n user = Account.find_by_id(request.userid)\n if user is None:\n print \"User not found\" \n return PlaylistResponse(errmsg=\"User ID not found\")\n new_pl = Playlist.add_new_playlist(user.key, request.name)\n return PlaylistResponse(pid=new_pl.key.id())", "def add_video_to_playlist(self, video_id, playlist_id):\n if self.youtube is None:\n self.youtube = __get_client()\n self.youtube.playlistItems().insert(\n part=\"snippet\",\n body={\n 'snippet': {\n 'playlistId': playlist_id,\n 'resourceId': {\n 'kind': 'youtube#video',\n 'videoId': video_id\n }\n }\n }\n ).execute()", 
"def save_playlist_command(self):\n self.switch_frame(\"Save Playlist\")", "def create_playlist(self, playlist_name: str, song_ids: List[str]) -> str:\n user = self.init_user()\n user_id = user.me()['id']\n playlist_data = user.user_playlist_create(\n user=user_id, name=playlist_name, public=True)\n user.playlist_add_items(playlist_data['id'], song_ids)\n playlist_link = playlist_data['external_urls']['spotify']\n return playlist_link", "def pl_btn_push(self):\n try:\n pl_name = self.pl_line_edit.text().replace(\" \", \"_\")\n path = os.path.abspath(\"Playlists/\"+pl_name+\".m3u\")\n pl_file = open(path, 'a')\n\n songs = glob.glob(\"Fixed/*/*/*\")\n for row in range(self.model.rowCount()):\n if self.model.item(row).checkState():\n index = self.model.index(row, 4)\n for song in songs:\n data = mutagen.File(song, easy=True)\n track = get_track(data['title'][0], data['artist'][0])\n if int(track.track_id) == int(self.model.data(index)):\n mp3_path = os.path.abspath(song)\n pl_file.write(mp3_path+\"\\n\")\n QMessageBox.about(self, \"Playlist Updated\",\n 'Playlist \"%s\" has been updated.'%(self.pl_line_edit.text()))\n pl_file.close()\n except:\n QMessageBox.about(self, \"Playlist Not Updated\",\n 'Playlist \"%s\" could not be updated.'%(self.pl_line_edit.text()))", "def song(self, value):\r\n self._song_id = value\r\n data = Song(value)\r\n self.songtitel = data.songtitel if data.found else \"\"", "def add_album(self, album):\n\n self.album.append(album)", "def add_album(self, album):\n self.albums.append(album)" ]
[ "0.8281379", "0.8122598", "0.81120324", "0.77928084", "0.7633073", "0.76006603", "0.73052067", "0.7280356", "0.7275825", "0.72581446", "0.72479594", "0.7200695", "0.7198082", "0.71816415", "0.71812034", "0.70947707", "0.7091116", "0.70539707", "0.7045048", "0.70189893", "0.6964508", "0.6949354", "0.69120246", "0.6910864", "0.69066316", "0.689984", "0.68889886", "0.6876355", "0.68566334", "0.68505096", "0.6796031", "0.6780874", "0.673331", "0.66947734", "0.6672581", "0.66486883", "0.6635108", "0.662235", "0.66204417", "0.661864", "0.6570199", "0.65492326", "0.6542671", "0.6538331", "0.65316784", "0.65059125", "0.6500916", "0.64904493", "0.6463795", "0.645555", "0.64249027", "0.64181346", "0.6413261", "0.6412501", "0.6404357", "0.6402856", "0.63770425", "0.637216", "0.6367915", "0.6338603", "0.6327951", "0.6323835", "0.6310232", "0.62967914", "0.6286803", "0.62792075", "0.62693894", "0.6263353", "0.62589806", "0.6250637", "0.6245237", "0.6244063", "0.6242834", "0.6239977", "0.62274224", "0.62092984", "0.62043464", "0.619814", "0.61938214", "0.618059", "0.615124", "0.6150141", "0.6133866", "0.61166215", "0.61165774", "0.61044294", "0.609971", "0.60546225", "0.6050085", "0.60371566", "0.602493", "0.6012903", "0.6010055", "0.60085505", "0.6008529", "0.6000093", "0.5999283", "0.5997444", "0.59732157", "0.5971208" ]
0.7014243
20
[would list out friends, not working]
def list_users_friends(self): user = self.sp.user(self.user) return user
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_friend_list(self):\n self.friends = self.df[['user_id','friends']]", "def find_friends(request):\n find_list = []\n sent_requests = set()\n rec_requests = set()\n sent_f_requests = FriendRequest.objects.filter(\n from_user=request.user\n )\n rec_f_requests = FriendRequest.objects.filter(\n to_user=request.user\n )\n\n me = request.user\n my_friends = me.profile.friends.all()\n my_family = me.relations.all()\n profiles = Profile.objects.exclude(\n user=request.user\n )\n for user in profiles:\n user_friends = user.friends.all()\n for friend in user_friends:\n if friend not in find_list and friend != me:\n if friend not in my_friends and friend not in my_family:\n find_list.append(friend)\n\n template = 'profiles/find_friends.html'\n context = {\n 'find_list': find_list,\n }\n return render(request, template, context)", "def get_friends_of_friend(friends, data):\n friends_of_friends = []\n for friend in friends:\n friend_list = get_friends(friend, data)\n friends_of_friends.append(friend_list)\n return sum(friends_of_friends, [])", "def friends():\n friends = [u.to_dict() for u in g.user.get_friends()]\n return jsonify({'success': True, 'friends': friends})", "def get_my_friends(self):\n query = read_query('content exploration/my_friends')\n response = self._submit_query(query)\n return [elem['name']['value'].split('/')[-1] for elem in response]", "def friends(self):\n #Guillaume\n friends_list = []\n received = Friendships.objects.filter(request_for=self, status='A')\n for friend in received:\n friends_list.append(friend.request_from)\n sent = Friendships.objects.filter(request_from=self, status='A')\n for friend in sent:\n friends_list.append(friend.request_for)\n return friends_list", "async def get_friends(self) -> List[User]:\n me = await self.get_self()\n r = await self.request.request(url=f'https://friends.roblox.com/v1/users/{me.id}/friends', method=\"GET\")\n data = r.json()\n friends = []\n for friend in data['data']:\n friends.append(User(self.request, friend['id'], friend['name']))\n return friends", "def get_friends(user, data):\n setA = list(\n data.loc[data.user == user].user_friend_list.values)\n setB = list(\n data.loc[data.user_friend_list == user].user\n .values)\n friends = list(set(set(setA).union(setB)))\n return friends", "def get_possible_friends():\n user_list = []\n for user_unprocessed in api_vars.users.find({'public': True}):\n user = user_unprocessed\n user['_id'] = str(user['_id'])\n user_list.append(user)\n # For now, let's break the list at one hundred. 
This is just for the\n # sake of simplicity.\n if len(user_list) >= 100:\n break\n user_data = {'users': user_list}\n json_data = json.dumps(user_data)\n return json_data", "def friends(self) -> List['Friend']:\n\n return self.sObj.getFriends()", "def user_list_friend_requests(self):\n email_token = auth.current_user()[0]\n friend_emails = self.friend_database.get_friend_requests(email_token)\n friends = [self.auth_server.profile_query(email) for email in friend_emails]\n return json.dumps(friends), 200", "def friend_list(request):\n profile = Profile.objects.get(user=request.user)\n context = {\n 'profile': profile,\n }\n return render(request, 'profiles/my_friends.html', context)", "def get_friends(graph, location_id=\"\", is_user=\"\"):\n user = graph.get_object(\"me\")\n fql = \"SELECT uid, name, profile_url, pic_small, current_location, mutual_friend_count FROM user WHERE uid IN (SELECT uid1 FROM friend WHERE uid2 = \" + user[\"id\"] + \")\"\n if location_id:\n fql += \" AND current_location.id=\" + location_id\n if is_user:\n fql += \" AND is_app_user=\" + is_user\n fql += \" ORDER BY mutual_friend_count DESC\"\n logging.info(fql)\n try:\n fql_friends = graph.fql(fql)\n return fql_friends['data']\n except:\n logging.error(\"There was an error retrieving friends of UID %s\", user[\"id\"])\n return list()", "def get_friends(user):\r\n try:\r\n friends = user.friends()\r\n return friends[:]\r\n except tweepy.error.RateLimitError:\r\n print(\"Rate limit reached! Waiting...\")\r\n wait_15_minutes()\r\n return get_friends(user)\r\n except tweepy.error.TweepError:\r\n print(\"Skipping user whose information is protected.\")\r\n return list()", "def signed_up_friends(self):\n friends = self.twitter_oauth.friends\n if not friends:\n return [], []\n return friends, User.query.filter(\n User.username.in_(x.screen_name for x in friends))", "def get_friends():\n\n acct = get_current_account(session[\"acct\"])\n get_user_friends(acct, GR_KEY, GR_SECRET)\n search = False\n\n return render_template(\"index.html\", acct=acct, search=search)", "def get_friends(character, _info):\n return map(get_character, character.friends)", "def get_friends(self, force: bool = False) -> List[types.FriendInformation]:\n raise NotImplementedError", "def get_user_friends(user_id):\n\n friends = db.session.query(User_Friend).filter(User_Friend.user_id==user_id).all() \n\n return friends", "def get_friends(self):\n edges = DirectedUserToUserEdge.all().filter(\n 'owner_user_id =', self.key().id()).run()\n return db.get([db.Key.from_path('User', edge.friend_user_id) for edge in\n edges])", "def ReorganizeFriendList(self):\n with sqlite3.connect(self.DBname) as conn:\n c = conn.cursor()\n c.execute(\"select ZID, FRIENDS from user_info\")\n user_list = c.fetchall()\n for user, friends in user_list:\n out = set()\n friends = [x.strip() for x in friends.split(\",\")]\n for friend in friends:\n c.execute(\"select FRIENDS from user_info where ZID = (?)\",[friend])\n TFL = c.fetchone()[0]\n TFL = [x.strip() for x in TFL.split(\",\")]\n if user not in TFL:\n out.add(friend)\n NFL = list(set(friends) - out)\n self.UpdateFriendList(user,NFL)", "def get_friends(self):\n friends = self.graph.get_connections(\"me\", \"friends\")\n return friends['data']", "def user_list_friends(self):\n email_query = request.args.get('email')\n if not email_query:\n self.logger.debug(messages.MISSING_FIELDS_ERROR % \"email\")\n return messages.ERROR_JSON % messages.MISSING_FIELDS_ERROR % \"email\", 400\n email_token = auth.current_user()[0]\n if 
email_token != email_query and not self.friend_database.are_friends(email_token, email_query):\n self.logger.debug(messages.USER_NOT_AUTHORIZED_ERROR)\n return messages.ERROR_JSON % messages.USER_NOT_AUTHORIZED_ERROR, 403\n friend_emails = self.friend_database.get_friends(email_query)\n friends = [self.auth_server.profile_query(email) for email in friend_emails]\n return json.dumps(friends), 200", "async def get_friends(_: User = Depends(get_current_user),\n db: Session = Depends(get_db)):\n # Need a way to map GPG keys to users in DB\n pairs = crud.get_name_email_pairs(db)\n return pairs", "def get_friends(twitter,userid,count):\n url = 'https://api.twitter.com/1.1/friends/ids.json?&user_id='+str(userid)+'&skip_status=true&include_user_entities=false&count='+str(count) \n consumer = oauth.Consumer(key=consumer_key, secret=consumer_secret)\n access = oauth.Token(key=access_token, secret=access_token_secret)\n client = oauth.Client(consumer, access)\n li=[]\n try:\n response,data = client.request(url)\n dataStr = data.decode('utf-8') \n if('Rate limit exceeded' in dataStr ):\n print('rate limit exceeded error.. sleep for 15 min')\n time.sleep(61 * 15)\n response,data = client.request(url)\n \n jsonid = json.loads(dataStr)\n li = list(jsonid['ids'])\n \n except:\n pass\n \n return li", "def friends(request):\n return friendslist(request, request.session['id'])", "def get_friends(twitter, screen_name):\n request = robust_request(twitter, 'friends/ids', {'screen_name': screen_name}, max_tries=5)\n friend_list = []\n for r in request:\n friend_list.append(r)\n return sorted(friend_list)", "def friend_info(self):\n return self._reddit.get(API_PATH['friend_v1'].format(user=self))", "def crawl_friends(api, friends_ids=[]):\n req_count = len(friends_ids) / 100.0\n hours = req_count / 12.0 / 60.0\n print \"Estimated time to crawl profiles: {:,} hours\".format(hours)\n print\n friends = users_lookup(api, friends_ids)\n return friends", "def print_num_friends(users):\n for u_dict in users:\n print (\"%s %d\" %(u_dict['screen_name'], len(u_dict['friends'])))", "def get_friends(api, username, limit):\n for friend in tqdm(tweepy.Cursor(api.friends, screen_name=username).items(limit), unit=\"friends\", total=limit):\n process_friend(friend)", "def get_friends(self, user_id):\n # if user_id is alias, replace it with id\n if not self._is_positive_number(user_id):\n user_id = get_names_of_users(set([user_id]))[0].id\n api = pyvkontakte.VkontakteApi()\n return set(api.call('friends.get', user_id=user_id, v='5.8')['items'])", "def get_friends(driver, username):\r\n\r\n driver.get('https://www.facebook.com/' + username + '/friends_all')\r\n scroll_to_end_of_page(driver)\r\n friends = driver.find_elements_by_css_selector('.fsl.fwb.fcb')\r\n for i in range(len(friends)):\r\n friends[i] = friends[i].text\r\n return friends", "def list(self, request):\n\n user_profile = get_object_or_404(UserProfile, user=request.user)\n #   Get all sent accepted invitations\n sent = user_profile.creator_friendships.filter(status=1)\n # Get all received accepted invitations\n received = user_profile.invited_friendships.filter(status=1)\n #   Combine results to get all friends:\n friends = []\n for friendship in sent:\n friends.append(UserProfileSerializer(friendship.user_2).data)\n for friendship in received:\n friends.append(UserProfileSerializer(friendship.user_1).data)\n return Response(friends, status=rest_status.HTTP_200_OK)", "def show_friends():\n\n\n user_id = session['user_id']\n user = User.query.get(user_id)\n friendship 
= Friendship.query.get(user_id)\n\n return render_template('friends.html', user=user, friendship=friendship)", "def addFriends(author):\n friends = author.friends.all()\n remote_friends = RemoteFriend.objects.all().filter(author=author)\n friend_list = list()\n if friends:\n for friend in friends:\n friend_dict = {'id': \"{}/api/{}\".format(DOMAIN, friend.id), 'host': friend.host_url,\n 'displayName': friend.username, 'url': \"{}/api/{}\".format(DOMAIN, friend.id)}\n friend_list.append(friend_dict)\n\n if remote_friends:\n for remote in remote_friends:\n friend_dict = {'id': remote.url, 'host': remote.host,\n 'displayName': remote.displayName, 'url': remote.url}\n friend_list.append(friend_dict)\n\n remote = check_remote_friends(author)\n friend_list += remote\n return friend_list", "def get_friends(user_id):\n return list(set(get_following(user_id)) &\n set(get_followers(user_id)))", "def getMembers():", "def getMembers():", "def getMembers():", "def getMembers():", "def _get_friends_random_list(self, citizen):\r\n \r\n number_friends = int(random.uniform(len(citizen.friends)*0.05, len(citizen.friends)*0.2))\r\n return random.sample(citizen.friends, number_friends)", "def test_friends_of_friend_ids(self):\n expected = {0: 2, 5: 1}\n self.assertEqual(expected, self.users.friends_of_friend_ids(3))", "def get_queryset(self):\n self.object = self.get_object()\n return self.object.friends.all().exclude(user=self.object.user)", "def list(self, request, *args, **kwargs):\n from_user = request.QUERY_PARAMS.get('user', None)\n if from_user:\n from_user_id = from_user\n else:\n from_user_id = request.user.id\n\n query = Friend.objects.filter(from_user_id=from_user_id)\n # items = request.QUERY_PARAMS.get('item', 50)\n items = 200\n paginator = Paginator(query, items)\n\n page = request.QUERY_PARAMS.get('page')\n\n try:\n friends = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n friends = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 
9999),\n # deliver last page of results.\n friends = paginator.page(paginator.num_pages)\n user_location = UserLocation.objects.filter(user=request.user).order_by('-modified_at').first()\n context = dict(user_id=request.user.id)\n if user_location:\n context['lat'] = user_location.lat\n context['lon'] = user_location.lon\n serializer = PaginatedFriendSerializer(friends, context=context)\n return Response(serializer.data, status=200)", "def get_pending_friends(cu_id):\n users = db.session.execute(\n \"\"\"select fr.user_1_id, u.username, u.firstname, u.lastname\n from friend_request as fr inner join userm as u on fr.user_1_id = u.id \n where fr.user_2_id = :cu_id\n and fr.approved is NULL\"\"\",\n {\"cu_id\": cu_id}\n )\n return users", "def count_friends(users):\n all_friends=[]\n for u_dict in users:\n for items in u_dict['friends']:\n all_friends.append(items)\n count = Counter()\n for frnd in all_friends:\n count[frnd]+=1\n return count", "def getInterestedUsers():", "def get_mutual_friends(person1_friends, person2_friends):\n return list(set(person1_friends) & set(person2_friends))", "def test_how_many_friends(self):\n expected = [\n (1, 3), (2, 3), (3, 3), (5, 3), (8, 3),\n (0, 2), (4, 2), (6, 2), (7, 2), (9, 1),\n ]\n self.assertEqual(expected, self.users.how_many_friends())", "def friends(user_id):\n user = user_grab(user_id)\n if user is None:\n return \"user not found\", 404\n friends = user.get(\"friends\")\n if friends is None:\n friends = []\n data_json = json.dumps({'friends': [str(friend) for friend in friends]})\n return data_json", "def get_user_friends_locations_list(\n bearer_token: str, screen_name: str, friends_num: int=50\n ) -> list:\n base_url = 'https://api.twitter.com/'\n search_headers = {\n 'Authorization': f'Bearer {bearer_token}'\n }\n search_params = {\n 'screen_name': f'{screen_name}',\n 'count': friends_num\n }\n search_url = f'{base_url}1.1/friends/list.json'\n response = requests.get(\n search_url, headers=search_headers, params=search_params\n )\n\n data = response.json()\n\n return [\n (user['name'], user['location'])\n for user in data['users']\n if len(user['location']) != 0\n ]", "def view_friends(request, username):\n user = get_object_or_404(user_model, username=username)\n qs = Friend.objects.select_related(\"UserProfile\").filter(to_user=user)\n friends = [u.from_user for u in qs]\n self = navbar(request.user.id)\n user1 = self.user.id\n for i in friends:\n to_user = i.id\n i.user2 = str(user1)+\"|\"+str(to_user)\n return render_to_response( 'view_friends.html', {'friends': friends, 'self':self})", "def friends_of_friends(self, node, ids):\n fof = set()\n for id in ids:\n for f in self.users[id]:\n if f != node:\n fof.add(f)\n return fof", "def populateFriendTree(self):\n self.friends = self.twitter.GetFriends()\n self.tree = self.builder.get_object(\"rel_tree\")\n self.tree_list = gtk.ListStore(gobject.TYPE_STRING)\n self.temp_list = []\n for k in self.friends:\n self.temp_list.append(k[\"screen_name\"])\n for name in self.temp_list:\n self.tree_list.append([name])\n self.tree.set_model(self.tree_list)", "def get_users_by_friendid(cls, provider, friendid):\n #return db.GqlQuery(\"SELECT user FROM Logins WHERE provider_name = :1 AND friendids = :2\",provider, friendid)\n count = 0\n if provider == 'twitter':\n query = db.GqlQuery(\"SELECT * FROM TwitterFriendsTBD WHERE friendids = :1 ORDER BY __key__\", friendid)\n elif provider == 'facebook':\n query = db.GqlQuery(\"SELECT * FROM FacebookFriendsTBD WHERE friendids = :1 ORDER BY __key__\", 
friendid)\n users = []\n while 1:\n logins = query.fetch(1000)\n current_count = query.count()\n count += current_count\n if current_count == 0:\n break\n for alogin in logins:\n users.append(alogin.user)\n query.with_cursor(query.cursor())\n\n \"\"\"\n while 1:\n current_count = query.count()\n count += current_count\n if current_count == 0:\n break\n logins = query.fetch(1000)\n last_key = None\n for alogin in logins:\n users.append(alogin.user)\n last_key = alogin.key()\n query = query.filter('__key__ > ', last_key)\n \"\"\"\n return users", "def display_all_friends(self):\r\n # Initial count of elements\r\n element_count = len(self.driver.find_elements_by_css_selector(self.CSS_SELECTOR_FRIEND))\r\n print(\"Processing \" + str(element_count) + \" CSS selectors...\")\r\n\r\n # Scroll page to the end\r\n while True:\r\n self.driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\r\n time.sleep(3)\r\n\r\n # If the number of elements is the same, the end of page has been reached\r\n if len(self.driver.find_elements_by_css_selector(self.CSS_SELECTOR_FRIEND)) == element_count:\r\n break\r\n\r\n element_count = len(self.driver.find_elements_by_css_selector(self.CSS_SELECTOR_FRIEND))\r\n print(\"Processing \" + str(element_count) + \" CSS selectors...\")\r\n\r\n # Store friends in class variable\r\n self.friends = self.driver.find_elements_by_css_selector(self.CSS_SELECTOR_FRIEND)", "def friend_overlap(users):\n list_overlap = []\n list_common = []\n m=0\n for i in range(0,len(users)):\n \tfor j in range(i+1,len(users)):\n \t\ts1 = set.intersection(set(users[i].get('friends')), set(users[j].get('friends')))\n \t\tlist_common.append(s1)\n for i in range(0,len(users)):\n for j in range(i+1,len(users)):\n list_overlap.append((users[i]['screen_name'],users[j]['screen_name'],len(list_common[m])))\n m = m + 1\n return sorted(list_overlap, key=lambda x: (x[2]), reverse=True)", "def getFollowers():\n\n cur, user_id = initialise(3)\n cur.execute(\"SELECT following FROM followers WHERE user = (SELECT username FROM users WHERE id = ?)\", [user_id])\n tempFollowers = cur.fetchall()\n followers = []\n for follower in tempFollowers:\n followers.append(follower[0])\n return followers", "def get_friends_page(session, user_id, page):\n\n url = 'https://www.goodreads.com/friend/user'\n params = {'id': user_id, 'format': 'xml', 'page': page}\n response = session.get(url, params=params)\n\n doc = untangle.parse(response.content)\n total = int(doc.GoodreadsResponse.friends['total'])\n friends = doc.GoodreadsResponse.friends\n\n return (total, friends)", "def get_best_friends(self):\n query = read_query('content exploration/best_friends')\n response = self._submit_query(query)\n return [(elem['name']['value'], elem['num_chat']['value'].split('/')[-1]) for elem in response]", "def get_all_users():", "def createFriendlyEmpireList(self):\n friends = []\n for shipID, myShip in self.myGalaxy.ships.iteritems():\n if shipID not in self.targets and myShip.empireID not in friends:\n friends.append(myShip.empireID)\n return friends", "def add_all_friends(twitter, users):\n ###TODO-- Completed\n\n #calling get_friends here to receive friends ID's for all the values of screen_name,\n # limiting the values to receive to 5000\n for user in users:\n user['friends'] = get_friends(twitter, user['screen_name'])[:5000]\n #print(len(user['friends']))", "def get_friends(twitter, screen_name):\n ###TODO-- Completed\n\n #Requesting twitter with query to get friends of all the screen_name(s) passed as a parameter\n 
friends_ids = robust_request(twitter,'friends/ids',{'screen_name':screen_name}).json()\n\n #Returns a dictionary having several values, selecting the one which has KEY: ids,\n # i.e. get ids of all the friends in a sorted manner\n return sorted(friends_ids['ids'])", "def search_for_friend():\n\n\n user_id = session['user_id']\n email = request.form.get('email')\n\n user = User.get_user_by_email(email)\n\n user_json = {\n 'first_name': user.first_name, 'last_name': user.last_name, \"friend_id\": user.user_id\n }\n\n return jsonify(user_json)", "def friends(self):\n service_root = self._get_webservice_url(\"fmf\")\n return FindFriendsService(service_root, self.session, self.params)", "def friend(x):\n return [f for f in x if len(f) == 4]", "def add_all_friends(twitter, users):\n for u_dict in users:\n u_dict['friends'] = get_friends(twitter,u_dict['screen_name'])", "def get_user_list(friends_filename: str = \"friends.txt\") -> List:\n users: List[str] = []\n # two cases for running this function --\n # either there is a list already or there is not\n f_path = os.path.join(data_dir, friends_filename)\n if os.path.exists(f_path): # if the friends list is here already... just load it up\n\n with open(f_path, 'r') as fp:\n users = [line.strip() for line in fp.readlines()]\n\n\n else: # if NOT, let's ask for our following list using the twitter api\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_key, access_secret)\n api = tweepy.API(auth)\n\n for friend in tweepy.Cursor(api.friends).items():\n sleep(1)\n #print(friend.screen_name)\n users.append(friend.screen_name)\n\n # and then write the friends.txt file\n with open(f_path,'w') as fp: # O_CREAT || O_WRONLY\n [fp.write(\"{}\\n\".format(u)) for u in users]\n\n return users", "def flatten_friends_ids(users):\n friends_ids = []\n for user_id in users:\n friends_ids.extend(users[user_id][\"friends_ids\"]) \n return list(set(friends_ids))", "def list_friends(self, user_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None or len(user_id) == 0:\n raise Exception(\"Bad parameter.\")\n return self.database.retrieve_friends(user_id)", "def vk_friends(request):\n context = {}\n user = request.user\n\n if user.is_authenticated:\n user_social_auth = user.social_auth.get(provider='vk-oauth2')\n access_token = user_social_auth.access_token\n\n api = api_vk.get_vk_api(access_token)\n friends_info = api_vk.get_vk_friends(api)\n account_info = api_vk.get_account_info(api)\n\n context.update({\n \"account_info\": account_info,\n \"friends_info\": friends_info,\n })\n\n return render(request, \"vk_friends.html\", context)", "def get_info_about_friends (cursor, limit, screen_name=None, user_id=None):\n \n # Must have either screen_name or user_id (logical xor)\n assert (screen_name != None) != (user_id != None), \\\n \"Must have screen_name or user_id, but not both\"\n \n assert (limit > 0), \"The requested number of ids must be higher than 0\"\n \n result = []\n needed = limit\n \n # while there are friends to get and the needed number is still positive\n while cursor != 0 and needed > 0:\n # we can retrieve only 5000 at once\n if needed > MAX_ALLOWED_SCRNMS:\n count_limit = MAX_ALLOWED_SCRNMS\n else:\n count_limit = needed\n \n # depends if we have the screen_name or the id of the follower\n if screen_name != None:\n friends_data = twitterapi.make_twitter_request(twitter_api.friends.list, count=count_limit, screen_name=screen_name, cursor=cursor)\n result = result + 
friends_data[\"users\"]\n else:\n friends_data = twitterapi.make_twitter_request(twitter_api.friends.ids, count=count_limit, user_id=user_id, cursor=cursor)\n result = result + friends_data[\"users\"]\n \n needed = needed - count_limit\n \n # move to next friends that were not retrieved\n cursor = friends_data[\"next_cursor\"]\n \n # returns the needed results\n return result[:limit]", "def get_all_books_from_friends(user, KEY, SECRET):\n\n friends = user.friends\n\n if not friends:\n acct = user.account\n friends = get_user_friends(acct, KEY, SECRET)\n if len(friends) == 0:\n print \"no friends data found\"\n flash(\"Add friends on Goodreads in order to see their reading history\")\n\n for friend in friends:\n # if friend.user_id < 32: # TEMPORARY - prevents duplicate data collection\n # continue\n time.sleep(1.00)\n shelves = check_for_shelves(friend.gr_id, KEY)\n get_books_from_shelf(friend.gr_id, 'read', KEY)\n get_books_from_shelf(friend.gr_id, 'currently-reading', KEY)\n print \"Got all books for user \" + friend.gr_url\n\n return", "def get_reachable_friends(self, name):\n distance_to_be_reachable = 25\n ant = self.ants[name]\n reachable_friends = [friend for friend in self.ants\n if 3 < distance_to(self.ants[friend].xcor,\n self.ants[friend].ycor,\n ant.xcor, ant.ycor) < distance_to_be_reachable]\n return reachable_friends", "def get_user_friends(acct, KEY, SECRET): # this isn't true - evaluate what needs to be returned tomorrow.\n\n new_gr_session = OAuth1Session(\n consumer_key=KEY,\n consumer_secret=SECRET,\n access_token=acct.access_token,\n access_token_secret=acct.access_token_secret\n )\n\n user_id = str(acct.user.gr_id)\n current_page = 1\n\n total, friends = get_friends_page(new_gr_session, user_id, current_page)\n\n # check for no friends first\n if len(friends) == 0:\n flash(\"No Goodreads friends found.\")\n print \"No friends!\"\n\n # friends requests return a list of 30 at a time\n # get total number of pages required.\n total_pages = int(math.ceil(total / float(30)))\n # creates new users and adds friendship relationships to db\n add_user_friendships(friends, acct)\n\n # check for more than 30 friends\n if total_pages > 1:\n\n current_page = 2\n while current_page <= total_pages:\n\n print \"******YOU HAVE MORE FRIENDS*******\"\n\n # wait 1 second between calls, per GR policy\n time.sleep(1.00)\n\n # create new query with updated current_page\n total, friends = get_friends_page(new_gr_session, user_id, current_page)\n add_user_friendships(friends, acct)\n current_page += 1\n\n return None", "def check_remote_friends(author):\n auth_id = author.id\n auth_url = author.url\n\n pending = PendingRemoteFriend.objects.all().filter(author=author).first()\n\n if pending:\n remote_friends = []\n hostname = pending.host\n if not pending.host.endswith(\"/\"):\n hostname = pending.host + \"/\"\n server = pending.server\n\n try:\n\n if server.username and server.password:\n friend_id = pending.friend\n raw_id = friend_id.split(\"/\")[-1]\n friends_api = \"{}author/{}/friends/{}\".format(hostname, raw_id, auth_id)\n r = requests.get(friends_api, auth=(server.username, server.password))\n f_content = r.json()\n is_friend = f_content['friends']\n if is_friend:\n friend_dict = {'id': pending.url, 'host': pending.host,\n 'displayName': pending.displayName, 'url': pending.url}\n remote_friends.append(friend_dict)\n pending.delete()\n\n remoteF = RemoteFriend.objects.all().filter(author=author, friend=friend_id)\n if remoteF:\n pass\n else:\n RemoteFriend.objects.create(author=author, 
friend=pending.friend, host=pending.host,\n displayName=pending.displayName, url=pending.url, server=server)\n\n return remote_friends\n\n except:\n print(\"error\")\n else:\n return []", "def getResponsibleUsers():", "def count_friends(users):\n ###TODO-- Completed\n\n #Creating a Counter object, to count the mapping\n c = Counter()\n c.update(friend_id for user in users for friend_id in user['friends'])\n return c", "def fetch_friends(self, user, paginate=False):\n\n if USING_ALLAUTH:\n social_app = SocialApp.objects.get_current('facebook')\n oauth_token = SocialToken.objects.get(account=user, app=social_app).token\n else:\n social_auth_backend = FacebookBackend()\n\n # Get the access_token\n tokens = social_auth_backend.tokens(user)\n oauth_token = tokens['access_token']\n\n graph = facebook.GraphAPI(oauth_token)\n\n friends = graph.get_connections(\"me\", \"friends\")\n\n if paginate:\n total_friends = friends.copy()\n total_friends.pop('paging')\n while 'paging' in friends and 'next' in friends['paging'] and friends['paging']['next']:\n next_url = friends['paging']['next']\n next_url_parsed = urlparse.urlparse(next_url)\n query_data = urlparse.parse_qs(next_url_parsed.query)\n query_data.pop('access_token')\n for k, v in query_data.items():\n query_data[k] = v[0]\n friends = graph.get_connections(\"me\", \"friends\", **query_data)\n total_friends['data'] = sum([total_friends['data'], friends['data']], [])\n else:\n total_friends = friends\n\n return total_friends", "def get_friend_status(self):\n # See what kind of URL we need to assemble and open\n self.parse_steamid()\n\n # If parse_steamid() failed, send us back\n if self.url == None:\n print \"Failed to parse friend URL\"\n return (None,None)\n\n content = urllib2.build_opener()\n content.addheaders = [('User-agent', 'Mozilla/5.0')]\n try:\n data = content.open(self.url).read()\n content.close()\n except ValueError, e:\n\t\t\treturn (None,None)\n except urllib2.URLError, e:\n print 'Failed to connect'\n print 'Reason: ',e.reason\n return (None,None)\n except urllib2.HTTPError, e:\n print 'Remote server error'\n print 'Error code: ',e.code\n return (None,None)\n doc = fromstring(data)\n doc.make_links_absolute(self.url)\n\n cre = re.compile(r'steam://connect/(.*)')\n cre_game = re.compile(r'In-Game(.*)\\s')\n\n in_game_friends = []\n online_friends = []\n\n # Parse in game friends and their information \n\t\t# This ends up being a list of dictionaries\n f = {}\n for i in doc.find_class('linkFriend_in-game'):\n f_ingame = i.text_content()\n game = cre_game.search(f_ingame)\n\n for (element, attr, link, pos) in i.iterlinks():\n s = cre.search(link)\n if s:\n f['server'] = s.group(1)\n if 'server' not in f:\n f['server'] = 'No server'\n if game:\n g = re.sub('\\s-\\sJoin', '', re.sub(r'\\s\\s', '', game.group(1)))\n f['game'] = g\n else:\n f['friend'] = f_ingame\n # If all elements were filled out above during the iterative passes,\n # go ahead and append it to the list of hashes, and start anew\n if 'server' in f and 'friend' in f and 'game' in f:\n in_game_friends.append(f)\n f = {}\n\n # Online friends \n for i in doc.find_class('linkFriend_online'):\n online_friends.append(\"%s\" % (i.text_content()))\n\n\t\t# This returns in_game_friends with a list containing dictionaries, and\n\t\t# online_friends, which is a list of friends that are just online\t\n return (in_game_friends,online_friends)", "def jsonrpc_friendOf(self, screenname1, screenname2):\n log.msg('FriendOf %r and %r.' 
% (screenname1, screenname2))\n d = ftwitter.friendOf(self.cache, self.endpoint,\n unicode(screenname1), unicode(screenname2))\n d.addCallback(lambda result: {'result': result})\n d.addErrback(log.err)\n return d", "def get_cached_friends(self) -> Optional[List[types.FriendInformation]]:\n raise NotImplementedError", "def get_clubs_of_friends(person_to_friends: Dict[str, List[str]],\n person_to_clubs: Dict[str, List[str]],\n person: str) -> List[str]:\n \n clubs_of_friends = []\n if person not in person_to_friends:\n return []\n for friends in person_to_friends[person]:\n if friends in person_to_clubs:\n for clubs in person_to_clubs[friends]:\n if (person not in person_to_clubs) or \\\n (clubs not in person_to_clubs[person]):\n clubs_of_friends.append(clubs)\n clubs_of_friends.sort()\n return clubs_of_friends", "def get_blocked_usernames_list():\n return []", "def friends(self, node, current_date):\n\n friends = []\n\n for friend in self.network.successors(node):\n # return friends which edge node->friends was created before the current date\n if (self.network[node][friend][self.EDGE_CREATE_TIME] <= current_date):\n friends.append(friend)\n return friends", "def main():\n\n access_token = ('')\n\n # Get list of friend id numbers.\n url = ('https://graph.facebook.com/' +\n 'fql?q=SELECT uid2 FROM friend WHERE uid1=me()')\n\n content = simplejson.loads(urllib2.urlopen(url).read())\n content = [i['id'] for i in content['data']]\n\n connections = ['activities', 'adaccounts', 'albums',\n 'apprequests', 'books', 'checkins', 'events',\n 'family', 'feed', 'friendlists', 'friendrequests',\n 'friends', 'games', 'groups', 'home', 'inbox',\n 'interests', 'likes', 'links', 'locations',\n 'messagingfavorites', 'movies', 'music', 'mutualfriends',\n 'notes', 'notifications', 'outbox', 'payments',\n 'permissions', 'photos', 'posts', 'scores',\n 'statuses', 'tagged', 'television', 'updates', 'videos']\n\n for i in content:\n node = {}\n timestamp = datetime.datetime.utcnow().strftime(\"%s.%f\")\n node['timestamp'] = timestamp\n url = ('https://graph.facebook.com/' +\n i +\n '/?access_token=' +\n access_token)\n j = simplejson.loads(urllib2.urlopen(url).read())\n node[i] = [{k: j[k]} for k in j.keys()]\n for k in connections:\n if k == 'mutualfriends':\n url = ('https://graph.facebook.com/me/mutualfriends/' +\n j['id'] +\n '/?access_token=' +\n access_token)\n else:\n url = ('https://graph.facebook.com/' +\n j['id'] +\n '/' +\n k +\n '?access_token='\n + access_token)\n try:\n #print('{0}: {1}; {2}: {3}').format('connection', k, 'URL', url)\n l = simplejson.loads(urllib2.urlopen(url).read())\n node[k] = [m for m in l['data']]\n except urllib2.HTTPError, e:\n pass\n print(node)", "def users(message):\n message.reply(Strings['USERS_FOUND'].format(len(hf.get_users())))", "def do_list(self, arg):\n print('The roster includes the following members:\\n')\n lines = formatTable(\n map(self.memberToList, self.roster),\n [\n ColumnFormat('id', 4),\n ColumnFormat('name', 30),\n ColumnFormat('introduced', 12)\n ]) \n for line in lines: \n print(line)", "def getFriends(id):\n u = models.User.query.get(id)\n if not u:\n return jsonify({'error': 'No account found'}), 200\n\n if not u.isFb:\n if int(u.fbid) is not 0:\n u = models.User.query.get(u.fbid)\n if not u.isFb and int(u.fbid) is not 0:\n u = models.User.query.get(u.fbid)\n else:\n return jsonify({'error': 'No account found'}), 200\n\n session['oauth_token'] = (u.token, '')\n resp = facebook.get('/' + u.fbid + '/friends')\n friends = []\n for f in 
resp.data['data']:\n friends.append(f['id'])\n\n friends_json = []\n for f in friends:\n u = models.User.query.filter_by(fbid=f).first()\n user = {\n 'id': u.id,\n 'name': u.name,\n 'email': u.email,\n 'regID': u.regid,\n 'photo': u.photo\n }\n friends_json.append(user)\n return jsonify({'friends': friends_json}), 200", "def get_friendliness(self):\n trait = self.traitDao.get_friendliness(self.name)\n friendliness = trait[1]\n return friendliness", "def list( self, mess, args):\n user = self.get_sender_username(mess)\n args = args.replace(' ', '_')\n if user in self.users:\n user_list = 'All these users are subscribed - \\n'\n user_list += '\\n'.join(['%s :: %s' %(u, self.users[u]) for u in sorted(self.users)])\n if self.invited.keys():\n user_list += '\\n The following users are invited - \\n'\n user_list += '\\n'.join(self.invited.keys())\n self.log.info( '%s checks list of users.' % user)\n return user_list", "def search_helper(name_list, operation, twitter_dict): \r\n return_list = []\r\n \r\n for name in name_list:\r\n if operation == 'following':\r\n search_specified_list = twitter_dict[name]['following']\r\n for following_names in search_specified_list: \r\n if following_names not in return_list: \r\n return_list.append(following_names) \r\n \r\n elif operation == 'followers':\r\n followers = all_followers(twitter_dict, name)\r\n for followers_name in followers: \r\n if followers_name not in return_list: \r\n return_list.append(followers_name) \r\n \r\n return return_list", "def displayFolowers(database):\n firstname=str(input(\"who do you want to display followers :\"))\n usr,find=getByName(database,firstname)\n if find:\n print(f\"{usr.firstname} {usr.lastname} is folowed by:\")\n for folower in usr.folowed:\n print(folower)", "def get_common_friends(user, friends, friends_of_friends, data):\n common_friends_list = {}\n friends_set = set(friends)\n for friend_of_friend in list(set(friends_of_friends)):\n if int(friend_of_friend) != user and friend_of_friend not in friends:\n friend_of_friend_list = get_friends(friend_of_friend, data)\n score = len(list(friends_set.intersection(friend_of_friend_list)))\n if score in common_friends_list:\n common_friends_list[score].append(friend_of_friend)\n else:\n common_friends_list[score] = [friend_of_friend]\n return common_friends_list", "def number_of_friends(user):\n user_id = user[\"id\"]\n friend_ids = friendships[user_id]\n return len(friend_ids)", "def all_followers(twitter_dict, twitter_name): \r\n \r\n following_list = []\r\n for user in twitter_dict:\r\n f_list = twitter_dict[user]['following']\r\n if twitter_name in f_list:\r\n following_list.append(user) \r\n return following_list", "def get_edges(profile_id: str, friends: list, token: str, api_version: str) -> dict:\n method = 'friends.getMutual'\n mutuals = {}\n for friend in tqdm(friends):\n friend_id = friend['id']\n prefix = 'https://api.vk.com/method'\n url = f'{prefix}/{method}?source_uid={profile_id}&target_uid={friend_id}&access_token={token}&v={api_version}'\n friends_ids = requests.get(url).json().get('response')\n if friends_ids != None:\n # filter out deleted profiles\n mutuals[friend_id] = friends_ids\n # wait to prevent exceeding the nemuber of requests per second\n time.sleep(3)\n return mutuals", "def get_friends(\n self, recent_tracks: bool, limit: int = 50, page: int = 1\n ) -> ListModel[\"User\"]:\n return self.retrieve(\n bind=User,\n flatten=\"user\",\n params=dict(\n method=\"user.getFriends\",\n user=self.name,\n recenttracks=recent_tracks,\n page=page,\n 
limit=limit,\n ),\n )" ]
[ "0.71894276", "0.7112803", "0.70884013", "0.7054558", "0.7027464", "0.698504", "0.6922425", "0.68556184", "0.68189013", "0.67964447", "0.6792341", "0.6780698", "0.6738868", "0.67151695", "0.66937727", "0.6683127", "0.66392064", "0.6594076", "0.65758586", "0.6565678", "0.65209866", "0.6488219", "0.6481183", "0.6462359", "0.6399978", "0.6394691", "0.639066", "0.63809735", "0.63747114", "0.6359072", "0.6357154", "0.63563544", "0.6337959", "0.63089263", "0.6279081", "0.6272224", "0.62560046", "0.62551177", "0.62551177", "0.62551177", "0.62551177", "0.62520915", "0.6248122", "0.6243714", "0.62164646", "0.61798257", "0.61794895", "0.6155579", "0.6142202", "0.61179197", "0.610422", "0.60860425", "0.6085542", "0.60735947", "0.6061409", "0.60466313", "0.60418266", "0.6032688", "0.6019468", "0.6018288", "0.6013095", "0.60086894", "0.6005029", "0.5995047", "0.59917146", "0.59913945", "0.5981712", "0.597885", "0.5976774", "0.59557146", "0.5909856", "0.5902214", "0.59003067", "0.58972365", "0.5892256", "0.5881298", "0.5869552", "0.586738", "0.5865165", "0.58643204", "0.58630794", "0.585137", "0.5850224", "0.5833273", "0.58228326", "0.5822417", "0.5818467", "0.58121336", "0.58062977", "0.5803093", "0.5798232", "0.579384", "0.579275", "0.5787485", "0.5786997", "0.5777056", "0.5774117", "0.5772861", "0.57636476", "0.57497877" ]
0.67554355
12
onehot encode categorical, normalize scalar/player_id inputs
def preprocess_minimap(minimap): layers = [] for i in range(len(features.MINIMAP_FEATURES)): ## scalar or to large to do one-hot if i == _MINIMAP_SELECTED: layers.append(minimap[i:i+1] / features.MINIMAP_FEATURES[i].scale) ## categorical elif i == _MINIMAP_PLAYER_RELATIVE: layer = np.zeros([features.MINIMAP_FEATURES[i].scale, minimap.shape[1], minimap.shape[2]], dtype=np.float32) for j in range(features.MINIMAP_FEATURES[i].scale): indy, indx = (minimap[i] == j).nonzero() layer[j, indy, indx] = 1 layers.append(layer) return np.concatenate(layers, axis=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __encode_one_hot_util(self):\n for col in self.cat_cols:\n if (\n col in self.train_df\n and col + str(\"Encoded\") not in self.ord_cols\n ):\n if self.test_df is not None:\n self.test_df = pd.concat(\n [\n self.test_df,\n pd.get_dummies(\n self.test_df[col], prefix=col\n ).astype(\"category\"),\n ],\n axis=1,\n )\n self.train_df = pd.concat(\n [\n self.train_df,\n pd.get_dummies(self.train_df[col], prefix=col).astype(\n \"category\"\n ),\n ],\n axis=1,\n )", "def one_hot_encode(df, col):\n return pd.get_dummies(df, columns=[col], drop_first=True)", "def encode_one_hot(s):\n all = []\n for c in s:\n x = np.zeros((INPUT_VOCAB_SIZE)) \n index = char_indices[c]\n x[index] = 1 \n all.append(x)\n return all", "def one_hot_encode(df, ohe_cols):\n return pd.get_dummies(df, columns=ohe_cols)", "def one_hot_encode(self, arr, n_labels):\n one_hot = np.zeros((np.multiply(*arr.shape), n_labels), dtype=np.float32)\n one_hot[np.arange(one_hot.shape[0]), arr.flatten()] = 1.\n one_hot = one_hot.reshape((*arr.shape, n_labels))\n \n return one_hot", "def one_hot_encoding(data):\n\n data_encoded = pd.get_dummies(data)\n\n return data_encoded", "def one_hot_encode(x_: ArrayLike) -> tuple[IntArray, dict[str, int]]:\n x: np.ndarray = np.copy(x_)\n if x.ndim == 1:\n x = x[:, np.newaxis]\n shape = x.shape\n has_na = np.any(pd.isna(x))\n if x.dtype == object:\n x = x.astype(str)\n categories, codes = np.unique(x, return_inverse=True)\n num_classes = len(categories)\n encoded_x = np.zeros((x.size, num_classes), dtype=np.uint8)\n encoded_x[np.arange(x.size), codes.astype(np.uint8).ravel()] = 1\n encoded_x = encoded_x.reshape(*shape, num_classes)\n if has_na:\n # remove NaN column\n categories = categories[:-1]\n encoded_x = encoded_x[:, :, :-1]\n mapping = {\n _category_name(category): code for code, category in enumerate(categories)\n }\n return encoded_x, mapping", "def pre_process_data(df):\n\n # one-hot encode categorical values\n df = pd.get_dummies(df)\n\n return df", "def onehot_encode_labels(y):\n\treturn OneHotEncoder(categories=\"auto\", sparse=False).fit_transform(y.reshape(y.shape[0],1))", "def one_hot_encode(x):\n\n # check if encoder has been previously created, if not make a global var an initialize it\n if 'encoder' not in globals():\n global encoder\n encoder = LabelBinarizer()\n encoder.fit(range(10))\n\n return encoder.transform(x)", "def one_hot_encoding(data):\r\n encoder = LabelEncoder()\r\n y = encoder.fit_transform(data)\r\n return(y)", "def _one_hot_encoder(self):\n ohe = preprocessing.OneHotEncoder()\n ohe.fit(self.dataframe[self.cat_feats])\n return ohe.transform(self.dataframe_d_copy[self.cat_feats])", "def one_hot_encode(x):\n # TODO: Implement Function\n lb = preprocessing.LabelBinarizer()\n lb.fit([0,1,2,3,4,5,6,7,8,9])\n \n return lb.transform(x)", "def one_hot_encoding(sequence):\n\n mydict = {\n \"A\": np.asarray([1, 0, 0, 0]),\n \"a\": np.asarray([1, 0, 0, 0]),\n \"C\": np.asarray([0, 1, 0, 0]),\n \"c\": np.asarray([0, 1, 0, 0]),\n \"G\": np.asarray([0, 0, 1, 0]),\n \"g\": np.asarray([0, 0, 1, 0]),\n \"T\": np.asarray([0, 0, 0, 1]),\n \"t\": np.asarray([0, 0, 0, 1]),\n \"Y\": np.asarray([0, 1, 0, 1]),\n \"y\": np.asarray([0, 1, 0, 1]),\n \"R\": np.asarray([1, 0, 1, 0]),\n \"r\": np.asarray([1, 0, 1, 0]),\n \"S\": np.asarray([0, 1, 1, 0]),\n \"s\": np.asarray([0, 1, 1, 0]),\n \"W\": np.asarray([1, 0, 0, 1]),\n \"w\": np.asarray([1, 0, 0, 1]),\n \"K\": np.asarray([0, 0, 1, 1]),\n \"k\": np.asarray([0, 0, 1, 1]),\n \"M\": np.asarray([1, 1, 0, 0]),\n \"m\": np.asarray([1, 1, 0, 
0]),\n \"B\": np.asarray([0, 1, 1, 1]),\n \"b\": np.asarray([0, 1, 1, 1]),\n \"D\": np.asarray([1, 0, 1, 1]),\n \"d\": np.asarray([1, 0, 1, 1]),\n \"H\": np.asarray([1, 1, 0, 1]),\n \"h\": np.asarray([1, 1, 0, 1]),\n \"V\": np.asarray([1, 1, 1, 0]),\n \"v\": np.asarray([1, 1, 1, 0]),\n \"N\": np.asarray([0, 0, 0, 0]),\n \"n\": np.asarray([0, 0, 0, 0]),\n \"-\": np.asarray([0, 0, 0, 0]),\n }\n print(f\"Seq: {sequence}\")\n if len(sequence) > 0:\n nuc_list = list()\n for nuc in list(sequence):\n nuc_list.append(mydict[nuc])\n result = np.stack(np.asarray(nuc_list, dtype=\"int8\"))\n return result\n else: \n print(\"ERROR! sequence is too short\")", "def one_hot_encode(self, meta_field):\n one_hot = pd.get_dummies(self.sample_meta[meta_field]).values\n return one_hot", "def one_hot_encode(x):\n # TODO: Implement Function\n output = np.zeros((len(x), 10))\n \n for i, j in enumerate(x):\n output[i,j] = 1\n \n return output", "def encode_one_hot2(s):\n x = np.zeros((LINE_SIZE, INPUT_VOCAB_SIZE))\n for n, c in enumerate(s):\n index = char_indices[c]\n x[n, index] = 1 \n return x", "def one_hot_encode(x):\n # TODO: Implement Function\n x_l = list(x)\n for index in np.arange(len(x_l)):\n x_l[index] = get_one_hot_vector(x[index])[x[index]]\n return np.array(x_l)", "def _one_hot_encode(label_vector, total_num_labels):\n out = np.zeros(shape=(len(label_vector), total_num_labels))\n for i in range(len(label_vector)):\n out[i, int(label_vector[i])] = 1\n return out", "def one_hot_encoding(labels, num_classes=10):\n num_labels = labels.shape[0]\n encoded = np.zeros((num_labels, num_classes))\n encoded[np.arange(num_labels), labels[np.arange(num_labels)]] = 1\n \n return encoded", "def one_hot_encoding(y):\n\n y_oh = np.zeros((y.shape[0], y.max() - y.min() + 1))\n\n # currently only works in min is actually 0\n for j in range(0, y_oh.shape[1]):\n y_oh[np.where(y == j), j] = 1\n\n return y_oh", "def one_hot_converter(column):\n # encode class values as integers\n encoder = LabelEncoder()\n encoder.fit(column) \n encoded_ = encoder.transform(column)\n # convert integers to dummy variables, i.e., one-hot encoded\n encoded_column = to_categorical(encoded_)\n \n return encoded_column", "def one_hot_enc(self, word):\n word = self.text_to_int(word)\n word = Variable(torch.tensor(word))\n word = torch.nn.functional.one_hot(word, len(self.index_map))\n return word.transpose(0, 1)", "def one_hot_encode(y, out_size):\n n = len(y)\n oh = np.zeros((n, out_size))\n oh[range(n), y] = 1\n return oh", "def preprocess_dataset(x, y):\n # Add the channel dimension.\n x = np.expand_dims(x, axis=-1)\n # Rescale to [-1, 1].\n x = x.astype(np.float32)\n x /= 128\n x -= 0.5\n # One-hot encode the labels.\n y = utils.to_categorical(y)\n return (x, y)", "def make_onehot(x,num_labels=7):\n enc = OneHotEncoder(n_values=num_labels)\n return enc.fit_transform(np.array(x).reshape(-1, 1)).toarray()", "def one_hot_encode(self, y: np.ndarray) -> np.ndarray:\n return np.eye(self.output_size)[y]", "def one_hot(y_):\n y_ = y_.reshape(len(y_))\n n_values = int(np.max(y_)) + 1\n n_values = 6\n return np.eye(n_values)[np.array(y_, dtype=np.int32)] # Returns FLOATS", "def one_hot_encoding(data_dict):\n merged_seq = []\n merged_label = []\n for key in __MERGE_KEYS__:\n merged_seq += list(data_dict[key])\n merged_label += list([__MERGE_LABELS__[key]] * len(data_dict[key]))\n merged_seq_int = list(map(map_base_to_int, merged_seq))\n\n X = to_categorical(merged_seq_int)\n Y = to_categorical(merged_label)\n return X, Y", "def 
one_hot_encode_single(mapping: dict[str, int], value: Optional[str]) -> IntArray:\n encoded_value = np.zeros((1, len(mapping)))\n if not pd.isna(value):\n code = mapping[str(value)]\n encoded_value[0, code] = 1\n return encoded_value", "def convert_to_one_hot(a):\n a = a[:, 0]\n a = a.astype(int)\n A = np.zeros((len(a), config.num_classes))\n A[np.arange(len(a)), a] = 1\n return A", "def encoding_onehot(df, target=None):\n if not target:\n target = ['user_type', 'city']\n for col in target:\n # Following is exactily the df.join() but is inplace.\n one_hot = pandas.get_dummies(df[col])\n for item in one_hot:\n df[item] = one_hot[item]\n df.drop([col], axis=1, inplace=True)\n return None", "def one_hot(df):\r\n # One-hot encode into \r\n cols = ['job', 'marital', 'education', 'month', 'day_of_week', 'poutcome']\r\n for each in cols:\r\n dummies = pd.get_dummies(df[each], prefix=each, drop_first=False)\r\n df = pd.concat([df, dummies], axis=1)\r\n df = df.drop(cols,axis=1)\r\n return df", "def _onehot(y, n_classes=False):\n if not n_classes:\n \"\"\"Create one-hot encoded labels.\"\"\"\n n_classes = len(set(y))\n out = np.zeros((len(y), n_classes))\n for i, ii in enumerate(y):\n out[i][ii] += 1\n y_onehot = out.astype(int)\n return y_onehot", "def one_hot_encode(x, n_classes):\n return np.eye(n_classes)[x]", "def one_hot_encoder(df, cols):\r\n\r\n for col in cols:\r\n if(\"admission\" in col):\r\n dummies = pd.get_dummies(df[col], drop_first=False)\r\n else:\r\n dummies = pd.get_dummies(df[col], prefix=col, drop_first=False)\r\n df = pd.concat([df, dummies], axis=1) \r\n df.drop([col],axis=1, inplace=True)\r\n return df", "def one_hot_encode(idx, vocab_size):\n # Initialize the encoded array\n one_hot = np.zeros(vocab_size)\n \n # Set the appropriate element to one\n one_hot[idx] = 1.0\n\n return one_hot", "def _maybe_one_hot(self, obs):\n if self.toOneHot:\n obs = np.reshape(obs, (1, -1))\n ints = obs.dot(self.multiplication_factor)\n x = np.zeros([obs.shape[0], self.one_hot_len])\n for i, j in enumerate(ints):\n x[i, j] = 1\n return x\n else:\n return obs", "def To1hot(label,num_class):\n onehot = np.zeros(num_class)\n onehot[label] = 1\n return onehot", "def _onehot(integer_labels):\r\n n_rows = len(integer_labels)\r\n n_cols = integer_labels.max() + 1\r\n onehot = np.zeros((n_rows, n_cols), dtype='uint8')\r\n onehot[np.arange(n_rows), integer_labels] = 1\r\n return onehot", "def onehot(inputs, num_classes):\n num_sample = inputs.size(0)\n inputs_onehot = torch.zeros(num_sample, num_classes)\n inputs_onehot.scatter_(1, inputs.unsqueeze(1), 1.0)\n return inputs_onehot", "def one_hot_enc(df, ohe_cols):\n for col in ohe_cols:\n df_ohe = pd.get_dummies(df[col], preffix=col)\n df = pd.concat([df, df_ohe], axis=1)\n df.drop(col, axis=1, inplace=True)\n return df", "def onehot_encoding(labels, dim, device):\n out = th.zeros(list(labels.size()) + [dim]).to(device)\n out.scatter_(len(out.size()) - 1, labels.unsqueeze(-1), 1.0)\n return out", "def one_hot_encode(gt_image):\n\n # One hot encoding of each pixel according to the CVPR2018 classes\n label_ohe = list(map(lambda x: tf.to_float(tf.equal(gt_image // 1000, x)), cvpr2018_labels()))\n # Stack everything together\n return tf.stack(label_ohe, axis=-1)", "def to_one_hot_encoding(target_data):\n target_data = target_data.squeeze()\n n_class = len(np.unique(target_data))\n res = np.eye(n_class)[target_data.astype(int)]\n return res", "def one_hot_encoder(df, nan_as_category=True):\n original_columns = list(df.columns)\n categorical_columns = [col 
for col in df.columns if\n df[col].dtype == 'object']\n df = pd.get_dummies(df, columns=categorical_columns,\n dummy_na=nan_as_category)\n new_columns = [c for c in df.columns if c not in original_columns]\n return df, new_columns", "def one_hot_encode(labels, num_classes=None):\n if num_classes is None:\n num_classes = len(np.unique(labels))\n return np.eye(num_classes)[labels]", "def one_hot_encode(labels, num_classes=None):\n if num_classes is None:\n num_classes = len(np.unique(labels))\n return np.eye(num_classes)[labels]", "def onehot_encode_y(y, num_class):\n # Assertions\n assert isinstance(y, np.ndarray), \\\n 'y must be a numpy ndarray'\n assert isinstance(num_class, int), \\\n 'num_class must be an int'\n # Functionality\n one_hot = np.zeros((y.shape[0],num_class),dtype=np.int8)\n for index, cls in enumerate(y):\n one_hot[index, int(cls)] = 1\n\n return one_hot", "def onehot(trace):\n encoded_trace = np.zeros((len(trace), 3), dtype=int)\n encoded_trace[np.arange(len(trace)), trace] = 1\n return encoded_trace.flatten()", "def __one_hot(self, y):\n y_one_hot = np.zeros((y.size, y.max() + 1))\n y_one_hot[np.arange(y.size), y] = 1\n \n return y_one_hot", "def one_hot_encode(self, columns):\n if not columns:\n return None\n\n logging.info('one-hot-encoding columns: %s' % ','.join(columns))\n\n both_sets = pd.concat((self.train[columns], self.test[columns]))\n encoder = preprocessing.OneHotEncoder()\n encoded = encoder.fit_transform(both_sets).sorted_indices()\n\n # Split apart train and test set arrays after one-hot encoding.\n nd_train = self.train.shape[0]\n train_enc = encoded[:nd_train]\n test_enc = encoded[nd_train:]\n\n for col in columns:\n logging.debug('unique {}: {}'.format(col, both_sets[col].unique()))\n\n # Create a feature map for decoding one-hot encoding.\n counts = np.array([both_sets[col].unique().shape[0] for col in columns])\n fmap = []\n for i, column in enumerate(columns):\n unique_elements = np.sort(both_sets[column].unique())\n logging.debug('unique elements for col {}: {}'.format(\n column, unique_elements))\n fmap += ['%s-%d' % (column, idx) for idx in unique_elements]\n\n logging.info('after one-hot encoding, found # unique values:')\n for attr, n_values in zip(columns, counts):\n logging.info('%s: %d' % (attr, n_values))\n\n return train_enc, test_enc, fmap, encoder", "def process_categorical_data(data_df):\n return pd.get_dummies(data_df, columns=Columns.categorical)", "def one_hot_encode(label, label_values):\n semantic_map = []\n for colour in label_values:\n equality = np.equal(label, colour)\n class_map = np.all(equality, axis = -1)\n semantic_map.append(class_map)\n semantic_map = np.stack(semantic_map, axis=-1)\n\n return semantic_map", "def encode_labels(self, y, num_labels):\n onehot = np.zeros((num_labels, y.shape[0]))\n for i in range(y.shape[0]):\n onehot[y[i], i] = 1.0\n return onehot", "def encode_onehot(df, cols):\n vec = DictVectorizer()\n \n vec_data = pd.DataFrame(vec.fit_transform(df[cols].to_dict(outtype='records')).toarray())\n vec_data.columns = vec.get_feature_names()\n vec_data.index = df.index\n \n df = df.drop(cols, axis=1)\n df = df.join(vec_data)\n return df", "def _one_hot(self, key):\n train_df = self.train.copy(deep=False)\n one_hot = pd.get_dummies(train_df[key],drop_first=True)\n one_hot.columns=[key+'_'+col for col in one_hot.columns.values]\n train_df = pd.concat([train_df,one_hot],axis=1,sort=True)\n train_dfg = train_df.groupby('fullVisitorId')\n return train_dfg[one_hot.columns.values].sum()", "def 
class2onehot(class_labels, seq_len, batchsize, num_task):\n\n\n one_hot = torch.FloatTensor(batchsize,seq_len,num_task)\n one_hot.zero_()\n one_hot = one_hot.scatter_(1, seq_len,class_labels, 1)\n\n return one_hot", "def encode_onehot(df, cols):\n vec = DictVectorizer()\n\n vec_data = pd.DataFrame(vec.fit_transform(df[cols].to_dict(orient='records')).toarray())\n vec_data.columns = vec.get_feature_names()\n vec_data.index = df.index\n\n df = df.drop(cols, axis=1)\n df = df.join(vec_data)\n return df", "def one_hot_encode(df, colnames):\r\n for col in colnames:\r\n oh_df = get_dummies(df[col], prefix=col)\r\n df = concat([oh_df, df], axis=1)\r\n df = df.drop([col], axis=1)\r\n return df", "def one_hot(labels, dim):\n batch_size = labels.size(0)\n out = torch.zeros(batch_size, dim)\n out[np.arange(batch_size), labels.squeeze().long()] = 1\n return out", "def own_OneHotColumnCreator(df, columns):\n for col in cat_attribs:\n for value in df[col].unique():\n df[value] = (df[col] == value).astype(int)", "def one_hot_encode(Y, classes):\n if type(classes) is not int:\n return None\n if Y is None or type(Y) != np.ndarray:\n return None\n for c in Y:\n if c >= classes or c < 0:\n return None\n m = Y.shape[0]\n mtx = np.zeros((m, classes))\n\n for row, c_label in zip(mtx, Y):\n row[c_label] = 1\n\n return mtx.T", "def one_hot(indices, depth):\n # print(indices)\n encoded_indices = torch.zeros(indices.size() + torch.Size([depth])).cuda()\n index = indices.view(indices.size()+torch.Size([1]))\n encoded_indices = encoded_indices.scatter_(1,index,1)\n \n return encoded_indices", "def one_hot_encode_soy(soy_data, classes):\n\n print('[ INFO ]: One-hot-encoding soy data...')\n\n for col in soy_data.columns:\n if col not in ['index', 'attr_35']:\n\n # Find the min and max values in each column to account for all values\n col_min = min(soy_data[col])\n col_max = max(soy_data[col]) + 1\n for u in range(col_min, col_max):\n soy_data[col + '_' + str(u)] = (soy_data[col] == u).astype(int)\n soy_data = soy_data.drop(col, axis=1)\n if col == 'attr_35':\n\n # Create new columns for each unique class\n for u in soy_data[col].unique():\n soy_data['{}_class'.format(u)] = np.where(soy_data[col] == u, 1, 0)\n classes = np.char.replace(classes, u, u + '_class')\n soy_data = soy_data.drop(col, axis=1)\n\n return soy_data, classes", "def conv_y_to_onehot_mat(labels):\n one_idx = np.array(labels)\n nkind = len(np.unique(one_idx))\n nlabels = len(one_idx)\n\n ret = np.zeros((nkind, nlabels))\n ret[one_idx, np.arange(nlabels)] = 1\n return ret", "def to_onehot(labels: torch.Tensor, num_classes: int) -> torch.Tensor:\n if len(labels.size()) == 1:\n return F.one_hot(labels, num_classes).float()\n return labels", "def to_onehot(x, num_classes):\n return np.eye(num_classes, dtype='float32')[x]", "def encode_categorical(df):\n cat_cols = df.select_dtypes(\"category\").columns\n for col in cat_cols:\n df[col] = df[col].cat.codes + 1\n unique_no = len(df[col].unique())\n if unique_no < 50:\n df[col] = df[col].astype(\"uint8\")\n elif unique_no < 16000:\n df[col] = df[col].astype(\"int16\")\n else:\n df[col] = df[col].astype(\"int32\")\n return df", "def preprocess_input(text, tokenizer, max_id):\n X = np.array(tokenizer.texts_to_sequences(text)) - 1\n encoded = tf.one_hot(X, depth=max_id)\n return encoded", "def one_hot(indices, depth):\n\n encoded_indicies = torch.zeros(indices.size() + torch.Size([depth]))\n if indices.is_cuda:\n encoded_indicies = encoded_indicies.cuda() \n index = indices.view(indices.size()+torch.Size([1]))\n 
encoded_indicies = encoded_indicies.scatter_(1,index,1)\n\n return encoded_indicies", "def to_onehot(value, dim):\n one_hot = torch.zeros(value.shape[0], dim)\n one_hot[torch.arange(value.shape[0]), value.long()] = 1\n return one_hot", "def initialization_based(input_array):\n\n # Search for the unique labels in the array\n oh_array = np.unique(input_array, return_inverse=True)[1]\n\n # Define the shape of the one hot encoded array\n out = np.zeros((oh_array.shape[0], oh_array.max() + 1), dtype=int)\n\n # Set the predicted class on 1, and all the other classes stays at 0\n out[np.arange(out.shape[0]), oh_array] = 1\n\n return out", "def to_one_hot(labels, num_classes):\n shape = labels.size()\n shape = shape + (num_classes,)\n one_hot = torch.FloatTensor(shape)\n one_hot.zero_()\n dim = 1 if len(shape) == 2 else 2\n one_hot.scatter_(dim, labels.unsqueeze(-1), 1)\n return one_hot", "def one_hot(labels, classes=None):\n return K.utils.to_categorical(labels, classes)", "def get_mnist(one_hot_enc, normalized, flatten):", "def one_hot(labels):\n one_hot_labels = np.zeros(labels.shape + (n_actions,))\n for c in range(n_actions):\n one_hot_labels[labels == c, c] = 1.0\n return one_hot_labels", "def to_one_hot(y, depth=None):\n y_flat = y.to(torch.int64).view(-1, 1)\n depth = depth if depth is not None else int(torch.max(y_flat)) + 1\n y_one_hot = torch.zeros(y_flat.size()[0], depth, device=y.device).scatter_(1, y_flat, 1)\n y_one_hot = y_one_hot.view(*(tuple(y.shape) + (-1,)))\n return y_one_hot", "def one_hot(x, num_classes, dtype=jnp.float32):\n return jax.nn.one_hot(x, num_classes).astype(dtype)", "def dense_to_one_hot(labels_dense, num_classes):\r\n print ('in onehot', labels_dense, num_classes)\r\n num_labels = labels_dense.shape[0]\r\n index_offset = numpy.arange(num_labels) * num_classes\r\n labels_one_hot = numpy.zeros((num_labels, num_classes))\r\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\r\n return labels_one_hot", "def binary_to_one_hot(arr: np.ndarray) -> np.ndarray:\n res = np.zeros((arr.shape[0], 2))\n res[np.where(arr == 1)[0], 0] = 1\n res[np.where(arr == 0)[0], 1] = 1\n return res", "def label2onehot(self, batch_size, labels):\r\n dim = 6\r\n out = torch.zeros(batch_size, dim)\r\n out[np.arange(batch_size), labels] = 1\r\n return out", "def label_to_one_hot(label, num_of_class=2):\r\n import numpy as np\r\n one_hot = np.zeros((len(label), num_of_class), dtype=np.uint8)\r\n for i in range(len(label)):\r\n one_hot[i, int(label[i] - 1)] = 1 # label is 1 and 2\r\n\r\n return one_hot", "def to_one_hot(arr, num_classes):\n arr = arr.data.astype(int)\n a = np.zeros((arr.shape[0], num_classes))\n a[np.arange(len(a)), arr] = 1\n return tensor.Tensor(a, requires_grad=True)", "def to_one_hot(arr, num_classes):\n arr = arr.data.astype(int)\n a = np.zeros((arr.shape[0], num_classes))\n a[np.arange(len(a)), arr] = 1\n return tensor.Tensor(a, requires_grad=True)", "def __encode_categorical_util(self):\n cat = []\n # cat = self.cat_cols\n for col in self.cat_cols:\n if (\n col in self.train_df\n and col + str(\"Encoded\") not in self.ord_cols\n ):\n if self.test_df is not None:\n self.test_df[col + str(\"Encoded\")] = pd.factorize(\n self.test_df[col]\n )[0]\n self.test_df[col + str(\"Encoded\")] = self.test_df[\n col + str(\"Encoded\")\n ].astype(\"category\")\n self.train_df[col + str(\"Encoded\")] = pd.factorize(\n self.train_df[col]\n )[0]\n self.train_df[col + str(\"Encoded\")] = self.train_df[\n col + str(\"Encoded\")\n ].astype(\"category\")\n cat.append(str(col + 
str(\"Encoded\")))\n self.cat_cols += cat", "def decode_one_hot(x):\n s = []\n for onehot in x:\n one_index = np.argmax(onehot)\n c = indices_char[one_index]\n s.append(c) \n return ''.join(s)", "def chars_to_onehot(self, char_list):\n int_list = self.char_to_int(char_list)\n one_hot = np.zeros((len(self.unique_chars), len(int_list)))\n for i,int_elem in enumerate(int_list):\n one_hot[int_elem,i] = 1\n return one_hot", "def encode_one_hot(df, col_names) -> pd.DataFrame:\n for col_name in col_names:\n encoded = pd.get_dummies(df[col_name], prefix=col_name, drop_first=False)\n df = pd.concat([df, encoded], axis=1)\n df = df.drop([col_name], axis=1)\n return df", "def make_one_hot(y):\n one_hot = np.zeros((len(y), 10))\n for i in range(len(y)):\n one_hot[i, y[i]] = 1.\n return one_hot.transpose(1,0)", "def one_hot_encoding(self):\n \n try: \n\n # Encode dependent variable\n le = LabelEncoder()\n le.fit(self.data[\"consumption\"])\n df_dependent_enc = pd.DataFrame(le.transform(self.data[\"consumption\"]))\n\n # Encode independent variable\n categorical_features = Config.FEATURE_DEFINITION[\"category_cols\"]\n categorical_df = self.data.loc[:, self.data.columns.isin(categorical_features)]\n oe = OrdinalEncoder()\n oe.fit(categorical_df)\n df_catindependent_enc = pd.DataFrame(oe.transform(categorical_df))\n df_catindependent_enc.columns = categorical_df.columns\n\n except KeyError: \n\n st.write(\"Cannot perform one-hot encoding for numerical variables. Please check if variables are properly defined.\")\n st.write(self.data.columns != \"consumption\")\n df_dependent_enc = []\n df_catindependent_enc = []\n\n else:\n \n return df_dependent_enc, df_catindependent_enc", "def to_categorical(x, n_col=None):\n if not n_col:\n n_col = np.amax(x) + 1\n one_hot = np.zeros((x.shape[0], n_col))\n one_hot[np.arange(x.shape[0]), x] = 1\n return one_hot", "def label2onehot(self, labels, dim):\n batch_size = labels.size(0)\n out = torch.zeros(batch_size, dim)\n out[np.arange(batch_size), labels.long()] = 1\n return out", "def one_hot_encode(y, classes=None):\n reshaped = y.reshape((reduce(mul, y.shape), 1))\n encoder = OneHotEncoder(sparse=False)\n if classes is None:\n encoded = encoder.fit_transform(reshaped)\n else:\n classes = np.array(classes)\n n_classes = classes.shape[0]\n encoder.fit(classes.reshape((n_classes, 1)))\n encoded = encoder.transform(reshaped)\n return encoded.reshape(list(y.shape) + [encoded.shape[1]])", "def onehot_enc(df, categorical_columns, categories):\n noncategorical_cols = [col for col in df.columns if col not in categorical_columns]\n \n enc = OneHotEncoder(categories=categories,\n sparse=False,\n handle_unknown='ignore')\n y = enc.fit_transform(df[categorical_columns].fillna(\"None\"))\n \n ohe_cols = [\n f\"{col}_{c}\" for col, cats in zip(categorical_columns, categories) for c in cats]\n df1 = pd.DataFrame(y, columns=ohe_cols)\n \n output_df = pd.concat([df[noncategorical_cols], df1], axis=1)\n return output_df, ohe_cols", "def one_hot(x):\n cart_pos,cart_vel,pole_ang,pole_vel = x\n\n # Cart position\n discrete_cart_pos = int((cart_pos - CART_POSITION_MIN)/(CART_POSITION_MAX-CART_POSITION_MIN)*4)\n\n # Pole angle\n discrete_pol_ang = int((cart_pos - POLE_ANGLE_MIN)/(POLE_ANGLE_MAX-POLE_ANGLE_MIN)*4)\n\n # Cart velocity\n cart_vel_discretisations = [-1,0,1]\n discrete_cart_vel= 3\n for i,v in enumerate(cart_vel_discretisations):\n if cart_vel < v:\n discrete_cart_vel = i\n break\n\n # Pole tip velocity\n pole_vel_discretisations = [-1,0,1]\n discrete_pole_vel= 3\n for i,v in 
enumerate(pole_vel_discretisations):\n if pole_vel < v:\n discrete_pole_vel = i\n break\n\n # Convert to one-hot encoding\n x = discrete_cart_pos + discrete_cart_vel*4 + discrete_pol_ang*8 + discrete_cart_vel*12\n output = [0] * ONE_HOT_NUM_FEATURES\n output[x] = 1\n return np.array([output]).transpose()", "def _create_onehot(cls, onnx_node, inputs, opset_version):\n axis = onnx_node.getattr(\"axis\", -1)\n # we move several inputs to singa's attribuates\n # and mark them so we don't use them when we run this operator\n depth = tensor.to_numpy(inputs.pop(1)).astype(np.int32)\n value = tensor.to_numpy(inputs.pop(1))\n onnx_node.consumed_inputs.extend(onnx_node.inputs[1:])\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(axis, depth, value)", "def process(self, data_set: DataSet):\n logging.info(f\"\\nOne hot encoding DataSet...\")\n if len(data_set.get_categorical_attribute_indexes()) == 0:\n logging.info(f\"No need to one hot encode. No categorical variables left.\")\n return\n\n for index in data_set.get_categorical_attribute_indexes():\n column = data_set.get_attribute_column(index)\n\n unique = np.unique(column)\n logging.info(f\"Attribute index '{index}' has categorical variables: {unique}\")\n unique_dict = {x: i for i, x in enumerate(unique)}\n new_columns = [[0. for i in range(len(data_set))] for j in unique]\n\n for i, value in enumerate(column):\n new_columns[unique_dict[value]][i] = 1.\n\n for col in new_columns:\n data_set.add_column(col)\n\n for index in sorted(data_set.get_categorical_attribute_indexes(), reverse=True):\n data_set.remove_column(index)\n\n logging.info('One hot encoded DataSet:')\n logging.info(data_set.summary())", "def dense_to_one_hot(labels_dense, label):\n num_labels = len(labels_dense)\n index_offset = list(labels_dense).index(label)\n labels_one_hot = np.zeros(num_labels)\n labels_one_hot[index_offset] = 1\n return labels_one_hot", "def onehot(train_cat, test_cat):\n train_enc = []\n test_enc = []\n columns = []\n for c in cols(train_cat):\n train = train_cat.loc[:,c]\n test = test_cat.loc[:,c]\n labels = list(set(train.unique().tolist()) |\n set(test.unique().tolist()))\n\n l_encoder = LabelEncoder()\n l_encoder.fit(labels)\n oh_encoder = OneHotEncoder(sparse=False, n_values=len(labels))\n\n train_ftr = l_encoder.transform(train).reshape(len(train), 1)\n train_enc.append(oh_encoder.fit_transform(train_ftr))\n test_ftr = l_encoder.transform(test).reshape(len(test), 1)\n test_enc.append(oh_encoder.fit_transform(test_ftr))\n\n col_tmp = list(l_encoder.inverse_transform(range(len(labels))))\n columns = columns + [\"%s_%s\" % (c, i) for i in col_tmp]\n\n new_train_cat = pd.DataFrame(np.column_stack(train_enc), columns=columns,\n dtype=np.uint8)\n new_test_cat = pd.DataFrame(np.column_stack(test_enc), columns=columns,\n dtype=np.uint8)\n return new_train_cat, new_test_cat", "def dummify_all_categorical(df):\n\n df = pd.get_dummies(df)\n df = dummify(df, \"detailed industry recode\")\n df = dummify(df, \"detailed occupation recode\") ## add some variables that are encoded as int64 but that are in fact categorical\n return df" ]
[ "0.75467926", "0.71732116", "0.7166452", "0.7154489", "0.71511304", "0.71498376", "0.7130969", "0.71149933", "0.7075103", "0.70738035", "0.7065001", "0.70538414", "0.70359933", "0.70098567", "0.7004153", "0.6951065", "0.69328225", "0.69303125", "0.6927022", "0.692566", "0.6913862", "0.68970597", "0.68882537", "0.68856406", "0.6867454", "0.68550134", "0.68418026", "0.6821936", "0.68092376", "0.67857873", "0.67819905", "0.67688185", "0.6767324", "0.67597884", "0.6734347", "0.67342144", "0.67079216", "0.6695675", "0.66910774", "0.6683873", "0.66814816", "0.6669739", "0.66535985", "0.66210777", "0.6613291", "0.6606013", "0.66016555", "0.66016555", "0.65864974", "0.65851396", "0.656568", "0.65656596", "0.65620977", "0.65502864", "0.654684", "0.6545456", "0.65402955", "0.65298676", "0.65290713", "0.65259236", "0.6525408", "0.6520651", "0.6519721", "0.6501544", "0.650121", "0.64942855", "0.64938587", "0.64875567", "0.6483698", "0.6483517", "0.6474665", "0.645325", "0.6448006", "0.6435507", "0.6434195", "0.64341223", "0.6432684", "0.6431303", "0.6424281", "0.64238644", "0.642365", "0.642195", "0.64200056", "0.641233", "0.641233", "0.64100236", "0.64063543", "0.6405605", "0.6394489", "0.63937783", "0.63872325", "0.6386047", "0.6376004", "0.6368885", "0.63665754", "0.6363325", "0.63611764", "0.6352628", "0.6350995", "0.63506424", "0.6347735" ]
0.0
-1
onehot encode categorical, normalize scalar/player_id inputs
def preprocess_screen(screen):
    layers = []
    for i in range(len(features.SCREEN_FEATURES)):
        if i == _SCREEN_UNIT_TYPE:
            layers.append(screen[i:i + 1] / features.SCREEN_FEATURES[i].scale)
        elif i == _SCREEN_SELECTED:
            layers.append(screen[i:i + 1] / features.SCREEN_FEATURES[i].scale)
        elif i == _SCREEN_PLAYER_RELATIVE:
            layer = np.zeros([features.SCREEN_FEATURES[i].scale, screen.shape[1], screen.shape[2]], dtype=np.float32)
            for j in range(features.SCREEN_FEATURES[i].scale):
                indy, indx = (screen[i] == j).nonzero()
                layer[j, indy, indx] = 1
            layers.append(layer)
    return np.concatenate(layers, axis=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __encode_one_hot_util(self):\n for col in self.cat_cols:\n if (\n col in self.train_df\n and col + str(\"Encoded\") not in self.ord_cols\n ):\n if self.test_df is not None:\n self.test_df = pd.concat(\n [\n self.test_df,\n pd.get_dummies(\n self.test_df[col], prefix=col\n ).astype(\"category\"),\n ],\n axis=1,\n )\n self.train_df = pd.concat(\n [\n self.train_df,\n pd.get_dummies(self.train_df[col], prefix=col).astype(\n \"category\"\n ),\n ],\n axis=1,\n )", "def one_hot_encode(df, col):\n return pd.get_dummies(df, columns=[col], drop_first=True)", "def encode_one_hot(s):\n all = []\n for c in s:\n x = np.zeros((INPUT_VOCAB_SIZE)) \n index = char_indices[c]\n x[index] = 1 \n all.append(x)\n return all", "def one_hot_encode(df, ohe_cols):\n return pd.get_dummies(df, columns=ohe_cols)", "def one_hot_encode(self, arr, n_labels):\n one_hot = np.zeros((np.multiply(*arr.shape), n_labels), dtype=np.float32)\n one_hot[np.arange(one_hot.shape[0]), arr.flatten()] = 1.\n one_hot = one_hot.reshape((*arr.shape, n_labels))\n \n return one_hot", "def one_hot_encoding(data):\n\n data_encoded = pd.get_dummies(data)\n\n return data_encoded", "def one_hot_encode(x_: ArrayLike) -> tuple[IntArray, dict[str, int]]:\n x: np.ndarray = np.copy(x_)\n if x.ndim == 1:\n x = x[:, np.newaxis]\n shape = x.shape\n has_na = np.any(pd.isna(x))\n if x.dtype == object:\n x = x.astype(str)\n categories, codes = np.unique(x, return_inverse=True)\n num_classes = len(categories)\n encoded_x = np.zeros((x.size, num_classes), dtype=np.uint8)\n encoded_x[np.arange(x.size), codes.astype(np.uint8).ravel()] = 1\n encoded_x = encoded_x.reshape(*shape, num_classes)\n if has_na:\n # remove NaN column\n categories = categories[:-1]\n encoded_x = encoded_x[:, :, :-1]\n mapping = {\n _category_name(category): code for code, category in enumerate(categories)\n }\n return encoded_x, mapping", "def pre_process_data(df):\n\n # one-hot encode categorical values\n df = pd.get_dummies(df)\n\n return df", "def onehot_encode_labels(y):\n\treturn OneHotEncoder(categories=\"auto\", sparse=False).fit_transform(y.reshape(y.shape[0],1))", "def one_hot_encode(x):\n\n # check if encoder has been previously created, if not make a global var an initialize it\n if 'encoder' not in globals():\n global encoder\n encoder = LabelBinarizer()\n encoder.fit(range(10))\n\n return encoder.transform(x)", "def one_hot_encoding(data):\r\n encoder = LabelEncoder()\r\n y = encoder.fit_transform(data)\r\n return(y)", "def _one_hot_encoder(self):\n ohe = preprocessing.OneHotEncoder()\n ohe.fit(self.dataframe[self.cat_feats])\n return ohe.transform(self.dataframe_d_copy[self.cat_feats])", "def one_hot_encode(x):\n # TODO: Implement Function\n lb = preprocessing.LabelBinarizer()\n lb.fit([0,1,2,3,4,5,6,7,8,9])\n \n return lb.transform(x)", "def one_hot_encoding(sequence):\n\n mydict = {\n \"A\": np.asarray([1, 0, 0, 0]),\n \"a\": np.asarray([1, 0, 0, 0]),\n \"C\": np.asarray([0, 1, 0, 0]),\n \"c\": np.asarray([0, 1, 0, 0]),\n \"G\": np.asarray([0, 0, 1, 0]),\n \"g\": np.asarray([0, 0, 1, 0]),\n \"T\": np.asarray([0, 0, 0, 1]),\n \"t\": np.asarray([0, 0, 0, 1]),\n \"Y\": np.asarray([0, 1, 0, 1]),\n \"y\": np.asarray([0, 1, 0, 1]),\n \"R\": np.asarray([1, 0, 1, 0]),\n \"r\": np.asarray([1, 0, 1, 0]),\n \"S\": np.asarray([0, 1, 1, 0]),\n \"s\": np.asarray([0, 1, 1, 0]),\n \"W\": np.asarray([1, 0, 0, 1]),\n \"w\": np.asarray([1, 0, 0, 1]),\n \"K\": np.asarray([0, 0, 1, 1]),\n \"k\": np.asarray([0, 0, 1, 1]),\n \"M\": np.asarray([1, 1, 0, 0]),\n \"m\": np.asarray([1, 1, 0, 
0]),\n \"B\": np.asarray([0, 1, 1, 1]),\n \"b\": np.asarray([0, 1, 1, 1]),\n \"D\": np.asarray([1, 0, 1, 1]),\n \"d\": np.asarray([1, 0, 1, 1]),\n \"H\": np.asarray([1, 1, 0, 1]),\n \"h\": np.asarray([1, 1, 0, 1]),\n \"V\": np.asarray([1, 1, 1, 0]),\n \"v\": np.asarray([1, 1, 1, 0]),\n \"N\": np.asarray([0, 0, 0, 0]),\n \"n\": np.asarray([0, 0, 0, 0]),\n \"-\": np.asarray([0, 0, 0, 0]),\n }\n print(f\"Seq: {sequence}\")\n if len(sequence) > 0:\n nuc_list = list()\n for nuc in list(sequence):\n nuc_list.append(mydict[nuc])\n result = np.stack(np.asarray(nuc_list, dtype=\"int8\"))\n return result\n else: \n print(\"ERROR! sequence is too short\")", "def one_hot_encode(self, meta_field):\n one_hot = pd.get_dummies(self.sample_meta[meta_field]).values\n return one_hot", "def one_hot_encode(x):\n # TODO: Implement Function\n output = np.zeros((len(x), 10))\n \n for i, j in enumerate(x):\n output[i,j] = 1\n \n return output", "def encode_one_hot2(s):\n x = np.zeros((LINE_SIZE, INPUT_VOCAB_SIZE))\n for n, c in enumerate(s):\n index = char_indices[c]\n x[n, index] = 1 \n return x", "def one_hot_encode(x):\n # TODO: Implement Function\n x_l = list(x)\n for index in np.arange(len(x_l)):\n x_l[index] = get_one_hot_vector(x[index])[x[index]]\n return np.array(x_l)", "def _one_hot_encode(label_vector, total_num_labels):\n out = np.zeros(shape=(len(label_vector), total_num_labels))\n for i in range(len(label_vector)):\n out[i, int(label_vector[i])] = 1\n return out", "def one_hot_encoding(labels, num_classes=10):\n num_labels = labels.shape[0]\n encoded = np.zeros((num_labels, num_classes))\n encoded[np.arange(num_labels), labels[np.arange(num_labels)]] = 1\n \n return encoded", "def one_hot_encoding(y):\n\n y_oh = np.zeros((y.shape[0], y.max() - y.min() + 1))\n\n # currently only works in min is actually 0\n for j in range(0, y_oh.shape[1]):\n y_oh[np.where(y == j), j] = 1\n\n return y_oh", "def one_hot_converter(column):\n # encode class values as integers\n encoder = LabelEncoder()\n encoder.fit(column) \n encoded_ = encoder.transform(column)\n # convert integers to dummy variables, i.e., one-hot encoded\n encoded_column = to_categorical(encoded_)\n \n return encoded_column", "def one_hot_enc(self, word):\n word = self.text_to_int(word)\n word = Variable(torch.tensor(word))\n word = torch.nn.functional.one_hot(word, len(self.index_map))\n return word.transpose(0, 1)", "def one_hot_encode(y, out_size):\n n = len(y)\n oh = np.zeros((n, out_size))\n oh[range(n), y] = 1\n return oh", "def preprocess_dataset(x, y):\n # Add the channel dimension.\n x = np.expand_dims(x, axis=-1)\n # Rescale to [-1, 1].\n x = x.astype(np.float32)\n x /= 128\n x -= 0.5\n # One-hot encode the labels.\n y = utils.to_categorical(y)\n return (x, y)", "def make_onehot(x,num_labels=7):\n enc = OneHotEncoder(n_values=num_labels)\n return enc.fit_transform(np.array(x).reshape(-1, 1)).toarray()", "def one_hot_encode(self, y: np.ndarray) -> np.ndarray:\n return np.eye(self.output_size)[y]", "def one_hot(y_):\n y_ = y_.reshape(len(y_))\n n_values = int(np.max(y_)) + 1\n n_values = 6\n return np.eye(n_values)[np.array(y_, dtype=np.int32)] # Returns FLOATS", "def one_hot_encoding(data_dict):\n merged_seq = []\n merged_label = []\n for key in __MERGE_KEYS__:\n merged_seq += list(data_dict[key])\n merged_label += list([__MERGE_LABELS__[key]] * len(data_dict[key]))\n merged_seq_int = list(map(map_base_to_int, merged_seq))\n\n X = to_categorical(merged_seq_int)\n Y = to_categorical(merged_label)\n return X, Y", "def 
one_hot_encode_single(mapping: dict[str, int], value: Optional[str]) -> IntArray:\n encoded_value = np.zeros((1, len(mapping)))\n if not pd.isna(value):\n code = mapping[str(value)]\n encoded_value[0, code] = 1\n return encoded_value", "def convert_to_one_hot(a):\n a = a[:, 0]\n a = a.astype(int)\n A = np.zeros((len(a), config.num_classes))\n A[np.arange(len(a)), a] = 1\n return A", "def encoding_onehot(df, target=None):\n if not target:\n target = ['user_type', 'city']\n for col in target:\n # Following is exactily the df.join() but is inplace.\n one_hot = pandas.get_dummies(df[col])\n for item in one_hot:\n df[item] = one_hot[item]\n df.drop([col], axis=1, inplace=True)\n return None", "def one_hot(df):\r\n # One-hot encode into \r\n cols = ['job', 'marital', 'education', 'month', 'day_of_week', 'poutcome']\r\n for each in cols:\r\n dummies = pd.get_dummies(df[each], prefix=each, drop_first=False)\r\n df = pd.concat([df, dummies], axis=1)\r\n df = df.drop(cols,axis=1)\r\n return df", "def _onehot(y, n_classes=False):\n if not n_classes:\n \"\"\"Create one-hot encoded labels.\"\"\"\n n_classes = len(set(y))\n out = np.zeros((len(y), n_classes))\n for i, ii in enumerate(y):\n out[i][ii] += 1\n y_onehot = out.astype(int)\n return y_onehot", "def one_hot_encode(x, n_classes):\n return np.eye(n_classes)[x]", "def one_hot_encoder(df, cols):\r\n\r\n for col in cols:\r\n if(\"admission\" in col):\r\n dummies = pd.get_dummies(df[col], drop_first=False)\r\n else:\r\n dummies = pd.get_dummies(df[col], prefix=col, drop_first=False)\r\n df = pd.concat([df, dummies], axis=1) \r\n df.drop([col],axis=1, inplace=True)\r\n return df", "def one_hot_encode(idx, vocab_size):\n # Initialize the encoded array\n one_hot = np.zeros(vocab_size)\n \n # Set the appropriate element to one\n one_hot[idx] = 1.0\n\n return one_hot", "def _maybe_one_hot(self, obs):\n if self.toOneHot:\n obs = np.reshape(obs, (1, -1))\n ints = obs.dot(self.multiplication_factor)\n x = np.zeros([obs.shape[0], self.one_hot_len])\n for i, j in enumerate(ints):\n x[i, j] = 1\n return x\n else:\n return obs", "def To1hot(label,num_class):\n onehot = np.zeros(num_class)\n onehot[label] = 1\n return onehot", "def _onehot(integer_labels):\r\n n_rows = len(integer_labels)\r\n n_cols = integer_labels.max() + 1\r\n onehot = np.zeros((n_rows, n_cols), dtype='uint8')\r\n onehot[np.arange(n_rows), integer_labels] = 1\r\n return onehot", "def onehot(inputs, num_classes):\n num_sample = inputs.size(0)\n inputs_onehot = torch.zeros(num_sample, num_classes)\n inputs_onehot.scatter_(1, inputs.unsqueeze(1), 1.0)\n return inputs_onehot", "def one_hot_enc(df, ohe_cols):\n for col in ohe_cols:\n df_ohe = pd.get_dummies(df[col], preffix=col)\n df = pd.concat([df, df_ohe], axis=1)\n df.drop(col, axis=1, inplace=True)\n return df", "def onehot_encoding(labels, dim, device):\n out = th.zeros(list(labels.size()) + [dim]).to(device)\n out.scatter_(len(out.size()) - 1, labels.unsqueeze(-1), 1.0)\n return out", "def one_hot_encode(gt_image):\n\n # One hot encoding of each pixel according to the CVPR2018 classes\n label_ohe = list(map(lambda x: tf.to_float(tf.equal(gt_image // 1000, x)), cvpr2018_labels()))\n # Stack everything together\n return tf.stack(label_ohe, axis=-1)", "def to_one_hot_encoding(target_data):\n target_data = target_data.squeeze()\n n_class = len(np.unique(target_data))\n res = np.eye(n_class)[target_data.astype(int)]\n return res", "def one_hot_encoder(df, nan_as_category=True):\n original_columns = list(df.columns)\n categorical_columns = [col 
for col in df.columns if\n df[col].dtype == 'object']\n df = pd.get_dummies(df, columns=categorical_columns,\n dummy_na=nan_as_category)\n new_columns = [c for c in df.columns if c not in original_columns]\n return df, new_columns", "def one_hot_encode(labels, num_classes=None):\n if num_classes is None:\n num_classes = len(np.unique(labels))\n return np.eye(num_classes)[labels]", "def one_hot_encode(labels, num_classes=None):\n if num_classes is None:\n num_classes = len(np.unique(labels))\n return np.eye(num_classes)[labels]", "def onehot_encode_y(y, num_class):\n # Assertions\n assert isinstance(y, np.ndarray), \\\n 'y must be a numpy ndarray'\n assert isinstance(num_class, int), \\\n 'num_class must be an int'\n # Functionality\n one_hot = np.zeros((y.shape[0],num_class),dtype=np.int8)\n for index, cls in enumerate(y):\n one_hot[index, int(cls)] = 1\n\n return one_hot", "def onehot(trace):\n encoded_trace = np.zeros((len(trace), 3), dtype=int)\n encoded_trace[np.arange(len(trace)), trace] = 1\n return encoded_trace.flatten()", "def __one_hot(self, y):\n y_one_hot = np.zeros((y.size, y.max() + 1))\n y_one_hot[np.arange(y.size), y] = 1\n \n return y_one_hot", "def one_hot_encode(self, columns):\n if not columns:\n return None\n\n logging.info('one-hot-encoding columns: %s' % ','.join(columns))\n\n both_sets = pd.concat((self.train[columns], self.test[columns]))\n encoder = preprocessing.OneHotEncoder()\n encoded = encoder.fit_transform(both_sets).sorted_indices()\n\n # Split apart train and test set arrays after one-hot encoding.\n nd_train = self.train.shape[0]\n train_enc = encoded[:nd_train]\n test_enc = encoded[nd_train:]\n\n for col in columns:\n logging.debug('unique {}: {}'.format(col, both_sets[col].unique()))\n\n # Create a feature map for decoding one-hot encoding.\n counts = np.array([both_sets[col].unique().shape[0] for col in columns])\n fmap = []\n for i, column in enumerate(columns):\n unique_elements = np.sort(both_sets[column].unique())\n logging.debug('unique elements for col {}: {}'.format(\n column, unique_elements))\n fmap += ['%s-%d' % (column, idx) for idx in unique_elements]\n\n logging.info('after one-hot encoding, found # unique values:')\n for attr, n_values in zip(columns, counts):\n logging.info('%s: %d' % (attr, n_values))\n\n return train_enc, test_enc, fmap, encoder", "def process_categorical_data(data_df):\n return pd.get_dummies(data_df, columns=Columns.categorical)", "def one_hot_encode(label, label_values):\n semantic_map = []\n for colour in label_values:\n equality = np.equal(label, colour)\n class_map = np.all(equality, axis = -1)\n semantic_map.append(class_map)\n semantic_map = np.stack(semantic_map, axis=-1)\n\n return semantic_map", "def encode_labels(self, y, num_labels):\n onehot = np.zeros((num_labels, y.shape[0]))\n for i in range(y.shape[0]):\n onehot[y[i], i] = 1.0\n return onehot", "def encode_onehot(df, cols):\n vec = DictVectorizer()\n \n vec_data = pd.DataFrame(vec.fit_transform(df[cols].to_dict(outtype='records')).toarray())\n vec_data.columns = vec.get_feature_names()\n vec_data.index = df.index\n \n df = df.drop(cols, axis=1)\n df = df.join(vec_data)\n return df", "def _one_hot(self, key):\n train_df = self.train.copy(deep=False)\n one_hot = pd.get_dummies(train_df[key],drop_first=True)\n one_hot.columns=[key+'_'+col for col in one_hot.columns.values]\n train_df = pd.concat([train_df,one_hot],axis=1,sort=True)\n train_dfg = train_df.groupby('fullVisitorId')\n return train_dfg[one_hot.columns.values].sum()", "def 
class2onehot(class_labels, seq_len, batchsize, num_task):\n\n\n one_hot = torch.FloatTensor(batchsize,seq_len,num_task)\n one_hot.zero_()\n one_hot = one_hot.scatter_(1, seq_len,class_labels, 1)\n\n return one_hot", "def encode_onehot(df, cols):\n vec = DictVectorizer()\n\n vec_data = pd.DataFrame(vec.fit_transform(df[cols].to_dict(orient='records')).toarray())\n vec_data.columns = vec.get_feature_names()\n vec_data.index = df.index\n\n df = df.drop(cols, axis=1)\n df = df.join(vec_data)\n return df", "def one_hot_encode(df, colnames):\r\n for col in colnames:\r\n oh_df = get_dummies(df[col], prefix=col)\r\n df = concat([oh_df, df], axis=1)\r\n df = df.drop([col], axis=1)\r\n return df", "def one_hot(labels, dim):\n batch_size = labels.size(0)\n out = torch.zeros(batch_size, dim)\n out[np.arange(batch_size), labels.squeeze().long()] = 1\n return out", "def own_OneHotColumnCreator(df, columns):\n for col in cat_attribs:\n for value in df[col].unique():\n df[value] = (df[col] == value).astype(int)", "def one_hot_encode(Y, classes):\n if type(classes) is not int:\n return None\n if Y is None or type(Y) != np.ndarray:\n return None\n for c in Y:\n if c >= classes or c < 0:\n return None\n m = Y.shape[0]\n mtx = np.zeros((m, classes))\n\n for row, c_label in zip(mtx, Y):\n row[c_label] = 1\n\n return mtx.T", "def one_hot(indices, depth):\n # print(indices)\n encoded_indices = torch.zeros(indices.size() + torch.Size([depth])).cuda()\n index = indices.view(indices.size()+torch.Size([1]))\n encoded_indices = encoded_indices.scatter_(1,index,1)\n \n return encoded_indices", "def one_hot_encode_soy(soy_data, classes):\n\n print('[ INFO ]: One-hot-encoding soy data...')\n\n for col in soy_data.columns:\n if col not in ['index', 'attr_35']:\n\n # Find the min and max values in each column to account for all values\n col_min = min(soy_data[col])\n col_max = max(soy_data[col]) + 1\n for u in range(col_min, col_max):\n soy_data[col + '_' + str(u)] = (soy_data[col] == u).astype(int)\n soy_data = soy_data.drop(col, axis=1)\n if col == 'attr_35':\n\n # Create new columns for each unique class\n for u in soy_data[col].unique():\n soy_data['{}_class'.format(u)] = np.where(soy_data[col] == u, 1, 0)\n classes = np.char.replace(classes, u, u + '_class')\n soy_data = soy_data.drop(col, axis=1)\n\n return soy_data, classes", "def conv_y_to_onehot_mat(labels):\n one_idx = np.array(labels)\n nkind = len(np.unique(one_idx))\n nlabels = len(one_idx)\n\n ret = np.zeros((nkind, nlabels))\n ret[one_idx, np.arange(nlabels)] = 1\n return ret", "def to_onehot(labels: torch.Tensor, num_classes: int) -> torch.Tensor:\n if len(labels.size()) == 1:\n return F.one_hot(labels, num_classes).float()\n return labels", "def to_onehot(x, num_classes):\n return np.eye(num_classes, dtype='float32')[x]", "def encode_categorical(df):\n cat_cols = df.select_dtypes(\"category\").columns\n for col in cat_cols:\n df[col] = df[col].cat.codes + 1\n unique_no = len(df[col].unique())\n if unique_no < 50:\n df[col] = df[col].astype(\"uint8\")\n elif unique_no < 16000:\n df[col] = df[col].astype(\"int16\")\n else:\n df[col] = df[col].astype(\"int32\")\n return df", "def preprocess_input(text, tokenizer, max_id):\n X = np.array(tokenizer.texts_to_sequences(text)) - 1\n encoded = tf.one_hot(X, depth=max_id)\n return encoded", "def one_hot(indices, depth):\n\n encoded_indicies = torch.zeros(indices.size() + torch.Size([depth]))\n if indices.is_cuda:\n encoded_indicies = encoded_indicies.cuda() \n index = indices.view(indices.size()+torch.Size([1]))\n 
encoded_indicies = encoded_indicies.scatter_(1,index,1)\n\n return encoded_indicies", "def to_onehot(value, dim):\n one_hot = torch.zeros(value.shape[0], dim)\n one_hot[torch.arange(value.shape[0]), value.long()] = 1\n return one_hot", "def initialization_based(input_array):\n\n # Search for the unique labels in the array\n oh_array = np.unique(input_array, return_inverse=True)[1]\n\n # Define the shape of the one hot encoded array\n out = np.zeros((oh_array.shape[0], oh_array.max() + 1), dtype=int)\n\n # Set the predicted class on 1, and all the other classes stays at 0\n out[np.arange(out.shape[0]), oh_array] = 1\n\n return out", "def to_one_hot(labels, num_classes):\n shape = labels.size()\n shape = shape + (num_classes,)\n one_hot = torch.FloatTensor(shape)\n one_hot.zero_()\n dim = 1 if len(shape) == 2 else 2\n one_hot.scatter_(dim, labels.unsqueeze(-1), 1)\n return one_hot", "def one_hot(labels, classes=None):\n return K.utils.to_categorical(labels, classes)", "def get_mnist(one_hot_enc, normalized, flatten):", "def one_hot(labels):\n one_hot_labels = np.zeros(labels.shape + (n_actions,))\n for c in range(n_actions):\n one_hot_labels[labels == c, c] = 1.0\n return one_hot_labels", "def to_one_hot(y, depth=None):\n y_flat = y.to(torch.int64).view(-1, 1)\n depth = depth if depth is not None else int(torch.max(y_flat)) + 1\n y_one_hot = torch.zeros(y_flat.size()[0], depth, device=y.device).scatter_(1, y_flat, 1)\n y_one_hot = y_one_hot.view(*(tuple(y.shape) + (-1,)))\n return y_one_hot", "def one_hot(x, num_classes, dtype=jnp.float32):\n return jax.nn.one_hot(x, num_classes).astype(dtype)", "def dense_to_one_hot(labels_dense, num_classes):\r\n print ('in onehot', labels_dense, num_classes)\r\n num_labels = labels_dense.shape[0]\r\n index_offset = numpy.arange(num_labels) * num_classes\r\n labels_one_hot = numpy.zeros((num_labels, num_classes))\r\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\r\n return labels_one_hot", "def binary_to_one_hot(arr: np.ndarray) -> np.ndarray:\n res = np.zeros((arr.shape[0], 2))\n res[np.where(arr == 1)[0], 0] = 1\n res[np.where(arr == 0)[0], 1] = 1\n return res", "def label2onehot(self, batch_size, labels):\r\n dim = 6\r\n out = torch.zeros(batch_size, dim)\r\n out[np.arange(batch_size), labels] = 1\r\n return out", "def label_to_one_hot(label, num_of_class=2):\r\n import numpy as np\r\n one_hot = np.zeros((len(label), num_of_class), dtype=np.uint8)\r\n for i in range(len(label)):\r\n one_hot[i, int(label[i] - 1)] = 1 # label is 1 and 2\r\n\r\n return one_hot", "def to_one_hot(arr, num_classes):\n arr = arr.data.astype(int)\n a = np.zeros((arr.shape[0], num_classes))\n a[np.arange(len(a)), arr] = 1\n return tensor.Tensor(a, requires_grad=True)", "def to_one_hot(arr, num_classes):\n arr = arr.data.astype(int)\n a = np.zeros((arr.shape[0], num_classes))\n a[np.arange(len(a)), arr] = 1\n return tensor.Tensor(a, requires_grad=True)", "def __encode_categorical_util(self):\n cat = []\n # cat = self.cat_cols\n for col in self.cat_cols:\n if (\n col in self.train_df\n and col + str(\"Encoded\") not in self.ord_cols\n ):\n if self.test_df is not None:\n self.test_df[col + str(\"Encoded\")] = pd.factorize(\n self.test_df[col]\n )[0]\n self.test_df[col + str(\"Encoded\")] = self.test_df[\n col + str(\"Encoded\")\n ].astype(\"category\")\n self.train_df[col + str(\"Encoded\")] = pd.factorize(\n self.train_df[col]\n )[0]\n self.train_df[col + str(\"Encoded\")] = self.train_df[\n col + str(\"Encoded\")\n ].astype(\"category\")\n cat.append(str(col + 
str(\"Encoded\")))\n self.cat_cols += cat", "def decode_one_hot(x):\n s = []\n for onehot in x:\n one_index = np.argmax(onehot)\n c = indices_char[one_index]\n s.append(c) \n return ''.join(s)", "def chars_to_onehot(self, char_list):\n int_list = self.char_to_int(char_list)\n one_hot = np.zeros((len(self.unique_chars), len(int_list)))\n for i,int_elem in enumerate(int_list):\n one_hot[int_elem,i] = 1\n return one_hot", "def encode_one_hot(df, col_names) -> pd.DataFrame:\n for col_name in col_names:\n encoded = pd.get_dummies(df[col_name], prefix=col_name, drop_first=False)\n df = pd.concat([df, encoded], axis=1)\n df = df.drop([col_name], axis=1)\n return df", "def make_one_hot(y):\n one_hot = np.zeros((len(y), 10))\n for i in range(len(y)):\n one_hot[i, y[i]] = 1.\n return one_hot.transpose(1,0)", "def one_hot_encoding(self):\n \n try: \n\n # Encode dependent variable\n le = LabelEncoder()\n le.fit(self.data[\"consumption\"])\n df_dependent_enc = pd.DataFrame(le.transform(self.data[\"consumption\"]))\n\n # Encode independent variable\n categorical_features = Config.FEATURE_DEFINITION[\"category_cols\"]\n categorical_df = self.data.loc[:, self.data.columns.isin(categorical_features)]\n oe = OrdinalEncoder()\n oe.fit(categorical_df)\n df_catindependent_enc = pd.DataFrame(oe.transform(categorical_df))\n df_catindependent_enc.columns = categorical_df.columns\n\n except KeyError: \n\n st.write(\"Cannot perform one-hot encoding for numerical variables. Please check if variables are properly defined.\")\n st.write(self.data.columns != \"consumption\")\n df_dependent_enc = []\n df_catindependent_enc = []\n\n else:\n \n return df_dependent_enc, df_catindependent_enc", "def to_categorical(x, n_col=None):\n if not n_col:\n n_col = np.amax(x) + 1\n one_hot = np.zeros((x.shape[0], n_col))\n one_hot[np.arange(x.shape[0]), x] = 1\n return one_hot", "def label2onehot(self, labels, dim):\n batch_size = labels.size(0)\n out = torch.zeros(batch_size, dim)\n out[np.arange(batch_size), labels.long()] = 1\n return out", "def one_hot_encode(y, classes=None):\n reshaped = y.reshape((reduce(mul, y.shape), 1))\n encoder = OneHotEncoder(sparse=False)\n if classes is None:\n encoded = encoder.fit_transform(reshaped)\n else:\n classes = np.array(classes)\n n_classes = classes.shape[0]\n encoder.fit(classes.reshape((n_classes, 1)))\n encoded = encoder.transform(reshaped)\n return encoded.reshape(list(y.shape) + [encoded.shape[1]])", "def onehot_enc(df, categorical_columns, categories):\n noncategorical_cols = [col for col in df.columns if col not in categorical_columns]\n \n enc = OneHotEncoder(categories=categories,\n sparse=False,\n handle_unknown='ignore')\n y = enc.fit_transform(df[categorical_columns].fillna(\"None\"))\n \n ohe_cols = [\n f\"{col}_{c}\" for col, cats in zip(categorical_columns, categories) for c in cats]\n df1 = pd.DataFrame(y, columns=ohe_cols)\n \n output_df = pd.concat([df[noncategorical_cols], df1], axis=1)\n return output_df, ohe_cols", "def one_hot(x):\n cart_pos,cart_vel,pole_ang,pole_vel = x\n\n # Cart position\n discrete_cart_pos = int((cart_pos - CART_POSITION_MIN)/(CART_POSITION_MAX-CART_POSITION_MIN)*4)\n\n # Pole angle\n discrete_pol_ang = int((cart_pos - POLE_ANGLE_MIN)/(POLE_ANGLE_MAX-POLE_ANGLE_MIN)*4)\n\n # Cart velocity\n cart_vel_discretisations = [-1,0,1]\n discrete_cart_vel= 3\n for i,v in enumerate(cart_vel_discretisations):\n if cart_vel < v:\n discrete_cart_vel = i\n break\n\n # Pole tip velocity\n pole_vel_discretisations = [-1,0,1]\n discrete_pole_vel= 3\n for i,v in 
enumerate(pole_vel_discretisations):\n if pole_vel < v:\n discrete_pole_vel = i\n break\n\n # Convert to one-hot encoding\n x = discrete_cart_pos + discrete_cart_vel*4 + discrete_pol_ang*8 + discrete_cart_vel*12\n output = [0] * ONE_HOT_NUM_FEATURES\n output[x] = 1\n return np.array([output]).transpose()", "def _create_onehot(cls, onnx_node, inputs, opset_version):\n axis = onnx_node.getattr(\"axis\", -1)\n # we move several inputs to singa's attribuates\n # and mark them so we don't use them when we run this operator\n depth = tensor.to_numpy(inputs.pop(1)).astype(np.int32)\n value = tensor.to_numpy(inputs.pop(1))\n onnx_node.consumed_inputs.extend(onnx_node.inputs[1:])\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(axis, depth, value)", "def process(self, data_set: DataSet):\n logging.info(f\"\\nOne hot encoding DataSet...\")\n if len(data_set.get_categorical_attribute_indexes()) == 0:\n logging.info(f\"No need to one hot encode. No categorical variables left.\")\n return\n\n for index in data_set.get_categorical_attribute_indexes():\n column = data_set.get_attribute_column(index)\n\n unique = np.unique(column)\n logging.info(f\"Attribute index '{index}' has categorical variables: {unique}\")\n unique_dict = {x: i for i, x in enumerate(unique)}\n new_columns = [[0. for i in range(len(data_set))] for j in unique]\n\n for i, value in enumerate(column):\n new_columns[unique_dict[value]][i] = 1.\n\n for col in new_columns:\n data_set.add_column(col)\n\n for index in sorted(data_set.get_categorical_attribute_indexes(), reverse=True):\n data_set.remove_column(index)\n\n logging.info('One hot encoded DataSet:')\n logging.info(data_set.summary())", "def dense_to_one_hot(labels_dense, label):\n num_labels = len(labels_dense)\n index_offset = list(labels_dense).index(label)\n labels_one_hot = np.zeros(num_labels)\n labels_one_hot[index_offset] = 1\n return labels_one_hot", "def onehot(train_cat, test_cat):\n train_enc = []\n test_enc = []\n columns = []\n for c in cols(train_cat):\n train = train_cat.loc[:,c]\n test = test_cat.loc[:,c]\n labels = list(set(train.unique().tolist()) |\n set(test.unique().tolist()))\n\n l_encoder = LabelEncoder()\n l_encoder.fit(labels)\n oh_encoder = OneHotEncoder(sparse=False, n_values=len(labels))\n\n train_ftr = l_encoder.transform(train).reshape(len(train), 1)\n train_enc.append(oh_encoder.fit_transform(train_ftr))\n test_ftr = l_encoder.transform(test).reshape(len(test), 1)\n test_enc.append(oh_encoder.fit_transform(test_ftr))\n\n col_tmp = list(l_encoder.inverse_transform(range(len(labels))))\n columns = columns + [\"%s_%s\" % (c, i) for i in col_tmp]\n\n new_train_cat = pd.DataFrame(np.column_stack(train_enc), columns=columns,\n dtype=np.uint8)\n new_test_cat = pd.DataFrame(np.column_stack(test_enc), columns=columns,\n dtype=np.uint8)\n return new_train_cat, new_test_cat", "def dummify_all_categorical(df):\n\n df = pd.get_dummies(df)\n df = dummify(df, \"detailed industry recode\")\n df = dummify(df, \"detailed occupation recode\") ## add some variables that are encoded as int64 but that are in fact categorical\n return df" ]
[ "0.75467926", "0.71732116", "0.7166452", "0.7154489", "0.71511304", "0.71498376", "0.7130969", "0.71149933", "0.7075103", "0.70738035", "0.7065001", "0.70538414", "0.70359933", "0.70098567", "0.7004153", "0.6951065", "0.69328225", "0.69303125", "0.6927022", "0.692566", "0.6913862", "0.68970597", "0.68882537", "0.68856406", "0.6867454", "0.68550134", "0.68418026", "0.6821936", "0.68092376", "0.67857873", "0.67819905", "0.67688185", "0.6767324", "0.67597884", "0.6734347", "0.67342144", "0.67079216", "0.6695675", "0.66910774", "0.6683873", "0.66814816", "0.6669739", "0.66535985", "0.66210777", "0.6613291", "0.6606013", "0.66016555", "0.66016555", "0.65864974", "0.65851396", "0.656568", "0.65656596", "0.65620977", "0.65502864", "0.654684", "0.6545456", "0.65402955", "0.65298676", "0.65290713", "0.65259236", "0.6525408", "0.6520651", "0.6519721", "0.6501544", "0.650121", "0.64942855", "0.64938587", "0.64875567", "0.6483698", "0.6483517", "0.6474665", "0.645325", "0.6448006", "0.6435507", "0.6434195", "0.64341223", "0.6432684", "0.6431303", "0.6424281", "0.64238644", "0.642365", "0.642195", "0.64200056", "0.641233", "0.641233", "0.64100236", "0.64063543", "0.6405605", "0.6394489", "0.63937783", "0.63872325", "0.6386047", "0.6376004", "0.6368885", "0.63665754", "0.6363325", "0.63611764", "0.6352628", "0.6350995", "0.63506424", "0.6347735" ]
0.0
-1
Plot a line from slope and intercept
def abline(slope, intercept):
    axes = plt.gca()
    x_vals = np.array(axes.get_xlim())
    y_vals = intercept + slope * x_vals
    plt.plot(x_vals, y_vals, '--')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_regression_line():\r\n axes = plt.gca()\r\n x_vals = np.array(axes.get_xlim())\r\n y_vals = y_intercept() + slope() * x_vals\r\n plt.plot(x_vals, y_vals)", "def abline(slope, intercept):\n axes = plt.gca()\n x_vals = np.array(axes.get_xlim())\n y_vals = intercept + slope * x_vals\n plt.plot(x_vals, y_vals)", "def abline(slope, intercept):\n axes = plt.gca()\n x_vals = np.array(axes.get_xlim())\n y_vals = intercept + slope * x_vals\n plt.plot(x_vals, y_vals, '--', color='r')", "def abline(slope, intercept):\n global axes\n axes = plt.gca()\n x_vals = np.array(axes.get_xlim())\n y_vals = intercept + slope * x_vals\n return plt.plot(x_vals, y_vals)", "def line(intercept, slope, x):\n return slope*x + intercept", "def abline(self,slope, intercept,axis):\n #axis = plt.gca()\n x_vals = np.array(axis.get_xlim())\n y_vals = intercept + slope * x_vals\n axis.plot(x_vals, y_vals, 'k--')", "def abline(slope, intercept, a, b):\n # axes = plt.gca()\n print(slope)\n print(intercept)\n x_vals = np.array(list_xs[ a: b])\n y_vals = intercept + slope * (x_vals-a)\n plt.plot(x_vals, y_vals, '--')\n # print(x_vals)", "def redraw_slope(self):\n a = np.linspace(0, 9, self.num_points)\n b = [(self.slope * n) for n in a]\n\n self.output_widget.clear_output(wait=True)\n with self.output_widget as f:\n fig, ax = plt.subplots(1,1,figsize=(6, 4), dpi=100)\n# plt.ylim(ymax=max(self.y)+1)\n# plt.xlim(xmax=max(self.x)+1)\n\n plt.scatter(self.x, self.y)\n# plt.plot(a, b)\n plt.tick_params(\n axis='both', # changes apply to the both-axis\n which='both', # both major and minor ticks are affected\n# bottom=False, # ticks along the bottom edge are off\n# top=False, # ticks along the top edge are off\n labelbottom=False, labelleft=False) #\n\n plt.xlabel('Total rainfall (inch)', fontsize=10)\n plt.ylabel('Total sales', fontsize=10)\n from numpy.polynomial.polynomial import polyfit\n intercept, m = polyfit(self.x, self.y, 1)\n ax.vlines(self.x, self.y, intercept + m * self.x, label='residual')\n plt.plot(self.x, intercept + m * self.x, '-', c='orange',\n label=\"$Y = {:.3f} X {} {:.3f}$\".format(m, '+' if intercept>0 else '-', abs(intercept)))\n plt.legend()\n plt.show()", "def _get_slope(x, y):\n slope = linregress(x, y)\n return slope", "def abline(points, slope, intercept):\n x_values = get_column(points, 0)\n return [slope * i + intercept for i in x_values]", "def _regression_slope_metric(x_data, y_data):\n reg = linregress(x_data, y_data)\n return reg.slope", "def make_line(x = np.linspace(start = 0, stop = 50, num = 200), slope = 2, intercept = 0, noise = 3):\n \n return np.c_[x, slope * x + intercept + np.random.normal(loc = 0, scale = 3, size = len(x))]", "def fit_line(x_data, y_data):\n\tslope, y_intercept, r_value, p_value, std_err = stats.linregress(x_data, y_data)\n\tr_squared = r_value * r_value\n\treturn slope, y_intercept, r_squared, p_value, std_err", "def slope(x1, y1, x2, y2):\n return (y2 - y1) / (x2 - x1)", "def plot_linear_1D(linreg, X, y, xlim, ylim):\n\n pass", "def simple_linreg(n,x,y):\n x_bar = sum(x)/float(n) # average of x\n y_bar = sum(y)/float(n) # average of y\n\n m = sum((x-x_bar)*(y-y_bar))/ float(sum((x-x_bar)**2) ) # slope\n b = y_bar - m*x_bar # y intercept\n\n print(\"The linear regression has resulted in ...\")\n print(\"m(slope) : \",m )\n print(\"b(y intercept) : \",b )\n return m,b", "def slope(x1, y1, x2, y2):\r\n delta_y = y2-y1\r\n delta_x = x2-x1\r\n return delta_y / delta_x", "def scatter_linregress(ax, x, y, title, xlabel=\"\", ylabel=\"\", regress=True):\n xlabel = 
x.name if xlabel == \"\" else \"\"\n ylabel = y.name if ylabel == \"\" else \"\"\n \n #scatter plot\n ax.scatter(x, y)\n ax.set_title(title)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n \n if regress:\n # linear regression and plot\n (slope, intercept, rvalue, pvalue, stderr) = linregress(x, y)\n ax.plot(x, intercept + slope * x, 'r', label='fitted line')\n ax.annotate(f\"y = {round(slope, 2)} * x + {round(intercept, 2)}\\nR squared: {round(rvalue**2, 2)}\",(x.min(),y.min()),fontsize=15,color=\"red\")", "def slope_lines(self,image):\r\n img_copy = image.copy()\r\n \r\n left_lines,right_lines=self.makeLeftRightline()\r\n left_line = np.mean(left_lines, axis=0)\r\n right_line = np.mean(right_lines, axis=0)\r\n\r\n poly_vertices = []\r\n order = [0,1,3,2]\r\n\r\n for slope, intercept in [left_line, right_line]:\r\n #getting height of image in y1\r\n rows, cols = image.shape[:2]\r\n y1= int(rows) \r\n #taking y2 upto 68% of y1\r\n y2= int(rows*0.68) \r\n #y=mx +c can be written as x=(y-c)/m\r\n x1=int((y1-intercept)/slope)\r\n x2=int((y2-intercept)/slope)\r\n poly_vertices.append((x1, y1))\r\n poly_vertices.append((x2, y2))\r\n\r\n # DRAWING LINES AND PATH ON THE IMAGE\r\n thickness_of_line=9\r\n color_of_line=[20, 255, 20]\r\n lines=np.array([[[x1,y1,x2,y2]]])\r\n for i in lines:\r\n for x1,y1,x2,y2 in i:\r\n cv2.line(img_copy, (x1, y1), (x2, y2), color_of_line, thickness_of_line)\r\n poly_vertices = [poly_vertices[i] for i in order]\r\n #filling polygon color\r\n cv2.fillPoly(img_copy, pts = np.array([poly_vertices],'int32'), color = (200,20,20))\r\n final_out=cv2.addWeighted(image,0.7,img_copy,0.4,0.)\r\n return final_out", "def set_slope(self, slope: float) -> None:\r\n self.slope = slope", "def fit_line(data, error_func):\n\n # Generate initial guess for line model\n l = np.float32([0, np.mean(data[:, 1])]) # slope = 0, intercept = mean(y values)\n\n # Plot initial guess (optional)\n x_ends = np.float32([-5, 5])\n plt.plot(x_ends, l[0] * x_ends + l[1], 'm--', linewidth = 2.0, label = 'Initial guess')\n\n # Call optimizer to minimize error function\n result = spo.minimize(error_func, l, args = (data, ), method = 'SLSQP', options = {'disp': True})\n return result.x", "def linear_slope_fit(wf, mean_y, sigma_y, slope, intercept):\n\n sum_x = sum_x2 = sum_xy = sum_y = mean_y[0] = sigma_y[0] = 0\n isum = len(wf)\n\n for i,value in enumerate(wf):\n sum_x += i \n sum_x2 += i**2\n sum_xy += (value * i)\n sum_y += value\n mean_y += (value-mean_y) / (i+1)\n sigma_y += (value-mean_y)**2\n\n\n sigma_y /= (isum + 1)\n np.sqrt(sigma_y, sigma_y)\n\n\n slope[0] = (isum * sum_xy - sum_x * sum_y) / (isum * sum_x2 - sum_x * sum_x)\n intercept[0] = (sum_y - sum_x * slope[0])/isum", "def drawSlope(self):\n length = sqrt(1 + self.slope**2) # Length of the line segment over 1 x-unit\n xOffset = (segmentLength / length) / 2 # Figures out how many times the length of the 1 unit length fits into the desired length\n # then divides by 2 becuase half is on the left and half on the right of the center\n\n\n # Left end point\n xLeft = self.x - xOffset\n yLeft = (self.slope * (xLeft - self.x)) + self.y\n\n # Right end point\n xRight = self.x + xOffset\n yRight = (self.slope * (xRight - self.x)) + self.y\n\n\n # Converts the left and right end points from cartesian coordinates to screen coordinates\n left = cartesianToScreen(xLeft , yLeft)\n right = cartesianToScreen(xRight, yRight)\n\n\n pygame.draw.aaline(display, self.color, left, right, 1) # DRAWS THE LINE AHHHHHHHHHHHHHHHHHH :P", "def plot(self, **kwargs):\n 
base.plot_homline(self.line, **kwargs)", "def scatter_linreg_plot(x, y, ax=None, label='data', d_fmt='b.', l_fmt='k-',\n d_kw={}, l_kw={}):\n assert len(x) == len(y), 'scatter data must be same length'\n from matplotlib.pyplot import figure, axes, draw\n from scipy.stats import linregress\n if ax is None:\n f = figure()\n ax = axes()\n \n # Draw the scatter data\n ax.plot(x, y, d_fmt, label=label, **d_kw)\n \n # Get the linear regression\n m, b, r, p, sem = linregress(x, y)\n print '(r = %.4f, p = %.4e)' % (r, p)\n x0 = numpy.array([x.min(), x.max()], 'd')\n y0 = m * x0 + b\n \n # Plot the regression line\n ax.plot(x0, y0, l_fmt, zorder=-1, label='_nolegend_', **l_kw)\n draw()\n return r", "def intercept(x1, y1, x2, y2):\r\n m = slope(x1, y1, x2, y2)\r\n return y1 - m*x1", "def linear_regression(self, x_data, y_data, mask = None, ax = None):\n if mask is None:\n mask = full(len(y_data), True, dtype=bool)\n poly = poly1d(polyfit(x_data[mask], y_data[mask], 1))\n\n if ax is not None:\n ax.plot(x_data, polyval(poly, x_data), \"--r\",\\\n label = \"Slope: %.2f\" %(poly[1]))\n return poly", "def plot_line(ax, p1, p2, *args, **kwargs):\n ax.plot(*zip(p1, p2), *args, **kwargs)", "def line_plot(self, x, y, labels, ax=None):\n if ax is None:\n fig, ax = plt.subplots()\n else:\n fig = None\n ax.plot(x, y, '--o', label=labels[0])\n ax.set_xlabel(labels[1])\n ax.set_ylabel(labels[2])\n ax.set_title(labels[3])\n return fig, ax", "def line_plot(x, y=None):\n mpl_fig = plt.figure()\n if y is None:\n plt.plot(x)\n else:\n plt.plot(x, y)\n return get_div_from_data(mpl_fig)", "def slope(slope:float, offset=0., bounds: tuple[float, float] = None) -> core.Slope:\n return core.Slope(slope, offset, bounds=bounds)", "def linePlot(self):\n clf()\n plot(self.x,self.averages)\n xlabel('X Label (units)')\n ylabel('Y Label (units)')\n savefig('line.png')", "def click_line(fig: Optional[int], show_points: bool = False) -> Tuple[float, float]:\n if fig is not None:\n plt.figure(fig)\n pts0, pts1 = plt.ginput(2)\n if (pts1[0] - pts0[0]) == 0:\n raise Exception('vertical line not implemented')\n slope = (pts1[1] - pts0[1]) / (pts1[0] - pts0[0])\n offset = (pts0[1] - slope * pts0[0])\n\n if show_points:\n pts = np.array([pts0, pts1]).T\n plt.plot(pts[0], pts[1], '.-g')\n return offset, slope", "def slope_from_origin(self):\n\n return self.y / self.x", "def make_line_plot(data, x_label=\"Data\", y_label=\"Data Point\"):\n\n y = data\n x = range(len(y))\n\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n plt.plot(x, y)\n plt.show()", "def line_graph():\n fig = plt.figure()\n ax = plt.axes()\n x = [1, 2, 3]\n y = [5, 6, 7]\n plt.plot(x, y)\n plt.show()", "def proxy_line(**kwargs):\r\n return matplotlib.lines.Line2D(range(1), range(1), **kwargs)", "def simple_line():\n\n # Make two datasets\n dataset_a = DataSet(sine)\n dataset_b = DataSet(cosine)\n\n # Make plot and add data\n plot = Plot()\n plot.set_text()\n plot.add_dataset(dataset_a)\n plot.add_dataset(dataset_b)\n\n # Plot graph and display\n plot.plot()\n plot.save(name='./figures/2d_simple_line',fmt='png')\n plot.display()", "def get_x_y_for_line(bounds, y_intercept, slope): \n\n x = np.sort(bounds)\n\n y = y_intercept + (slope * x)\n\n return x, y", "def slope_from_origin(self):\n\n return (self.y / self.x)", "def fit_slope_with_zero_intercept_residue(X,Y):\n X = np.array(X)\n Y = np.array(Y)\n slope = np.sum(Y*X)/np.sum(np.power(X,2))\n return slope*X - Y", "def plot_scatter(x, y, trendline_y=None, trendline_x=None, line_of_slope_1=False,\n x_label=None, y_label=None, 
x_limits=None, y_limits=None, axes_equal=True, figure_size=(10, 10.2),\n trendline_dots=False, **kwargs):\n if type(x) is pd.DataFrame:\n x = _convert_df_to_series(x)\n elif type(x) is np.ndarray or type(x) is list:\n x = pd.Series(x).rename('x')\n\n if type(y) is pd.DataFrame:\n y = _convert_df_to_series(y)\n elif type(y) is np.ndarray or type(y) is list:\n y = pd.Series(y).rename('y')\n\n if x_label is None:\n x_label = x.name\n if y_label is None:\n y_label = y.name\n\n merged_df = pd.concat([x, y], join='inner', axis=1)\n x = merged_df[x.name]\n y = merged_df[y.name]\n\n if trendline_y is not None:\n legend = True\n if trendline_x is None:\n trendline_x = merged_df[x.name]\n else:\n legend = False\n\n if line_of_slope_1 is True:\n legend = True\n\n fig, axes = plt.subplots(figsize=figure_size, **kwargs)\n _scatter_subplot(x, y, trendline_y=trendline_y, trendline_x=trendline_x, line_of_slope_1=line_of_slope_1,\n x_label=x_label, y_label=y_label, x_limits=x_limits, y_limits=y_limits, axes_equal=axes_equal,\n trendline_dots=trendline_dots, legend=legend, ax=axes)\n\n plt.close()\n return fig", "def plot_slopegraph(X,Y,color='b',names=None):\n \n assert(len(X) == len(Y)), 'X and Y must have same length'\n assert( (np.ndim(X) == np.ndim(Y)) & np.ndim(X) == 1 ), 'X and Y must be 1-dimensional arrays'\n \n N = len(X)\n for i in range(N):\n # Skip if one of value is NaN\n x = X[i]; y = Y[i]\n if ~np.isnan(x) and ~np.isnan(y):\n # Plot X, Y as scatter first\n plt.scatter([1,2],[x,y],color=color)\n # Plot slope\n plt.plot([1,2],[x,y],color=color)\n if names is not None:\n plt.xticks([1,2],names)", "def slope(self, x1, y1, x2, y2):\n if x1 == x2:\n slope = np.inf\n else:\n slope = (y2-y1)/(x2-x1)\n\n return np.math.atan(slope)", "def reftectionLineMatrix(slope:float=0, intercept:float=0):\n trans, trans_ = None, None\n if slope == inf:\n trans = (-intercept, 0)\n trans_ = (intercept, 0)\n else:\n trans = (0, -intercept)\n trans_ = (0, intercept)\n slope = atan(slope)\n refMat = Drawable.translateMatrix(*trans)\n refMat = np.dot(Drawable.rotateMatrix(-slope), refMat)\n refMat = np.dot(Drawable.scaleMatrix(1, -1), refMat)\n refMat = np.dot(Drawable.rotateMatrix(slope), refMat)\n refMat = np.dot(Drawable.translateMatrix(*trans_), refMat)\n return refMat", "def plothus(ax, x, y, *, datalabel='', linestyle = '-',\n marker = ''):\n out = ax.plot(x, y, zorder=1, label=datalabel, linestyle = linestyle,\n marker = marker)\n return out", "def line(l, color='k', **kwargs):\n ax.plot(wfl(nth(l, 0)), hfl(nth(l, 1)), color=color, **kwargs)", "def _timeseries_scatter_plot_reg(large_scale_signal_ts, regional_signal_ts,\n rvalue, slope):\n res = stats.linregress(large_scale_signal_ts, regional_signal_ts)\n y_values = res.intercept + res.slope * \\\n np.array(large_scale_signal_ts)\n rvalue = res.rvalue\n slope = res.slope\n return rvalue, slope, y_values", "def determine_angle_slope(line, ax):\n x, y = line.get_data()\n\n sp1 = ax.transData.transform_point((x[0],y[0]))\n sp2 = ax.transData.transform_point((x[-1],y[-1]))\n\n rise = (sp2[1] - sp1[1])\n run = (sp2[0] - sp1[0])\n\n return degrees(atan(rise/run))", "def _timeseries_scatter_plot_lines(axes):\n axes.axvline(\n x=0,\n ymin=-1000,\n ymax=1000,\n color=\"grey\",\n linestyle=\"dotted\",\n alpha=0.6,\n )\n axes.axhline(\n y=0,\n xmin=-1000,\n xmax=1000,\n color=\"grey\",\n linestyle=\"dotted\",\n alpha=0.6,\n )", "def line_plot():\n # generate data\n x = np.arange(0, 4 * np.pi, 0.1) # x in [0, 4* pi)\n y_cos = np.cos(x)\n\n plt.figure()\n plt.plot(x, 
y_cos)\n plt.xlabel('$x$')\n plt.ylabel('$y$')\n plt.title('Cosine function in $[0, 4\\pi)$ with line plot')\n plt.show()\n\n return None", "def find_slopes(x, y):\n slopes = np.zeros((len(x) - 1))\n for i in range(len(x) - 1):\n # m = (y2 - y1) / (x2 - x1)\n delta_x = x[i + 1] - x[i]\n delta_y = y[i + 1] - y[i]\n slopes[i] = delta_y / delta_x\n return slopes", "def scatter_and_line():\n\n # Make random data points around straight line\n random_linear = np.zeros((1000,2))\n random_linear[:,0] = np.random.uniform(0,10,1000)\n random_error = np.random.normal(0.0,2.0,1000)\n random_linear[:,1] = random_linear[:,0]*2.0+1.0+random_error\n\n # Make datasets, order determining line graph on top\n dataset_a = DataSet(random_linear,plot='scatter',order=0,label='Random')\n dataset_b = DataSet(linear,plot='line',colour='black',order=1,label='Linear')\n\n # Colour scatter graph by error\n dataset_a.set_colour(map='coolwarm',colour=random_error)\n\n # Make plot object and add datasets\n plot = Plot()\n plot.set_text(latex=True)\n plot.add_dataset(dataset_a)\n plot.add_dataset(dataset_b)\n plot.set_legend(legend=True)\n\n # Plot graph and display\n plot.plot()\n plot.save(name='./figures/2d_scatter_and_line',fmt='png')\n plot.display()", "def get_line_to(self, provided_point):\n\n \"\"\"Calculate slope\"\"\"\n a = (provided_point.y - self.y) / (provided_point.x - self.x)\n\n \"\"\"Calculate b\"\"\"\n b = self.y - a * self.x\n\n return (a,b)", "def slope(l):\n if l[1] == l[0]:\n return float(\"inf\")\n else:\n return float(l[3]-l[2])/(l[1]-l[0])", "def slope(point_a, point_b, flip):\n\n x_a, y_a = point_a\n x_b, y_b = point_b\n\n dx = x_b - x_a\n dy = y_b - y_a\n\n return -dx / dy if flip else dy / dx", "def fit_line(xs,ys,a,b):\n # Checking against empty list, if empty return 0s\n if not (xs):\n return 0,0,0,0\n \n # Preparing vectors for least square\n z = np.vstack([xs, np.ones(len(xs))]).T\n s = np.array(ys)\n\n # Applying least square fitting on points\n m, c = np.linalg.lstsq(z, np.array(ys))[0] #Applying least squares method\n \n #Using slope and intercept plus y coordinates to get x-coordinates\n x1 = int(a/m - c/m) \n x2 = int(b/m - c/m)\n \n return x1,a,x2,b", "def _scatter_subplot(x, y, trendline_y=None, trendline_x=None, line_of_slope_1=False,\n x_label=None, y_label=None, x_limits=None, y_limits=None, axes_equal=True, subplot_title=None,\n trendline_dots=False, scatter_color=COLOR_PALETTE.primary,\n trendline_color=COLOR_PALETTE.secondary, legend=True, scatter_name=None,\n trendline_name=None, ax=None):\n if ax is None:\n ax = plt.gca()\n\n if scatter_name is None:\n scatter_name = 'Data points'\n\n if trendline_name is None:\n trendline_name = 'Regression line'\n\n if trendline_dots is True:\n trendline_marker = 'o-'\n else:\n trendline_marker = '-'\n\n if x_limits is None or y_limits is None:\n x_min, x_max, y_min, y_max = _derive_axes_limits_for_scatter_plot(x, y)\n\n if axes_equal:\n ax.set_aspect('equal')\n if x_limits is None and y_limits is None:\n axes_min = min(x_min, y_min)\n axes_max = max(x_max, y_max)\n x_limits = (axes_min, axes_max)\n y_limits = (axes_min, axes_max)\n\n if x_limits is None:\n x_limits = (x_min, x_max)\n if y_limits is None:\n y_limits = (y_min, y_max)\n\n ax.set_xlim(x_limits[0], x_limits[1])\n ax.set_ylim(y_limits[0], y_limits[1])\n\n no_dots = len(x)\n\n marker_size_max = 216\n marker_size_min = 18\n marker_size = -0.2 * no_dots + marker_size_max # y=mx+c, m = (216 - 18) / (1000 - 0) i.e. 
slope changes up to 1000\n marker_size = marker_size_min if marker_size < marker_size_min else marker_size\n\n max_alpha = 0.7\n min_alpha = 0.3\n alpha = -0.0004 * no_dots + max_alpha # y=mx+c, m = (0.7 - 0.3) / (1000 - 0) i.e. alpha changes up to 1000 dots\n alpha = min_alpha if alpha < min_alpha else alpha\n\n ax.scatter(x, y, marker='o', color=scatter_color, s=marker_size, alpha=alpha,\n edgecolors='none', label=scatter_name)\n\n if trendline_y is not None:\n if trendline_x is None:\n trendline_x = x\n\n ax.plot(trendline_x, trendline_y, trendline_marker, color=trendline_color, label=trendline_name)\n\n if line_of_slope_1:\n low_x, high_x = ax.get_xlim()\n low_y, high_y = ax.get_ylim()\n low = max(low_x, low_y)\n high = min(high_x, high_y)\n ax.plot([low, high], [low, high], color=COLOR_PALETTE.secondary_70, label='1:1 line')\n\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n\n if legend:\n ax.legend()\n\n if subplot_title is not None:\n ax.set_title(subplot_title, fontsize=mpl.rcParams['ytick.labelsize'])\n\n return ax", "def _calculate_slope(klass, p1, p2):\n xdiff = p1.x - p2.x\n if xdiff:\n return (p1.y - p2.y) / xdiff\n else:\n return float(\"+inf\")", "def __init__(self, slope):\n self.slope = slope", "def line(self, x, y):\n self.call('line', x, y)", "def plot_line(m, b, xrange=None, yrange=None):\n if(G):\n line = Gnuplot.Func ('%f + (%f) * x' % (b, m))\n if xrange: G.set_range('xrange', xrange)\n if yrange: G.set_range('yrange', yrange)\n G.plot(line)\n wait_for_input()", "def line(\n self, x: Hashable | None = None, y: Hashable | None = None, **kwargs\n ) -> PlotAccessor:\n return self(kind=\"line\", x=x, y=y, **kwargs)", "def fit_line_Vo(x, y, n):\n x1=x[0:n]\n y1=y[0:n]\n X = sm.add_constant(x1)\n model = sm.OLS(y1, X, missing='drop') # ignores entires where x or y is NaN\n fit = model.fit()\n m=fit.params[1] \n b=fit.params[0] \n# stderr=fit.bse # could also return stderr in each via fit.bse\n \n N = 100 # could be just 2 if you are only drawing a straight line...\n points = np.linspace(x.min(), x.max(), N)\n \n \n fig=plt.figure(1) #PLOTING TOGETHER\n \n ax = fig.add_subplot(111)\n ax.plot(x, y)\n ax.plot(points, m*points + b)\n \n plt.legend(['data','fitt Vo'],fontsize=16)\n \n ax.set_yscale('linear',fontsize=16)\n ax.tick_params(axis='x', labelsize=14)\n ax.tick_params(axis='y', labelsize=14)\n plt.ylabel('Abs',fontsize=16)\n plt.xlabel('Time(sec)',fontsize=16)\n ax.grid()\n plt.grid()\n plt.show()\n \n print(\"The Vo fitted model is: {0:2f}*x+{1:2f} \".format(m, b))\n return m,b", "def linear_regression(x, y):\n #print(\"Fitting\", y, \"\\nagainst\", x)\n matrix = np.vstack( [x, np.ones_like(x)] ).T\n slope, intercept = np.linalg.lstsq(matrix,y)[0]\n #print(\"gives slope=\", slope, \"intercept=\", intercept)\n return (slope, intercept)", "def plot_lines(self):\n self.plot(3)", "def plot_scatter_and_linreg(df, col='b'):\n lr = LinearRegression()\n lr.fit(df['x'].reshape(-1, 1), df['y'])\n df.plot(kind='scatter', x='x', y='y', c=col, s=50)\n x_pred = np.linspace(df['x'].min(), df['x'].max(), 10)\n y_pred = lr.predict(x_pred.reshape(-1, 1))\n plt.plot(x_pred, y_pred, ls=':', c=col)\n\n plt.title(df.name)", "def linear_regression(data):\n x_values = [x for x, y in data] #Get x values\n y_values = [y for x, y in data] #Get y values\n x_mean = sum(x_values) / len(x_values) #Compute mean value of x\n y_mean = sum(y_values) / len(y_values) #Compute mean value of y\n # Compute\n coefficient = sum([(x - x_mean) * (y-y_mean) for x,y in data]) / sum([(x - x_mean) ** 2 for x in 
x_values])\n intercept = y_mean - coefficient * x_mean # Compute Intercept\n return((coefficient,intercept))", "def draw_lines(args, img, lines, color=[255, 0, 0], thickness=8):\n slopes = [ (line[0][3]-line[0][1])/(line[0][2]-line[0][0]) for line in lines]\n rights = [ [line, slope, line[0][1] - slope*line[0][0]] for line,slope in zip(lines, slopes) if slope > 0.0 ] # and slope < 0.5 and not np.isnan(slope) ]\n lefts = [ [line, slope, line[0][1] - slope*line[0][0]] for line,slope in zip(lines, slopes) if slope < 0.0 ] # and slope > -0.5 and not np.isnan(slope) ]\n #lefts[0] = [ [[x1,y1,x2,y2]] , slope , y_intercept ]\n\n y_mins = [ min(line[0][1],line[0][3]) for line in lines]\n y_min = min(y_mins)\n y_max = img.shape[0]\n\n log_new = [slopes, rights, lefts, y_mins, y_min, y_max]\n\n for lanes in [rights,lefts]:\n slope_mean = np.mean( [ lane[1] for lane in lanes ] )\n slope_std = np.std ( [ lane[1] for lane in lanes ] )\n if slope_std == 0:\n slope = slope_mean\n else:\n slope = np.mean( [ lane[1] for lane in lanes if lane[1] - slope_mean < 2*slope_std ] ) \n print()\n print('slope : {}'.format(slope))\n\n intercept_mean = np.mean( [ lane[2] for lane in lanes ] )\n intercept_std = np.std ( [ lane[2] for lane in lanes ] )\n if intercept_std == 0:\n intercept = intercept_mean\n else:\n intercept = np.mean( [ lane[2] for lane in lanes if lane[2] - intercept_mean < 2*intercept_std ] )\n print('intercept : {}'.format(intercept))\n \n x_min = int( ( y_min - intercept ) / slope ) \n x_max = int( ( y_max - intercept ) / slope )\n\n log_new.append(slope)\n log_new.append(intercept)\n log_new.append(x_min)\n log_new.append(x_max)\n\n cv2.line(img, (x_min, y_min), (x_max, y_max), color, thickness)\n\n try: \n log_line = pd.read_csv(args.path+args.csv_file, skiprows=[0], names=args.header)\n except:\n log_line = pd.DataFrame([ ], columns=args.header)\n finally:\n df = pd.DataFrame([ log_new ], columns=args.header)\n # update log: add new entry into the log\n result = pd.concat([log_line, df], ignore_index=True)\n result.to_csv(args.path+args.csv_file) #, index=False)", "def plot_scatter_points_lines(self):\n self.plot(2)", "def fit_slope_1d(X,Y):\n Sx = np.sum(X)\n Sy = np.sum(Y)\n Sxx = np.sum(np.power(X,2))\n Sxy = np.sum(X*Y)\n Syy = np.sum(np.power(Y,2)) \n n = len(X)*1.\n slope = (n*Sxy - Sx*Sy)/(n*Sxx-Sx**2)\n alpha = Sy/n - slope*Sx/n\n return slope, alpha", "def hLine(x_min, x_max, y):\n glClear(GL_COLOR_BUFFER_BIT)\n glColor3f(1.0, 0.0, 0.0)\n glPointSize(10.0) # Set the point with a specific radius\n glBegin(GL_POINTS) # Begin plotting point\n x = x_min\n while (x <= x_max):\n glVertex2f(x, y)\n x += 0.05\n glEnd()\n glFlush()", "def custom_lineplot(ax, x, y, error, xlims, ylims, color='red'):\n\n ax.errorbar(x, y, yerr=error, color=color, ls='--', marker='o', capsize=5, capthick=1, ecolor='black')\n\n ax.set_xlim(xlims)\n ax.set_ylim(ylims)\n\n return ax", "def setSlope(self, slope):\n self.angle = math.atan(slope)", "def calculate_slope(cnt):\n y = OrderedDict(cnt.most_common())\n v=np.log(list(y.values()))\n k=np.log(np.arange(1,len(v)+1,1))\n return linregress(k,v)", "def _draw_line(plot, hori, vert, color, text):\n plot.plot(hori, vert, '-o'+color)\n plot.text(hori[-1]-3, vert[-1]+2, text, color=color)", "def slope(Ser, n):\n slopes = [i*0 for i in range(n-1)]\n for i in range(n, len(Ser)+1):\n y = Ser[i-n:i]\n x = np.array(range(n))\n y_scl = (y - y.min())/ (y.max() - y.min())\n x_scl = (x - x.min())/ (x.max() - x.min())\n x_scl = sm.add_constant(x_scl)\n model = sm.OLS(y_scl, 
x_scl)\n result= model.fit()\n slopes.append(result.params[-1])\n slope_angl= (np.rad2deg(np.arctan(np.array(slopes))))\n return np.array(slope_angl)", "def linearfit_s1(x, y):\r\n x *= N.ones(1); y *= N.ones(1)\r\n mx = N.mean(x); my = N.mean(y)\r\n slope = 1.; yint = my-mx\r\n rl_slope, sd_slope, rl_yint, sd_yint, s_yint, cov = robust_linefit(x, y)\r\n \r\n return slope, sd_slope, yint, sd_yint, s_yint, cov", "def _xy_plot(cube, x_coord=None, reg_line=False, **plot_kwargs):\n plot_kwargs = deepcopy(plot_kwargs)\n if reg_line:\n if plot_kwargs.get('linestyle', '-') == '-':\n plot_kwargs.setdefault('marker', 'o')\n else:\n plot_kwargs.setdefault('marker', 's')\n plot_kwargs['linestyle'] = 'none'\n plot_kwargs.setdefault('markersize', 3)\n if x_coord is None:\n iris.plot.plot(cube, **plot_kwargs)\n if cube.coords(dim_coords=True):\n coord = cube.coord(dim_coords=True)\n x_data = coord.points\n else:\n coord = None\n x_data = np.arange(cube.shape[0])\n else:\n coord = cube.coord(x_coord)\n iris.plot.plot(coord, cube, **plot_kwargs)\n x_data = coord.points\n if not reg_line:\n return\n plot_kwargs['linestyle'] = '-'\n plot_kwargs['marker'] = None\n plot_kwargs.pop('label', None)\n y_data = cube.data\n reg = linregress(x_data, y_data)\n y_reg = reg.slope * x_data + reg.intercept\n plt.plot(x_data, y_reg, **plot_kwargs)", "def model_fixed_slope(train_x, train_y, test_x, slope=1):\n intercept = np.mean(train_y - train_x*slope)\n model_info = {'model': 'fixed_slope', 'const': intercept}\n predictions = test_x*slope + intercept\n return predictions, model_info", "def __call__(self, x):\n return self.slope * x + self.ordinate", "def getLine(self, **kwargs):\n return Line(self.p1, self.angle, **kwargs)", "def scatterplot(loc: List[CrimeStatistics]) -> None: \n # return None #stub\n #template based on visualization\n \n x = enrollment_list(loc)\n y = crime_list(loc)\n \n \n pyplot.scatter(x,y)\n pyplot.xlabel(\"Enrollment\")\n pyplot.ylabel(\"Total crime per campus\")\n pyplot.title(\"correlation between enrollment and crimes committed\")\n \n \n \n pyplot.show()\n print(linregress(x,y))\n \n \n return None", "def get_velocity(Velo, plot=False):\n slope = np.zeros(np.shape(Velo))\n for idx, v in enumerate(Velo):\n if len(v[0][v[0]>=0])>0 and len(v[1][v[0]>=0])>0: # when v is not empty\n regress = linregress(v[0][v[0]>=0], v[1][v[0]>=0])\n slope[idx, 1]=regress[0]\n if plot==True:\n\t plt.figure()\n\t plt.plot(v[0][v[0]>=0], v[1][v[0]>=0])\n\t plt.plot([0,v[0][-1]],[0,v[0][-1]*regress[0]])\n\n if len(v[0][v[0]<=0])>1:\n regress = linregress(v[0][v[0]<=0], v[1][v[0]<=0])\n slope[idx, 0]=regress[0]\n if plot==True:\n\t plt.plot(v[0][v[0]<=0], v[1][v[0]<=0])\n\t plt.plot([0,v[0][0]],[0,v[0][0]*regress[0]])\n else:\n slope[idx, 0]=np.NaN \n else:\n slope[idx, 1]=np.NaN\n slope[idx, 0]=np.NaN\n return slope", "def slope_from_origin(self):\n return round(math.degrees(abs(math.atan(self.y/self.x))), 2)", "def plot_slope_by_clust(ax, model, k, lower_bound=0, upper_bound=1, estimate_x_val=3, slope_col='r'):\n\n # Calculate slope\n x_slope = np.array([lower_bound, upper_bound])\n y_slope_pred_mean = model.obsmodel[k].model.predict(x_slope.reshape(-1, 1))[0]\n slope = ((y_slope_pred_mean[1] - y_slope_pred_mean[0]) / (x_slope[1] - x_slope[0]))[0]\n intercept = y_slope_pred_mean[0][0]\n\n x_slp_vals = np.array(ax.get_xlim())\n y_slp_vals = intercept + slope * x_slp_vals\n\n ax.plot(x_slp_vals, y_slp_vals, '--', color=slope_col, linewidth=3)\n\n\n # Estimate difference between slope prediction and MoGP at 
estimate_x_val years\n mogp_estim = model.obsmodel[k].model.predict(np.array([estimate_x_val]).reshape(-1, 1))[0][0][0]\n slope_estim = (intercept + slope * estimate_x_val)\n estim_diff = (mogp_estim - slope_estim)\n\n return estim_diff", "def plot_linear(x_range, w, b):\n\tplt.plot(x_range, x_range * w + b)", "def labelLine(line, x, label=None, align=True, **kwargs):\n\n ax = line.get_axes()\n xdata = line.get_xdata()\n ydata = line.get_ydata()\n\n if (x < xdata[0]) or (x > xdata[-1]):\n print('x label location is outside data range!')\n return\n\n # Find corresponding y coordinate and angle of the line\n ip = 1\n for i in range(len(xdata)):\n if x < xdata[i]:\n ip = i\n break\n\n y = ydata[ip-1] + (ydata[ip]-ydata[ip-1])*(x-xdata[ip-1])/(xdata[ip]-xdata[ip-1])\n\n if not label:\n label = line.get_label()\n\n if align:\n #Compute the slope\n dx = xdata[ip] - xdata[ip-1]\n dy = ydata[ip] - ydata[ip-1]\n ang = degrees(atan2(dy,dx))\n\n #Transform to screen co-ordinates\n pt = np.array([x,y]).reshape((1,2))\n trans_angle = ax.transData.transform_angles(np.array((ang,)),pt)[0]\n\n else:\n trans_angle = 0\n\n #Set a bunch of keyword arguments\n if 'color' not in kwargs:\n kwargs['color'] = line.get_color()\n\n if ('horizontalalignment' not in kwargs) and ('ha' not in kwargs):\n kwargs['ha'] = 'center'\n\n if ('verticalalignment' not in kwargs) and ('va' not in kwargs):\n kwargs['va'] = 'center'\n\n if 'backgroundcolor' not in kwargs:\n kwargs['backgroundcolor'] = ax.get_axis_bgcolor()\n\n if 'clip_on' not in kwargs:\n kwargs['clip_on'] = True\n\n if 'zorder' not in kwargs:\n kwargs['zorder'] = 2.5\n\n ax.text(x,y,label,rotation=trans_angle,**kwargs)", "def plot_linear_trend(ax, series, title='', xlabel='', ylabel=''):\n linear_trend = fit_linear_trend(series)\n plot_trend_data(ax, title, series)\n ax.plot(series.index, linear_trend)\n ax.set_title(title)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)", "def draw_line(xy1, xy2, ax, **kwargs): \n x_arr = [xy1[0], xy2[0]]\n y_arr = [xy1[1], xy2[1]]\n edge = Line2D([x_arr],[y_arr], **kwargs)\n ax.add_line(edge)\n\n return ax,", "def line_eq(self, independent, dependent):\n\n try:\n m = self.get_slope(independent, dependent)\n b = self.get_intercept(independent, dependent)\n lin_equation = \"y = \" + str(m) + \"x \"\n if(b < 0):\n lin_equation += \"+ (\" + str(m) + \")\"\n else:\n lin_equation += \"+ \" + str(b)\n \n return lin_equation\n except Exception as e:\n print(e)", "def slope(start, end):\n\tx1 = start[0]\n\ty1 = start[1]\n\tx2 = end[0]\n\ty2 = end[1]\n\ttop = float(y2 - y1) \n\tbot = float(x2 - x1)\n\tif bot == 0:\n\t\treturn None\n\telse:\n\t\treturn top / bot", "def make_line_points(y1, y2, line):\n if line is None:\n return None\n\n slope, intercept = line\n\n # make sure everything is integer as cv2.line requires it\n x1 = int((y1 - intercept) / slope)\n x2 = int((y2 - intercept) / slope)\n y1 = int(y1)\n y2 = int(y2)\n\n return ((x1, y1), (x2, y2))", "def plot(self, x, y, mX = 1, mY = 1):\n x = self.xy[x]\n y = self.xy[y]\n\n # Find pareto front:\n xp, yp = self.pareto_frontier(x,y, mX, mY)\n\n plt.plot(x, y, 'o')\n plt.plot(xp, yp, '-')\n plt.tight_layout()\n plt.show()", "def average_slope_intercept(self,image):\n left_fit = []\n right_fit = []\n if self.lines is None:\n return None\n for line in self.lines:\n for x1, y1, x2, y2 in line:\n # Polyfit computes the 1st order fitting of the lane points\n fit = np.polyfit((x1,x2), (y1,y2), 1)\n slope = fit[0]\n intercept = fit[1]\n if slope < 0: # y is reversed in image\n 
left_fit.append((slope, intercept))\n else:\n right_fit.append((slope, intercept))\n # add more weight to longer lines\n left_fit_average = np.average(left_fit, axis=0)\n right_fit_average = np.average(right_fit, axis=0)\n self.left_line = self.make_points(image,left_fit_average)\n self.right_line = self.make_points(image,right_fit_average)\n self.averaged_lines = [self.left_line, self.right_line]\n return self.averaged_lines", "def test_regression_of_slope_sum_distribution():\n\n meaningful_stats = pd.read_pickle(\n 'files/meaningfull_stats.pkl')\n\n print meaningful_stats['std'].tolist()\n print meaningful_stats['returns_diff'].tolist()\n\n def make_float(array):\n \"\"\"\n takes an array and makes all the number in it floats\n \"\"\"\n finial_array = []\n\n for number in array:\n finial_array.append(float(number))\n return finial_array\n\n seaborn.regplot(meaningful_stats['std'], meaningful_stats['returns_diff'])\n\n plt.title(\"STD and Returns\")\n\n plt.axhline(y=00, color='r', linestyle='-')\n\n plt.show()", "def draw_lines(asr,ax):\n r = asr.value\n y = 475.\n x = (r**2-y**2)**(.5)\n xs = np.linspace(-x,x,10)\n yt = np.zeros(xs.size)+y\n yb = np.zeros(xs.size)-y\n ax.plot(xs,yt,'-.',color='red',alpha=1.,linewidth=2,zorder=5000)\n ax.plot(xs,yb,'-.',color='red',alpha=1.,linewidth=2,zorder=5000)\n return ax", "def DrawLinePoint(*args, **kwargs):\n return _gdi_.DC_DrawLinePoint(*args, **kwargs)", "def get_slope(x, y, deg=1, err=[]):\n inverse_error = []\n for i in err:\n inv = 1/i\n inverse_error.append(i)\n\n if len(err)>0:\n z = np.polyfit(x, y, deg, w=inverse_error)\n else:\n z = np.polyfit(x, y, deg)\n\n m, b = z\n p = np.poly1d(z)\n\n return m, b, p" ]
[ "0.8633385", "0.837906", "0.8353659", "0.8208162", "0.80252755", "0.7704604", "0.76632464", "0.6855364", "0.6737687", "0.6706104", "0.6655756", "0.66362876", "0.6601379", "0.65816593", "0.6529675", "0.64858264", "0.6483495", "0.64728135", "0.6463536", "0.64627045", "0.64541286", "0.64335304", "0.64210016", "0.6392173", "0.6388669", "0.6354816", "0.6332207", "0.6319577", "0.63021046", "0.6281884", "0.62248576", "0.6219346", "0.62033296", "0.61819917", "0.6180803", "0.6177797", "0.616765", "0.6137515", "0.6125023", "0.6112288", "0.6103033", "0.6091219", "0.6082846", "0.60717803", "0.6067914", "0.6058348", "0.60462224", "0.60455376", "0.6042077", "0.6019379", "0.6004364", "0.598862", "0.5943156", "0.5934011", "0.59278864", "0.59098095", "0.5895657", "0.5883459", "0.5878388", "0.5876876", "0.58661264", "0.58660847", "0.58539957", "0.58473754", "0.5843465", "0.5833406", "0.5826755", "0.5823647", "0.58217114", "0.58159983", "0.58121574", "0.58047915", "0.5797959", "0.5796716", "0.5788628", "0.57848614", "0.5782377", "0.57756615", "0.5773158", "0.57730925", "0.5772088", "0.5752896", "0.5742748", "0.57353204", "0.57330513", "0.5725033", "0.5716351", "0.5715985", "0.57104146", "0.57099897", "0.5706574", "0.5705442", "0.57038754", "0.5694227", "0.56880176", "0.56874776", "0.56825155", "0.5657237", "0.56459737" ]
0.83326244
3
Only for human genome. If this is applied to other species, further modification is needed.
def mapChrForVersion(c):
    if c.startswith('chrM'): return 998
    elif c == 'chrX': return 999
    elif c == 'chrY': return 1000
    else: return int(c[3:])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_species(self):\n warn(f\"Default Update Species Called for Mechanism = {self.name}.\")\n return []", "def update_species_frames(self):\n pass", "def speciate(self):\n\n\n # Clear out the previous generation\n for spec in self.species.values():\n spec.champ = spec.get_champion()\n spec.flush()\n\n for genome in self.all_genomes:\n if genome.species_hint is not None:\n spec = self.species.get(genome.species_hint)\n if spec and spec.is_compatible(genome):\n spec.add_genome(genome)\n continue\n\n for spec in self.species.values():\n # check compatibility until found\n if spec.is_compatible(genome):\n spec.add_genome(genome)\n break\n else: # make a new spec\n spec_num = self.get_next_species_num()\n spec = Species(self, spec_num)\n spec.add_genome(genome)\n spec.champ = genome\n self.species[spec_num] = spec\n\n # Delete unnecessary species\n for spec_num, spec in list(self.species.items()):\n if len(spec)==0:\n self.species.pop(spec_num)", "def speciate_genomes(self):\n for genome in self.genomes:\n species_found = False\n\n for species in self.species:\n compatibility = genome.compatibility(species.leader)\n\n if compatibility < self.config.compatibility_threshold:\n species.add_genome(genome)\n species_found = True\n break\n\n if not species_found:\n new_species = Species(self.species_id, genome, self.config, self.breeder)\n self.species.append(new_species)\n self.species_id += 1", "def set_species(self, species):\n self.species = species", "def mutate(genome):\n mutated_genome = copy.deepcopy(genome) # make a copy of the DNA to mutate\n seed = random.randint(0,3)\n if len(mutated_genome) == 0: seed = 0\n if seed == 0:\n mutate_chromosome(mutated_genome)\n elif seed == 1:\n mutate_point(mutated_genome)\n elif seed == 2:\n mutate_color(mutated_genome)\n else: #seed ==3:\n mutate_opacity(mutated_genome)\n return mutated_genome", "def inherit(self, genome):\n self.da_scheme = genome.da_scheme\n self.weight_init = copy.deepcopy(genome.weight_init)\n self.species_module_ref_map = genome.species_module_ref_map", "def _fix_genotypes_object(self, genotypes, variant_info):\n # Checking the name (if there were duplications)\n if self.has_index and variant_info.name != genotypes.variant.name:\n if not variant_info.name.startswith(genotypes.variant.name):\n raise ValueError(\"Index file not synced with IMPUTE2 file\")\n genotypes.variant.name = variant_info.name\n\n # Trying to set multi-allelic information\n if self.has_index and self._index_has_location:\n # Location was in the index, so we can automatically set the\n # multi-allelic state of the genotypes\n genotypes.multiallelic = variant_info.multiallelic\n\n else:\n # Location was not in the index, so we check one marker before and\n # after the one we found\n logging.warning(\"Multiallelic variants are not detected on \"\n \"unindexed files.\")", "def update_species(self, mixture, concentration):\n # Create component specific species\n # self.product = add_species(model, \"Type\", name)\n \n # Create any other species needed by component mechanisms\n mechanisms = get_mechanisms(mixture, self)\n for name in mechanisms:\n mechanism = mechanisms[name]\n mechanism.update_species(mixture, self, concentration)\n \n # If the default member function gets used, issue a warning\n warn(\"component: default __init__ called for \" + name)", "def mutate(self, organism):\n mutated_org = organism.copy()\n \n gene_choices = mutated_org.genome.alphabet.letters\n \n # potentially mutate any gene in the genome\n for gene_index in 
range(len(mutated_org.genome)):\n mutation_chance = self._mutation_rand.random()\n # if we have a mutation\n if mutation_chance <= self._mutation_rate:\n # get a new letter\n new_letter = self._switch_rand.choice(gene_choices)\n mutated_org.genome[gene_index] = new_letter\n\n return mutated_org", "def update (self) :\n for met in self.gene :\n met(self)", "def mutate(self, chrom):\n pass", "def mutate_chromosome(mutated_genome):\n seed = random.randint(0,5)\n if len(mutated_genome) <= 1: seed = 0\n if seed == 0:\n insert_chromosome(mutated_genome)\n elif seed == 1:\n remove_chromosome(mutated_genome)\n elif seed == 2:\n switch_chromosomes(mutated_genome)\n elif seed == 3:\n shuffle_chromosomes(mutated_genome)\n elif seed == 4:\n increment_chromosome(mutated_genome)\n else: #seed == 5:\n decrement_chromosome(mutated_genome)", "def mutate(self, organism):\n mutated_org = organism.copy()\n gene_choices = mutated_org.genome.alphabet.letters\n\n mutation_chance = self._mutation_rand.random()\n if mutation_chance <= self._mutation_rate:\n # pick a gene position to mutate at\n mutation_pos = \\\n self._pos_rand.choice(range(len(mutated_org.genome)))\n \n # get a new letter to replace the position at\n new_letter = self._switch_rand.choice(gene_choices)\n\n mutated_org.genome[mutation_pos] = new_letter\n\n return mutated_org", "def mutate_seq(genome):\n for var in genome.get_variants():\n if var.type == \"snp\":\n mutate_snp(genome, var)\n elif var.type == \"indel\":\n mutate_indel(genome, var)\n elif var.type == \"deletion\":\n mutate_deletion(genome, var)\n elif var.type == \"translocation origin\":\n mutate_trans_orig(genome, var)\n elif var.type == \"translocation insert\":\n mutate_trans_ins(genome, var)", "def inject_genome(self, genome: Genome):\n self.population[genome.key] = genome", "def mutate_append(self, gene):\n gene.chromosome.append(self.tactics.mutate_select())", "def testExcessiveRamReallocation(self):\n self.assertEqual(1769, self.c.get_species_richness(1))\n self.assertEqual(1769, self.c.get_species_richness(2))", "def switch_chromosomes(mutated_genome):\n index1 = random.randint(0,max(0,len(mutated_genome)-1))\n index2 = random.randint(0,max(0,len(mutated_genome)-1))\n temp = mutated_genome[index1]\n mutated_genome[index1] = mutated_genome[index2]\n mutated_genome[index2] = temp", "def testDispersalMapSimulation(self):\n self.assertEqual(701, self.c.get_species_richness(1))", "def test_enlarge_1_add_nonreactive_species(self):\n m0 = Molecule(smiles='[He]')\n spc0 = self.rmg.reaction_model.make_new_species(m0, label='He', reactive=False)[0]\n self.rmg.reaction_model.enlarge(spc0)\n\n self.assertEqual(len(self.rmg.reaction_model.core.species), 1)\n self.assertFalse(self.rmg.reaction_model.core.species[0].reactive)", "def update_monster(self):\n\n\t\t# if nothing else gets added to this (no other changes to update) you could delete\n\t\t# this function and simply call self.choose_guard() in its place\n\t\tself.guarded_area = self.choose_guard()", "def test_enlarge_2_add_reactive_species(self):\n m1 = Molecule(smiles='CC')\n spc1 = self.rmg.reaction_model.make_new_species(m1, label='C2H4')[0]\n self.rmg.reaction_model.enlarge(spc1)\n\n self.assertEqual(len(self.rmg.reaction_model.core.species), 2)\n self.assertTrue(self.rmg.reaction_model.core.species[1].reactive)\n\n m2 = Molecule(smiles='[CH3]')\n spc2 = self.rmg.reaction_model.make_new_species(m2, label='CH3')[0]\n self.rmg.reaction_model.enlarge(spc2)\n\n self.assertEqual(len(self.rmg.reaction_model.core.species), 3)\n 
self.assertTrue(self.rmg.reaction_model.core.species[2].reactive)", "def add_animal(self, animal):\n try:\n if animal.saltwater:\n super().add_animal(animal)\n except AttributeError:\n raise AttributeError(\"Animal Is Incompatible With Biome\")", "def mutate(self):\n\n if len(self.genes) < 250:\n for g in self.genes:\n\n if MUTATION_CHANCE < random.random(): # random.random() gives float in [0,1)\n g.mutate()\n\n else:\n k = int(MUTATION_CHANCE*len(self.genes))\n for g in random.sample(self.genes,int(k)): #int(k)\n g.mutate()\n\n #To add random gene\n if ADD_GENE_CHANCE < random.random():\n self.genes.append(Gene(self.size)) #Call to Gene to add to genes list\n\n #To randomly remove genes\n\n if REM_GENE_CHANCE < random.random() and len(self.genes)>0:\n self.genes.remove(random.choice(self.genes))", "def exogenous_change(self):\n pass", "def exogenous_change(self):\n pass", "def exogenous_change(self):\n pass", "def species_converter():\n \n geneD = gene_list_converter() # dict with keys as human gene IDS, and values as human gene symbols\n convertD = homologue_parser() # dict with human gene symbols as keys, and mouse gene symbols as values\n# k = 0\n# for i,j in geneD.items():\n# print(i,j)\n# k += 1\n# if k == 10000: break\n# print(\"------------------------------------------------\")\n# k = 0\n# for i,j in convertD.items():\n# print(i,j)\n# k += 1\n# if k == 100: break\n \n procD = {} # key is human gene ID, value is a dict with human gene symbol as key, mouse gne symbol as value\n missL = []\n missIDL = []\n \n for geneK, geneV in geneD.items(): # geneK: human gene ID, geneV = human gene symbol\n if geneV == \"-\": # handle missing gene symbols\n procD[geneK] = {\"-\": [\"-\"]}\n continue\n \n if geneV in convertD:\n if geneK in procD:\n print(geneK,geneV,procD[geneK])\n raise ValueError\n \n else: procD[geneK] = {geneV: convertD[geneV]}\n \n else: \n missL.append(geneV) # collect gene symbols for which no mouse homologue was found\n missIDL.append(geneK) # collect matching gene IDs too\n procD[geneK] = {geneV: [\"-\"]} # store entries as missing for now\n \n missSeqD = prot_sequence_finder(missL) # prepare a dict with keys as missing human gene symbols and values as their sequences in humans where applicable\n missNameL = []\n missGIL = []\n print(\"BLASTing \" + str(len(missSeqD)) + \" sequences...\")\n for keyS, valueS in missSeqD.items():\n missGIL.append(blaster(valueS)) # blast sequences and get their mouse refseq protein GI. this step will take a lot of time. this list will contain mouse protein genbank accessions\n missNameL.append(keyS) # the matching human gene symbols\n \n missSymbolD = prot_id_converter(missGIL, \"10090\", \"genbankproteinaccession\", \"genesymbol\") # convert protein GIs to gene symbols. 
keys are mouse protein GIs, values are mouse gene symbols\n print(missSymbolD)\n\n \n for i in range(len(missIDL)):\n if missL[i] in missNameL: procD[missIDL[i]] = {missL[i]: [missSymbolD[missGIL[missNameL.index(missL[i])]]]} \n \n mouseGeneL = []\n for keyS in procD.keys():\n for valueL in procD[keyS].values(): \n if valueL == [\"-\"]: continue\n for valueI in valueL:\n if valueI in mouseGeneL: continue\n mouseGeneL.append(valueI)\n \n mouseD = prot_id_converter(mouseGeneL, \"10090\", \"genesymbol\", \"geneid\")\n \n with file_importer(\"data/converted_gene_list.csv\", \"w\") as outF:\n for keyS in procD.keys():\n outF.write(keyS + \",\")\n for keyN, valueN in procD[keyS].items():\n outF.write(keyN + \",\")\n if keyN == \"-\":\n outF.write(\"-,-\\n\")\n continue\n valL = []\n for valueI in valueN:\n if valueI in mouseD: valL.append(mouseD[valueI])\n else: valL.append(\"-\")\n \n if valueI is valueN[-1]: outF.write(valueI + \",\")\n else: outF.write(valueI + \";\")\n \n for valI in valL:\n if valI is valL[-1]: \n if \"//\" in valI: \n valIL = valI.split(\"//\")\n for valILI in valIL:\n if valILI is valIL[-1]: outF.write(valILI + \"\\n\")\n else: outF.write(valILI + \";\")\n else: outF.write(valI + \"\\n\")\n \n else: \n if \"//\" in valI: \n valIL = valI.split(\"//\")\n for valILI in valIL:\n outF.write(valILI + \";\")\n else: outF.write(valI + \";\")\n print(\"file written\")", "def modifyMastersWithMultipleInstances(self):\n # Nothing to do\n pass", "def test_unique_genome(self):\n p1 = self.player()\n p2 = self.player()\n self.assertTrue(p1.genome is p2.genome)", "def mutate(self, child):\n for i in range(0, self.chromosome_length):\n if random.randint(1, 100) <= self.mutation_chance:\n child[i] = self.random_gene()\n return child", "def try_insert_genome(self, genome):\n raise Exception(\"called abstract insert_genome method\")", "def switch_opacities(mutated_genome):\n index1 = random.randint(0,max(0,len(mutated_genome)-1))\n index2 = random.randint(0,max(0,len(mutated_genome)-1))\n temp = mutated_genome[index1][1]\n mutated_genome[index1][1] = mutated_genome[index2][1]\n mutated_genome[index2][1] = temp", "def set_optimizeable_hydrogens(self):\n for residue in self.biomolecule.residues:\n optinstance = self.is_optimizeable(residue)\n if optinstance is None:\n continue\n for atom in residue.atoms:\n if atom.name in optinstance.map:\n atom.optimizeable = 1", "def _update(self, other):\n # NOTE: detail map properties should NEVER be overridden. NEVER. EVER. 
kthx.\n if other.use_alpha:\n self.use_alpha = True\n if other.mipmap:\n self.mipmap = True", "def fix_chromosome(self, copy=False):\n region = self.copy() if copy else self\n if region.chromosome.startswith('chr'):\n region.chromosome = region.chromosome[3:]\n else:\n region.chromosome = 'chr' + region.chromosome\n return region", "def adjust_fitness_scores(self):\n\n for species in self.species:\n species.adjust_fitness()", "def test_migrate_all_herbi_in_cell_new_location(\n standard_map_peninsula):\n animals.Herbivores.parameters[\"mu\"] = 1000\n mock_ek = {(1, 18): 2}\n standard_map_peninsula.raster_model[(\n 1, 19)]._migrate_all_herbivores_in_cell(\n standard_map_peninsula, (1, 19), mock_ek)\n animals.Herbivores.parameters[\"mu\"] = 0.25\n assert standard_map_peninsula.raster_model[(1, 19)].herbivore_list == []\n assert standard_map_peninsula.raster_model[(1, 18)].herbivore_list != []", "def save(self, *args, **kwargs):\n self.chromosome_no = CHROMOSOME_STR_TO_CHROMOSOME_INT.get(self.chromosome, 0)\n super().save(*args, **kwargs)", "def save(self, *args, **kwargs):\n self.chromosome_no = CHROMOSOME_STR_TO_CHROMOSOME_INT.get(self.chromosome, 0)\n super().save(*args, **kwargs)", "def remove_unimproved_species(self):\n for spec_num, spec in list(self.species.items()):\n if self.gen_num - spec.gen_last_improved > self.species_dropoff_age:\n self.species.pop(spec_num)", "def family(self):", "def add_species(self, side='R'): \n self.unpack_ecosystem()\n if side == 'L':\n self.ecosystem.insert(0,Species(self))\n elif side == \"R\":\n self.ecosystem.append(Species(self))\n self.num_species+=1\n self.pack_ecosystem()\n self.request_payment(\"discard\")", "def replaceLoser(self, parents, newoffspring): \n\n loser = min(parents, key=self.applyFitness)\n\n for index, individual in enumerate(self.currentGeneration):\n if (individual == loser):\n self.currentGeneration[index] = newoffspring", "def test_patch_bios_unit(self):\n pass", "def test_big_family(self):\n\n self.taxon_tester('Staphylinidae')", "def reindex_subcomponent_taxa(self):\n ti_mutable = self.taxon_set._is_mutable\n self.taxon_set._is_mutable = True\n new_map = CharacterDataMap()\n for taxon, seq in self.taxon_seq_map.items():\n taxon = self.taxon_set.require_taxon(label=taxon.label)\n new_map[taxon] = seq\n self.taxon_set._is_mutable = ti_mutable\n self.taxon_seq_map = new_map", "def adjust_fitness(self):\n # see genetics.cpp:2668 \"Can change the fitness of the organisms in the\n # species to be higher for very new species (to protect them)\"\n # NOTE I don't believe this is found in the paper\n # Looks like they used a 1 for this param anyway, so it didn't do\n # anything\n\n cur_max = self.get_champion().fitness\n if cur_max > self.max_fitness_ever:\n self.max_fitness_ever = cur_max\n self.gen_last_improved = self.pop.gen_num\n\n for g in self.genomes:\n g.adj_fitness = g.fitness/len(self)\n\n # genetics.cpp:2699 Kill species that haven't progressed for a long\n # time by dividing fitness of all individuals in spec by 100. 
Weird way\n # to do it.\n if ((self.pop.gen_num - self.gen_last_improved) >\n self.pop.species_dropoff_age):\n for g in self.genomes:\n g.adj_fitness *= .01", "def mountable(self):\n return True", "def pre_modify(self):\n return 0", "def clone(self):\n raise GAError, 'must define clone() in your genome class'", "def testDispersalSimulation(self):\n self.assertEqual(1172, self.c.get_species_richness(1))", "def add_gene(self, human_gene, ortholog):\n if human_gene not in self.genes:\n self.genes[human_gene] = list()\n self.genes[human_gene].append(ortholog)", "def update_taxon_set(self):\n assert self.taxon_set is not None\n for taxon in self.taxon_seq_map:\n if taxon not in self.taxon_set:\n self.taxon_set.add(taxon)", "def mutate(self, mutation_record, attribute_magnitude=1, topological_magnitude=1, module_population=None, gen=-1):\n if Config.module_retention and random.random() < 0.1 * topological_magnitude and self.species_module_ref_map:\n # release a module_individual\n tries = 100\n\n while tries > 0:\n species_no = random.choice(list(self.species_module_ref_map.keys()))\n if self.species_module_ref_map[species_no] is not None:\n self.species_module_ref_map[species_no] = None\n break\n tries -= 1\n\n if Config.evolve_data_augmentations and random.random() < 0.2:\n self.da_scheme = None\n\n if Config.blueprint_nodes_use_representatives:\n # All representative mutation detailed in Sasha's paper section 3.2.4\n reps = self.representatives\n for node in self._nodes.values():\n if gen == -1:\n raise Exception('Invalid generation number: -1')\n\n # Increase mutation chance early to better explore the space of representatives\n chance = Config.rep_mutation_chance_early if gen <= 3 else Config.rep_mutation_chance_late\n if random.random() > chance: # no rep mutation\n continue\n\n old_rep = copy.deepcopy(node.representative)\n new_rep = node.choose_representative(module_population.individuals, reps)\n\n # Chance to mutate all nodes with the same representative\n if random.random() < Config.similar_rep_mutation_chance:\n for other_node in self._nodes.values():\n if other_node.representative == old_rep:\n other_node.representative = new_rep\n\n nodes_before_mutation = set(self._nodes.keys())\n mutated = super()._mutate(mutation_record, Props.BP_NODE_MUTATION_CHANCE, Props.BP_CONN_MUTATION_CHANCE,\n attribute_magnitude=attribute_magnitude, topological_magnitude=topological_magnitude)\n # Check if a node was added and assign it a representative\n if Config.blueprint_nodes_use_representatives:\n for node_id in self._nodes.keys():\n if node_id not in nodes_before_mutation:\n self._nodes[node_id].choose_representative(module_population.individuals, reps)\n\n return mutated", "def genome(self, genome_id=\"hg19\"):\n self.command(\"genome %(genome_id)s\" % locals())", "def registeredBy(self, world):\n self.world = world\n self._determineSuffix()\n self.short = \"\"\n self.short = self.shortName(3)", "def inherit_species_module_mapping(self, generation, other, acc, da_scheme=None, inherit_module_mapping=True):\n if acc > self.max_accuracy:\n if inherit_module_mapping:\n other.update_module_refs(generation)\n self.species_module_ref_map = other.species_module_ref_map\n\n self.max_accuracy = acc\n\n if da_scheme is not None:\n self.da_scheme = da_scheme", "def add_species(self, side='R'): \n #self.disable_all_buttons() \n Player.add_species(self, side=side)", "def test_make_new_species(self):\n\n # adding 3 unique species:\n cerm = CoreEdgeReactionModel()\n\n spcs = [Species().from_smiles('[OH]'),\n 
Species().from_smiles('CC'),\n Species().from_smiles('[CH3]')]\n\n for spc in spcs:\n cerm.make_new_species(spc)\n\n self.assertEquals(len(cerm.species_dict), len(spcs))\n self.assertEquals(len(cerm.index_species_dict), len(spcs))\n\n # adding 3 unique, and 1 already existing species:\n cerm = CoreEdgeReactionModel()\n\n spcs = [Species().from_smiles('[OH]'),\n Species().from_smiles('CC'),\n Species().from_smiles('[CH3]'),\n Species().from_smiles('CC')] # duplicate species\n\n for spc in spcs:\n cerm.make_new_species(spc)\n\n self.assertEquals(len(cerm.species_dict), len(spcs) - 1)\n self.assertEquals(len(cerm.index_species_dict), len(spcs) - 1)", "def is_other_chromosome(chromosome_name):\n if is_cassette_chromosome(chromosome_name): return False\n if chromosome_name.startswith('chr') or chromosome_name.startswith('scaffold'): return False\n else: return True", "def add_genome(self, genome):\n self.genomes.append(genome)", "def update(self):\n self.chromosome_list = self.next_population\n self.reset_mating_pool()\n self.reset_next_population()", "def setAssociatedSpecies(self, *args):\n return _libsbml.GeneProduct_setAssociatedSpecies(self, *args)", "def testDispersalSimulation(self):\n self.assertEqual(715, self.c.get_species_richness(1))", "def mezclar_bolsa(self):", "def update(self, species, poscar, sort=True):\n\n if sort == False:\n # for each 'tag' in the IndividualSpecies, create a list in self.tags\n for key in list(species.values())[0].tags.keys():\n if key.lower() in (VASP_TAG_INT_LIST + VASP_TAG_FLOAT_LIST):\n self.tags[key] = 0.\n for site in poscar.basis:\n self.tags[key] += float(species[site.occupant].tags[key])\n if key.lower() in VASP_TAG_INT_LIST:\n self.tags[key] = int(self.tags[key])\n else:\n self.tags[key] = []\n if key.lower() in (VASP_TAG_SPECF_LIST + VASP_TAG_SPECI_LIST):\n # add the value of the 'tag' for each species into the self.tags list\n for spec in poscar.type_atoms_alias:\n self.tags[key].append(species[spec].tags[key])\n else:\n # add the value of the 'tag' for each atom into the self.tags list\n for site in poscar.basis:\n self.tags[key].append( species[site.occupant].tags[key] )\n else:\n pos = poscar.basis_dict()\n # for each 'tag' in the IndividualSpecies, create a list in self.tags\n for key in list(species.values())[0].tags.keys():\n # for key in species[species.keys()[0]].tags.keys():\n if key.lower() in (VASP_TAG_INT_LIST + VASP_TAG_FLOAT_LIST):\n self.tags[key] = 0.\n for site in poscar.basis:\n self.tags[key] += float(species[site.occupant].tags[key])\n if key.lower() in VASP_TAG_INT_LIST:\n self.tags[key] = int(self.tags[key])\n else:\n self.tags[key] = []\n # add the value of the 'tag' for each atom into the self.tags list\n for alias in sorted(pos.keys()):\n if key.lower() in (VASP_TAG_SPECF_LIST + VASP_TAG_SPECI_LIST):\n # for species-specific tags, use the value specified for the\n # species whose pseudopotential is being used for this alias\n for name in species.keys():\n if species[name].alias == alias and species[name].write_potcar:\n self.tags[key].append(species[name].tags[key])\n break\n else:\n for name in species.keys():\n count=0\n for site in pos[alias]:\n if site.occupant == name:\n count += 1\n if species[name].alias == alias:\n if count > 0:\n self.tags[key].append( str(count) + \"*\" + str(species[name].tags[key]) )", "def _init_inherit_physical_attributes(self):\n config = self.person.cosmos.config\n mother, father = self.person.biological_mother, self.person.biological_father\n parents = (mother.body, father.body)\n # 
Handedness\n if random.random() < config.heritability_of_handedness:\n takes_after = random.choice(parents)\n self.left_handed = Feature(value=takes_after.left_handed, inherited_from=takes_after)\n self.right_handed = Feature(value=takes_after.right_handed, inherited_from=takes_after)\n # Hustle\n if random.random() < config.heritability_of_hustle:\n takes_after = random.choice(parents)\n inherited_hustle = takes_after.hustle\n mutated_hustle = normal(inherited_hustle, config.hustle_mutation_sd)\n self.hustle = Feature(value=mutated_hustle, inherited_from=takes_after)\n else:\n pass # TODO SET UP GENERATING FROM NOTHING", "def mutate_point(mutated_genome):\n if phenotype == 'Poly':\n mutate_point_poly(mutated_genome)\n elif phenotype == 'Poly3':\n mutate_point_poly3(mutated_genome)\n elif phenotype == 'Trig':\n mutate_point_trig(mutated_genome)\n elif phenotype == 'Circ':\n mutate_point_circ(mutated_genome)\n elif phenotype == 'Ellip':\n # ellipses points are the same as rectangles\n mutate_point_rect(mutated_genome)\n elif phenotype == 'Rect':\n mutate_point_rect(mutated_genome)\n elif phenotype == 'Line':\n # poly3 is same point setup as line\n mutate_point_poly3(mutated_genome)\n elif phenotype == 'WLine':\n mutate_point_wline(mutated_genome)", "def _set_joint_genome_info(self, gene_annotation_header_values, total_genes_in_genome_values):\n # Merge any pieces of global information that's not per-dataset\n self.gene_annotation_header = merge_values_to_unique(gene_annotation_header_values, blank_value=[], convert_for_set=tuple, \n value_name='gene_annotation_header', context='datasets in multi-dataset')\n self.total_genes_in_genome = merge_values_to_unique(total_genes_in_genome_values, blank_value=0, \n value_name='total_genes_in_genome', context='datasets in multi-dataset')", "def update_gene_info(ensembl_info, word, value):\n if \"gene\" in word:\n if \"id\" in word:\n ensembl_info[\"ensembl_gene_id\"] = value\n elif \"start\" in word:\n ensembl_info[\"gene_start\"] = int(value)\n elif \"end\" in word:\n ensembl_info[\"gene_end\"] = int(value)\n return ensembl_info", "def set_occupant(self):\n\t\tself.occupant = 1", "def on_sense_sonar(self, dist):\n raise NotImplementedError()", "def gene(self, idx, value):\r\n self.genes[idx] = value", "def set_animal_parameters(species, params):\n animals = {'Herbivore': Herbivore, 'Carnivore': Carnivore}\n animals[species].set_params(params)", "def mutate_opacity(mutated_genome):\n seed = random.randint(0,2)\n if seed == 0:\n new_opacity(mutated_genome)\n elif seed == 1:\n change_opacity(mutated_genome)\n else: #seed == 2:\n switch_opacities(mutated_genome)\n #else: #seed == 3: # depricated\n # shuffle_opacities(mutated_genome)", "def __init__(self, config: GenedescConfigParser, species: str, go_relations: List[str] = None,\n do_relations: List[str] = None, use_cache: bool = False):\n self.config = config\n raw_files_source = config.get_wb_raw_file_sources()\n cache_location = config.get_cache_dir()\n release_version = config.get_wb_release()\n organisms_info = config.get_wb_organisms_info()\n project_id = organisms_info[species][\"project_id\"]\n self.sister_sp_fullname = \"\"\n if \"main_sister_species\" in organisms_info[species] and \"full_name\" in \\\n organisms_info[organisms_info[species][\"main_sister_species\"]]:\n self.sister_sp_fullname = organisms_info[organisms_info[species][\"main_sister_species\"]][\"full_name\"]\n self.orth_fullnames = \"\"\n if \"ortholog\" in organisms_info[species] and all([\"full_name\" in 
organisms_info[ortholog_sp] for ortholog_sp in\n organisms_info[species][\"ortholog\"]]):\n self.orth_fullnames = [organisms_info[ortholog_sp][\"full_name\"] for ortholog_sp in\n organisms_info[species][\"ortholog\"]]\n expression_cluster_anatomy_prefix = organisms_info[species][\"ec_anatomy_prefix\"] if \\\n \"ec_anatomy_prefix\" in organisms_info[species] else None\n expression_cluster_molreg_prefix = organisms_info[species][\"ec_molreg_prefix\"] if \\\n \"ec_molreg_prefix\" in organisms_info[species] else None\n expression_cluster_genereg_prefix = organisms_info[species][\"ec_genereg_prefix\"] if \\\n \"ec_genereg_prefix\" in organisms_info[species] else None\n super().__init__(go_relations=go_relations, do_relations=do_relations, use_cache=use_cache)\n self.gene_data_cache_path = os.path.join(cache_location, \"wormbase\", release_version, \"species\", species,\n project_id, \"annotation\", species + '.' + project_id +\n '.' + release_version + \".geneIDs.txt.gz\")\n self.gene_data_url = raw_files_source + '/' + release_version + '/species/' + species + '/' + project_id + \\\n '/annotation/' + species + '.' + project_id + '.' + release_version + '.geneIDs.txt.gz'\n self.go_ontology_cache_path = os.path.join(cache_location, \"wormbase\", release_version, \"ONTOLOGY\",\n \"gene_ontology.\" + release_version + \".obo\")\n self.go_ontology_url = raw_files_source + '/' + release_version + '/ONTOLOGY/gene_ontology.' + \\\n release_version + '.obo'\n self.go_associations_cache_path = os.path.join(cache_location, \"wormbase\", release_version, \"species\", species,\n project_id, \"annotation\", species + \".\" + project_id + \".\" + release_version +\n \".gene_association.wb.gz\")\n self.go_associations_url = raw_files_source + '/' + release_version + '/species/' + species + '/' + project_id + \\\n '/annotation/' + species + '.' + project_id + '.' + release_version + \".gene_association.wb.gz\"\n self.do_ontology_url = raw_files_source + '/' + release_version + '/ONTOLOGY/disease_ontology.' + \\\n release_version + '.obo'\n self.do_ontology_cache_path = os.path.join(cache_location, \"wormbase\", release_version, \"ONTOLOGY\",\n \"disease_ontology.\" + release_version + \".obo\")\n self.do_associations_cache_path = os.path.join(cache_location, \"wormbase\", release_version, \"ONTOLOGY\",\n \"disease_associations.by_orthology.\" + release_version +\n \".tsv.txt\")\n self.do_associations_url = raw_files_source + '/' + release_version + \\\n '/ONTOLOGY/disease_association.by_orthology.' + release_version + '.tsv.txt'\n self.do_associations_new_cache_path = os.path.join(cache_location, \"wormbase\", release_version, 'ONTOLOGY',\n 'disease_association.' + release_version + '.daf.txt')\n self.do_associations_new_url = raw_files_source + '/' + release_version + '/ONTOLOGY/disease_association.' + \\\n release_version + '.daf.txt'\n self.orthology_url = raw_files_source + '/' + release_version + '/species/' + species + '/' + project_id + \\\n '/annotation/' + species + '.' + project_id + '.' + release_version + '.orthologs.txt.gz'\n self.orthology_cache_path = os.path.join(cache_location, \"wormbase\", release_version, \"species\", species,\n project_id, \"annotation\", species + '.' + project_id + '.' +\n release_version + \".orthologs.txt.gz\")\n self.orthologs = defaultdict(lambda: defaultdict(list))\n self.protein_domain_url = raw_files_source + '/' + release_version + '/species/' + species + '/' + \\\n project_id + '/annotation/' + species + '.' + project_id + '.' 
+ release_version + \\\n '.protein_domains.csv.gz'\n self.protein_domain_cache_path = os.path.join(cache_location, \"wormbase\", release_version, \"species\", species,\n project_id, \"annotation\", species + '.' + project_id +\n '.' + release_version + \".protein_domains.csv.gz\")\n self.protein_domains = defaultdict(list)\n self.expression_ontology_cache_path = os.path.join(cache_location, \"wormbase\", release_version, \"ONTOLOGY\",\n \"anatomy_ontology.\" + release_version + \".obo\")\n self.expression_ontology_url = raw_files_source + '/' + release_version + '/ONTOLOGY/anatomy_ontology.' + \\\n release_version + '.obo'\n self.expression_associations_cache_path = os.path.join(cache_location, \"wormbase\", release_version, \"ONTOLOGY\",\n \"anatomy_association.\" + release_version + \".wb\")\n self.expression_associations_url = raw_files_source + '/' + release_version + \\\n '/ONTOLOGY/anatomy_association.' + release_version + '.wb'\n self.expression_cluster_anatomy_url = self._get_expression_cluster_url(\n prefix=expression_cluster_anatomy_prefix, ec_type=\"anatomy\", release_version=release_version)\n self.expression_cluster_anatomy_cache_path = self._get_expression_cluster_cache_path(\n prefix=expression_cluster_anatomy_prefix, ec_type=\"anatomy\", release_version=release_version,\n cache_location=cache_location)\n self.expression_cluster_anatomy_data = defaultdict(list) if self.expression_cluster_anatomy_url else None\n self.expression_cluster_molreg_url = self._get_expression_cluster_url(\n prefix=expression_cluster_molreg_prefix, ec_type=\"molReg\", release_version=release_version)\n self.expression_cluster_molreg_cache_path = self._get_expression_cluster_cache_path(\n prefix=expression_cluster_molreg_prefix, ec_type=\"molReg\", release_version=release_version,\n cache_location=cache_location)\n self.expression_cluster_molreg_data = defaultdict(list) if self.expression_cluster_molreg_url else None\n self.expression_cluster_genereg_url = self._get_expression_cluster_url(\n prefix=expression_cluster_genereg_prefix, ec_type=\"geneReg\", release_version=release_version)\n self.expression_cluster_genereg_cache_path = self._get_expression_cluster_cache_path(\n prefix=expression_cluster_genereg_prefix, ec_type=\"geneReg\", release_version=release_version,\n cache_location=cache_location)\n self.expression_cluster_genereg_data = defaultdict(list) if self.expression_cluster_genereg_url else None", "def surgery(actuation_name,exposed_robot,transform_robot,possibility):\n goodluck = random.randint(0,100)\n \n if goodluck < possibility: # Surgery will transform exposed_robot to transform_robot with base stats of transform_robot.\n old_name = exposed_robot.name #Store the old name of exposed_robot\n exposed_robot = transform_robot.__class__() \n print(\"{0} is affected by {2}. 
It's name is now {1}!\".format(old_name,exposed_robot.name,actuation_name))\n return exposed_robot\n else:\n print(\"{1} is not succesful on {0}!\".format(exposed_robot.name,actuation_name))\n return exposed_robot", "def modified(self):\n raise NotImplementedError", "def test_species(self):\n spec = input.species('C2H4', os.path.join(self.directory, 'species', 'C2H4', 'ethene.py'))\n self.assertTrue(isinstance(spec, Species))\n self.assertEqual(len(spec.molecule), 0)", "def update_module_refs(self, generation):\n self.species_module_ref_map = {}\n\n if Config.blueprint_nodes_use_representatives:\n # For representatives species_module_ref_map becomes: representative -> chosen module\n reps = self.representatives\n for rep, (spc_index, module_index) in self.species_module_index_map.items():\n if rep not in reps: # removes reps that no longer exist\n continue\n self.species_module_ref_map[rep] = generation.module_population.species[spc_index][module_index]\n else:\n for spc_index, module_index in self.species_module_index_map.items():\n if isinstance(module_index, tuple):\n \"\"\"this is an override index. this module is found in a different species\"\"\"\n if not Config.allow_cross_species_mappings:\n raise Exception('Cross species mapping disabled, but received tuple as value in map')\n spc, mod = module_index\n self.species_module_ref_map[spc_index] = generation.module_population.species[spc][mod]\n else:\n self.species_module_ref_map[spc_index] = generation.module_population.species[spc_index][\n module_index]", "def strainNameHomogene(strain):\n\tstrain = strain.split(\",\")[0]\n\tstrain = strain.split(\"=\")[0]\n\tstrain = strain.split(\"(\")[0]\n\tstrain = strain.split(\"genome\")[0]\n\twords = [\"chromosome \", \"chromosome\", \"genomic island \", \"variant \", \"main \", \"complete \", \"genomic \", \"sequence \", \"sequence\", \"island \", \"Salmonella Genomic Island 1 \", \"genome \", \"strain \", \"plasmid \", \"str. 
\", \"REU80928 \"]\n\tfor word in words:\n\t\tstrain = strain.replace(word, \"\")\n\tstrain = strain.replace(\":\", \"-\")\n\tstrain = strain.replace(\" \", \"_\")\n\tif strain[-1] == \"_\": strain.rstrip()\n\tif strain[-1] == \"_\": strain.rstrip()\n\treturn strain", "def make_synthetic_genome(human, phix, bacteria, size, dir, isfastq):\n \n # generate human reads\n get_human_reads(human, size, dir, isfastq)\n \n # generate phix reads\n get_phix_reads(phix, size, dir, isfastq)\n \n # generate bacteria reads\n get_bacteria_reads(bacteria, size, dir, isfastq)\n \n # generate virus reads\n get_virus_reads(1 - human - phix - bacteria, size, dir, isfastq)", "def _add_transform_genes(self):\n pass", "def moi(self):\n\n pass", "def save(self, *args, **kwargs):\n self.trait_flavor_name = self.set_trait_flavor_name()\n # Call the \"real\" save method.\n super(HarmonizedTrait, self).save(*args, **kwargs)", "def __species_annotation__(self,aggregation_so_far,annotation):\n return Survey.__species_annotation__(self,aggregation_so_far,[annotation])", "def get_my_mutations(quality_cutoff, coverage_cutoff):\n\n # my_mutations = {}\n # with open('/home/perry/Projects/loh/working/murim.exome.aa_chg.vars') as f:\n # for line in f:\n # my_mutations[line.strip()] = True\n # return my_mutations\n\n bed_file = 'data/nimblegen/2.1M_Human_Exome_Annotation/2.1M_Human_Exome.bed'\n bed_chr2st2end, bed_chr2posLs = bed_tools.load_bed(bed_file, \n 'NimbleGen Tiled Regions')\n # NimbleGen Tiled Regions\n # Target Regions\n\n use_data_dir = '/home/perry/Projects/loh/data/all_non_ref_hg18/'\n all_somatic = {}\n all_inherited = {}\n cancer_qualities = mutations.get_consensus_qualities(use_data_dir + 'yusanT.ann')\n normal_qualities = mutations.get_consensus_qualities(use_data_dir + 'yusanN.ann')\n for exome in global_settings.exome_types:\n data_file = use_data_dir + exome\n inherited, somatic, murim = mutations.get_mutations(data_file, normal_qualities,\n cancer_qualities, quality_cutoff,\n False, coverage_cutoff)\n # only use the bed_tools NimbleGen\n # restriction for hg18 data\n for s in somatic['yusan']: \n chr, pos = s.split(':')\n if bed_tools.find_location_in_bed(chr, int(pos), \n bed_chr2posLs,\n bed_chr2st2end):\n all_somatic[s] = True\n for i in inherited['yusan']: \n chr, pos = s.split(':')\n if bed_tools.find_location_in_bed(chr, int(pos), \n bed_chr2posLs,\n bed_chr2st2end):\n all_inherited[i] = True\n return (set(all_somatic.keys()) & set(get_murim_covered(quality_cutoff)), set(all_inherited.keys()) & set(get_murim_covered(quality_cutoff)))", "def update_entropy(self, save=True):\n\n #min_consensus = self.mturk_assignment.hit.hit_type \\\n #.experiment_settings.min_output_consensus\n min_consensus = 3\n\n # update substance label and entropy\n self.substance = None\n substances = self.substances.filter(invalid=False) \\\n .values_list('substance_id', flat=True)\n if substances:\n self.substance_entropy = compute_entropy(substances)\n hist = Counter(substances).most_common(2)\n substance_id, count = hist[0]\n # must be at least the consensus, and larger than the 2nd choice\n if count >= min_consensus and (len(hist) == 1 or hist[1][1] < count):\n self.substance_id = substance_id\n self.quality_method = 'M'\n\n # update name label and entropy\n self.name = None\n names = self.names.filter(invalid=False) \\\n .values_list('name_id', flat=True)\n if names.exists():\n self.name_entropy = compute_entropy(names)\n hist = Counter(names).most_common(2)\n name_id, count = hist[0]\n # must be at least the consensus, 
and larger than the 2nd choice\n if count >= min_consensus and (len(hist) == 1 or hist[1][1] < count):\n self.name_id = name_id\n self.quality_method = 'M'\n\n # update rectified normal\n self.rectified_normal = None\n if self.planar:\n for n in self.rectified_normals.all():\n if n.better_than(self.rectified_normal):\n self.rectified_normal = n\n if self.rectified_normal and not self.rectified_normal.correct:\n self.rectified_normal = None\n\n # update bsdf\n self.bsdf_wd = None\n for b in self.bsdfs_wd.all():\n if b.gloss_correct and b.color_correct and b.better_than(self.bsdf_wd):\n self.bsdf_wd = b\n\n if save:\n self.save()", "def mutate_snp(genome, var):\n nt_options = {'A':['T','G','C'], 'T':['A','G','C'], 'G':['A','T','C'], 'C':['A','T','G']}\n n = random.randint(0,2)\n nt = nt_options.get(genome.seq[var.start])[n]\n genome.mut_seq[var.start] = nt\n\n var.ref = genome.seq[var.start]\n var.alt = nt", "def map_to_mgi(adata, copy = False):\n from pybiomart import Server\n # connest to the biomart server\n server = Server(host='http://www.ensembl.org')\n\n # retrieve the mouse data set we need\n dataset = (server.marts['ENSEMBL_MART_ENSEMBL']\n .datasets['mmusculus_gene_ensembl'])\n\n # recieve the mapping from ensembl to MGI\n conv_table = dataset.query(attributes=['ensembl_gene_id', 'external_gene_name'])\n\n # we first drop duplicates in the first column\n conv_table = conv_table.drop_duplicates(conv_table.columns.values[0])\n\n # convert the gene names from the adata object to a data frame\n adata_table = pd.DataFrame(adata.var_names)\n\n # give the first column a name\n adata_table.columns = ['Gene stable ID']\n\n # change the gene table so that the ensembl names are now the index\n conv_table = conv_table.set_index('Gene stable ID')\n\n # project the names from the conversion table on the corr. 
names in the\n # adata var names table\n mapping = adata_table.join(conv_table, on='Gene stable ID')\n\n # how many could we not map\n not_found_mgi = sum(pd.isnull(mapping).iloc[:,1])\n\n # how many ensg symbols did we map several times?\n rep_ensg = len(mapping.iloc[:, 0]) - len(set(mapping.iloc[:, 0]))\n\n # how many mgi symbols did we map several times?\n rep_mgi = len(mapping.iloc[:, 1]) - len(set(mapping.iloc[:, 1]))\n\n # print this information\n print('Genes where no MGI annotations where found: {}\\nENSG repetition: {}\\nMGI repetition: {}'.\\\n format(not_found_mgi, rep_ensg, rep_mgi))\n\n # fill nans in mgi column with corresponding ensembl annotations\n mapping['Gene name'].fillna(mapping['Gene stable ID'], inplace = True)\n\n # add the new gene names to the adata object\n adata.var['mgi_symbols'] = mapping['Gene name'].tolist()", "def take_control_over(self, other):\n a = self\n if a == other: return\n if util.onechancein(6): #make a master of b\n if other.master is not None:\n if other.master != a and a.master != other: #if b already had master, make a enemy of b.master\n a.history.append('In year %d %s tried to overtake the control over %s, but failed' % (world.year, a.name, other.name))\n other.master.conflict_with(a)\n else:\n if a.master == other: #if we overtook controll\n a.master = None\n try:\n other.minions.remove(a)\n except ValueError: pass\n try:\n other.master.minions.remove(other)\n except Exception : pass\n a.minions.append(other)\n other.master = a\n a.history.append('In year %d %s became boss over %s' %(world.year, a.name, other.name))", "def update_module_indexes(self, generation):\n self.species_module_index_map = {}\n\n if Config.blueprint_nodes_use_representatives:\n # For representatives species_module_index_map becomes: representative -> (species index, member index)\n for rep, module in self.species_module_ref_map.items():\n if module is None:\n continue\n\n for species_index, species in enumerate(generation.module_population.species):\n if module in species:\n self.species_module_index_map[rep] = \\\n (species_index, generation.module_population.species[species_index].members.index(module))\n break\n else:\n for spc_index, module in self.species_module_ref_map.items():\n if module is None:\n continue\n\n if spc_index < len(generation.module_population.species) and \\\n module in generation.module_population.species[spc_index]:\n\n self.species_module_index_map[spc_index] = \\\n generation.module_population.species[spc_index].members.index(module)\n\n elif Config.allow_cross_species_mappings:\n for new_species_index, species in enumerate(generation.module_population.species):\n if module in species:\n \"\"\"found module in new species\"\"\"\n self.species_module_index_map[spc_index] = \\\n (new_species_index,\n generation.module_population.species[new_species_index].members.index(module))\n break", "def setSharp(self,sharp):\n super(self.__class__, self).setSharp(self, sharp)", "def addMaster(self,masterName):\n masterInfo = modInfos[masterName]\n self.masterNames.append(masterName)\n #--Map info\n iMod = len(self.masterNames)\n #--Map masters\n masterMap = self.getMasterMap(masterInfo)\n masterRefs = masterInfo.extras['FileRefs']\n #--Get Refs types and alts\n cellRefIds = self.cellRefIds\n cellRefAlts = self.cellRefAlts\n #--Progress\n cntCells = 0\n progress = self.progress\n progress.setMax(len(masterRefs.cells))\n progress(0.0,_(\"Building \")+masterName)\n for cell,record in masterRefs.lands.items():\n self.lands[cell] = record\n for masterCell 
in masterRefs.cells:\n cellId = masterCell.getId()\n #--Named exterior cell?\n if not (masterCell.flags & 1) and masterCell.cellName:\n self.extCellNames.add(masterCell.cellName)\n #--New cell id?\n if cellId not in cellRefIds:\n refIds = cellRefIds[cellId] = {}\n refAlts = cellRefAlts[cellId] = {}\n #--Exiting cell id?\n else:\n refIds = cellRefIds[cellId]\n refAlts = cellRefAlts[cellId]\n #--Objects\n for object in masterCell.getObjects().list():\n (iMMod,iObj,objId) = object[:3]\n newIdKey = (iMod,iObj)\n #--Modifies a master reference?\n if iMMod:\n if iMMod >= len(masterMap):\n raise Tes3RefError(masterName,cellId,objId,iObj,iMMod,\n _('NO SUCH MASTER'))\n altKey = (masterMap[iMMod],iObj)\n oldIdKey = altKey\n #--Already modified?\n if altKey in refAlts:\n oldIdKey = refAlts[altKey]\n if oldIdKey not in refIds:\n raise Tes3RefError(masterName,cellId,objId,iObj,iMMod,\n masterInfo.masterNames[iMMod-1])\n del refIds[oldIdKey]\n refAlts[altKey] = newIdKey\n #print cellId, newIdKey, objId\n #--Save it\n refIds[newIdKey] = objId\n #--Progress\n cntCells += 1\n progress(cntCells)\n #--Debris Ids\n for type, ids in masterRefs.debrisIds.items():\n if type not in self.debrisIds:\n self.debrisIds[type] = set()\n self.debrisIds[type].update(ids)\n #--List Masters\n levListMasters = self.levListMasters\n for levList in (masterRefs.debrisIds['LEVC'] + masterRefs.debrisIds['LEVI']):\n if levList not in levListMasters:\n levListMasters[levList] = []\n levListMasters[levList].append(masterName)", "def testMakeNewSpecies(self):\n\n # adding 3 unique species:\n cerm = CoreEdgeReactionModel()\n\n spcs = [Species().fromSMILES('[OH]'), \n Species().fromSMILES('CC'),\n Species().fromSMILES('[CH3]')]\n\n for spc in spcs:\n cerm.makeNewSpecies(spc)\n\n self.assertEquals(len(cerm.speciesDict), len(spcs)) \n self.assertEquals(len(cerm.indexSpeciesDict), len(spcs))\n\n # adding 3 unique, and 1 already existing species:\n cerm = CoreEdgeReactionModel()\n\n spcs = [Species().fromSMILES('[OH]'), \n Species().fromSMILES('CC'),\n Species().fromSMILES('[CH3]'),\n Species().fromSMILES('CC')]#duplicate species\n\n for spc in spcs:\n cerm.makeNewSpecies(spc)\n\n self.assertEquals(len(cerm.speciesDict), len(spcs) - 1) \n self.assertEquals(len(cerm.indexSpeciesDict), len(spcs) - 1)", "def remap(self,newMasters,modMap,objMaps=[]):\n #--Masters\n self.tes3.masters = newMasters\n #--File mapping\n modMapKeys = modMap.keys()\n #--Remap iObjs\n cells_id = self.cells_id\n reObjNum = re.compile('[0-9A-Z]{8}$')\n for (iMod,objMap) in objMaps:\n cellIds = objMap.keys()\n for cellId in cellIds:\n cellObjMap = objMap[cellId]\n #--Save \n cell = cells_id.get(cellId)\n if not cell: continue\n #--Objects\n objects = cell.getObjects()\n for object in objects.list():\n #--Different mod?\n if object[0] != iMod:\n pass\n #--Cell deleted?\n elif cellObjMap == -1:\n objects.remove(object)\n #--Remapped object?\n elif object[1] in cellObjMap:\n (newIObj,objId) = cellObjMap[object[1]]\n objIdBase = reObjNum.sub('',objId) #--Strip '00001234' id num from object\n #--Mismatched object id?\n if objId != objIdBase:\n #print 'Mismatch:',object[:3]\n pass \n #--Deleted object?\n elif newIObj == -1:\n #print 'Deleted',object[:3]\n objects.remove(object)\n #--Remapped object?\n else:\n #print 'Remapped',object[:3],'to',newIObj\n newObject = self.remapObject(object,iMod,newIObj)\n objects.replace(object,newObject)\n self.updateScptRefs()\n #--Remap iMods\n if not modMapKeys: return\n for cell in self.cells:\n objects = cell.getObjects()\n for object in 
objects.list():\n #--Remap IMod\n iMod = object[0]\n #--No change?\n if iMod not in modMapKeys: \n pass\n #--Object deleted?\n elif modMap[iMod] == -1:\n objects.remove(object)\n #--Object not deleted?\n else:\n newObject = self.remapObject(object,modMap[iMod])\n objects.replace(object,newObject)\n self.updateScptRefs()", "def refine(self): # pylint: disable=R0201\n return True", "def __init__(\n self,\n locus_tag: str,\n gene_type: str,\n location: Union[FeatureLocation, CompoundLocation],\n name: str,\n reference_sequence: Seq,\n cog: str = None,\n y_ome: str = None,\n essential: bool = False,\n replication_strand: str = None,\n origin_distance: int = None,\n terminus_distance: int = None\n ):\n\n super().__init__('gene', location=location, reference_sequence=reference_sequence, name=name)\n self.reading_frame = get_reading_frame(self.location, len(reference_sequence))\n\n # if the gene is a coding sequence, it should have a multiple of 3 length; sequence is set by super init\n if gene_type == 'CDS' and len(self.location) % 3 != 0:\n raise ValueError(locus_tag + ': sequence should have multiple of 3 length if gene is coding')\n\n self.locus_tag = locus_tag\n self.gene_type = gene_type\n self.cog = cog\n self.y_ome = y_ome\n self.essential = essential\n self.replication_strand = replication_strand\n self.origin_distance = origin_distance\n self.terminus_distance = terminus_distance\n \n # only set by add_regulon_db_gene_ids\n self.id = None\n\n # only set after calculate_and_add_cai is run\n self.cai = None\n\n # only set after the appropriate linking functions are run\n self.protein = None\n self.trna = None\n self.transcription_units = []\n self.attenuators = []\n self.riboswitches = []\n self.shine_dalgarno = None\n self.i_modulons = []", "def fromGenomeRepresentation(self,genome):\n self.clear()\n #print(genome)\n tokens = genome.split(\"||\")\n self.setAxiomFromString(tokens[0])\n self.setIterations(int(tokens[1]))\n for i in range(2,len(tokens)): self.addProductionFromGenomeRepresentation(tokens[i])" ]
[ "0.63167685", "0.63009715", "0.62039506", "0.60391605", "0.5989629", "0.580947", "0.5797044", "0.5579908", "0.5506126", "0.5501652", "0.5439127", "0.54250365", "0.5421335", "0.5421209", "0.5415419", "0.5400045", "0.5384445", "0.53612787", "0.5360659", "0.5338628", "0.5326598", "0.5267285", "0.5259004", "0.52573365", "0.52154595", "0.51732326", "0.51732326", "0.51732326", "0.51659495", "0.5146133", "0.51217264", "0.5114201", "0.51115304", "0.5079434", "0.5075999", "0.5060804", "0.5057662", "0.505726", "0.5050674", "0.5047666", "0.5047666", "0.5038679", "0.5027599", "0.5017093", "0.50083613", "0.49937823", "0.49849874", "0.49709076", "0.49672082", "0.4959547", "0.49510905", "0.49510226", "0.49418533", "0.49410427", "0.49232328", "0.49139506", "0.4908437", "0.4893635", "0.48899898", "0.4880612", "0.4873191", "0.48728704", "0.48505864", "0.4850016", "0.4849835", "0.48454213", "0.4842338", "0.48363462", "0.4835189", "0.4834145", "0.48183978", "0.48123688", "0.48102513", "0.48040268", "0.48021996", "0.47928134", "0.4791444", "0.47899526", "0.4785557", "0.47828388", "0.4782793", "0.47819734", "0.47812623", "0.47749725", "0.4770109", "0.47660735", "0.4762974", "0.47590238", "0.47435144", "0.4741376", "0.47299457", "0.47294453", "0.47291008", "0.4723753", "0.47221193", "0.47218472", "0.471945", "0.4718755", "0.47115818", "0.4699803", "0.46952504" ]
0.0
-1
return 0 if they are equal, -1 if snp1 is less, else 1
def compare(chr1, pos1, chr2, pos2):
    pos1 = int(pos1)
    pos2 = int(pos2)
    if chrsort == 'version':
        chr1 = mapChrForVersion(chr1)
        chr2 = mapChrForVersion(chr2)
    elif chrsort == 'natural':
        pass  # use original chr1, chr2
    else:
        chr1 = chrsort.get(chr1, chr1)
        chr2 = chrsort.get(chr2, chr2)
    return -1 if (chr1, pos1) < (chr2, pos2) else 1 if (chr1, pos1) > (chr2, pos2) else 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def judge(n1: List[int], n2: List[int]) -> int:\n\tmin_length = min(len(n1),len(n2))\n\tpass_count = 0\n\tfor i in range(min_length):\n\t\tpass_count += get_lowest_16(n1[i]) == get_lowest_16(n2[i])\n\n\treturn pass_count", "def compareones_c(w1,w2,tn):\n nw1 = np.int_(np.copy(w1))\n nw2 = np.int_(np.copy(w2))\n code = \"\"\"\n int s;\n s = 0;\n for(int i = 0; i < n; i++)\n {\n if((nw1[i] == 1)&(nw2[i] == 1))\n {\n s += 100;\n }\n }\n return_val = s;\n \"\"\"\n n = len(w2)\n res = inline(code, ['nw1','nw2','n'], headers = ['<math.h>'], compiler = 'gcc')\n return res / float(tn)", "def score(c1, c2):\n if c1 == c2:\n return 1\n else:\n return 0", "def compare(cls, s1, s2=None):\n if s2 is None:\n s = s1\n else:\n s = cls.multiply(s1, s2) # is self the class here\n result = 0\n result += cls.VERT_Z if reduce(lambda v, q: q^v, s.qubit_line('col', inbetween='z')) & 1 else 0\n result += cls.HOR_Z if reduce(lambda v, q: q^v, s.qubit_line('row', inbetween='z')) & 1 else 0\n result += cls.VERT_X if reduce(lambda v, q: q^v, s.qubit_line('col', inbetween='x')) & 2 else 0\n result += cls.HOR_X if reduce(lambda v, q: q^v, s.qubit_line('row', inbetween='x')) & 2 else 0\n return result", "def _same(p1,p2,prec=0.0001):\n #return num.all(num.equal(p1,p2))\n t1 = num.fabs(p1[0]-p2[0]) < prec\n t2 = num.fabs(p1[1]-p2[1]) < prec\n if t1 and t2:\n #print \"same\", p1,p2\n return True", "def checarPs(self,p1,p2):\n return abs(p1-p2) < 0.00001", "def are_equal(self, sp1, sp2):\n return", "def are_equal(self, sp1, sp2):\n return True", "def compare_zipcodes(s1, s2):\n\n # check if the zipcode are identical (return 1 or 0)\n sim = (s1 == s2).astype(float)\n\n # check the first 2 numbers of the distinct comparisons\n sim[(sim == 0) & (s1.str[0:2] == s2.str[0:2])] = 0.5\n\n return sim", "def evaluate(self) :\n if self.inStates[0].getState() == self.inStates[1].getState(): return 0\n return 1", "def compare(self) -> int:", "def lamport_compare(ts1, ts2):\n time1 = parse_op_id(ts1)\n time2 = parse_op_id(ts2)\n if time1.counter != time2.counter:\n return time1.counter - time2.counter\n if time1.actorId != time2.actorId:\n return 1 if time1.actorId > time2.actorId else -1\n return 0", "def test_psnr_with_two_completely_different_sets(self):\n low = np.zeros((10, 500, 500, 1), dtype=np.uint8)\n high = np.ones((10, 500, 500, 1), dtype=np.uint8) * 255\n\n avg_psnr = np.array(psnr(high, low)).mean()\n self.assertEqual(avg_psnr, 0.0)", "def indicator(self, a, b):\n return self.n_score(a == b)", "def check(self):\n if (sum(self.state) == 0):\n return -1\n elif (self.state[-1] >= 1):\n return 1\n else:\n return 0", "def fn(x):\n k = 0 \n for i, ch in enumerate(s): \n if mp.get(i, inf) < x: continue \n if k < len(p) and ch == p[k]: k += 1\n return k == len(p)", "def assembly_compare(x, y) :\n if x.kinf() < y.kinf() :\n return 1\n elif x.kinf() == y.kinf() :\n return 0\n else : #x.resultType < y.resultType\n return -1", "def __le__(self, rs):\n Number.comparisons += 1\n result = self.data <= rs.data\n return result", "def compare_spectrum(spectrum0, spectrum1):\n title0 = spectrum0.get_title() \n title1 = spectrum1.get_title() \n if(title0 < title1): \n return -1\n elif(title0 > title1): \n return 1\n else:\n return 0", "def judge(genA: typing.Iterator[int], genB: typing.Iterator[int], steps: int) -> int:\n res = 0\n for na, nb in it.islice(zip(genA, genB), steps):\n la, lb = lower16(na), lower16(nb)\n if la == lb:\n res += 1\n return res", "def lps(mask):\n if not mask: return 0\n if not mask & (mask-1): return 1\n lo = 
int(log2(mask & ~(mask-1))) # least significant set bi\n hi = int(log2(mask)) # most significant set bit \n if s[lo] == s[hi]: return 2 + lps(mask^(1<<lo)^(1<<hi))\n return max(lps(mask^(1<<lo)), lps(mask^(1<<hi)))", "def compare(predictions, truth):\n comp = predictions - truth\n return 1 - (np.count_nonzero(comp) / len(predictions))", "def get_nTruePositive(atrank, was_retrieved, gt_ranks):\n TP = (np.logical_and(was_retrieved, gt_ranks <= atrank)).sum()\n return TP", "def _transition_probability(self, s, a, s1):\n unreachable_states = [4, # F with prod_score == 4\n 5] # M with prod_score == 0\n\n if s1 in unreachable_states:\n return 0\n else:\n return 1 / (self.n_states - len(unreachable_states))", "def prior(old_params,params):\n \n for s in range(len(params)):\n if params[s] < 0.0 or params[s] > 2:\n return 0\n return 1", "def get_identical_score(bin1,bin2=None):\n if bin2==None: bin2=[]\n tmpscore=0.0\n norm=0\n for ali1 in bin1:\n tmpscore+=get_subscore(ali1,ali1)\n norm+=1\n for ali2 in bin2:\n tmpscore+=get_subscore(ali2,ali2)\n norm+=1\n return tmpscore/norm", "def compare_addresses(s1_1, s1_2, s2_1, s2_2):\n\n return ((s1_1 == s2_1) | (s1_2 == s2_2) | (s1_1 == s2_2) | (s1_2 == s2_1)).astype(float)", "def psnr(gt, pred):\n return compare_psnr(gt, pred, data_range=gt.max())", "def psnr(gt, pred):\n return compare_psnr(gt, pred, data_range=gt.max())", "def gateCompare(self,gate1,gate2):\n \n if gate1.getDist()>gate2.getDist():\n return 1\n elif gate1.getDist()==gate2.getDist():\n return 0\n else: #gate1Num<gate2Num\n return -1", "def Q1_test():\n A, p1, p2 = [0,0], [2,4], [6,5]\n return (distance(A,p1) > 4.472135) and (distance(p1,p2) < 4.472136)", "def cal_psnr(im1, im2):\n # assert pixel value range is 0-255 and type is uint8\n mse = ((im1.astype(np.float) - im2.astype(np.float)) ** 2).mean()\n psnr = 10 * np.log10(255 ** 2 / mse)\n return psnr", "def judge(self, s1, s2):\n if len(s2) < len(s1):\n return False\n index_of_s1 = 0\n index_of_s2 = 0\n while index_of_s1 < len(s1) and index_of_s2 < len(s2):\n if s1[index_of_s1] == s2[index_of_s2]:\n index_of_s1 += 1\n index_of_s2 += 1\n else:\n index_of_s2 += 1\n return True if index_of_s1 == len(s1) else False", "def initial_conditions_2(x):\n u1 = 1\n return u1 if 1 <= x <= 2 else 0", "def sgn(x) -> int:\n if x > 0:\n return 1\n if x < 0:\n return -1\n return 0", "def g_minority_1_dev(by_grps):\n if by_grps[0][0]==by_grps[0][1]:\n print(\"Failed g_1dev_t2 -- small groups match\")\n return False\n \n cts = 0\n ctn = 0\n cto = 0\n big_letter= \"\"\n \n for item in by_grps[1]:\n if item==\"S\":\n cts+=1\n if item==\"N\":\n ctn+=1 \n if item==\"O\":\n cto+=1\n if(cts==4 or ctn==4 or cto ==4):\n pass\n else:\n print(\"Failed g_1dev_t2 -- no large group consistency\")\n return False\n \n if(cts==4):\n big_letter = \"S\"\n if(cto==4):\n big_letter = \"O\"\n if(ctn == 4):\n big_letter = \"N\"\n \n for item in by_grps[0]:\n if(item==big_letter):\n print(\"Faield g_1dev_t2 -- a small group member and large group letter are the same\")\n return False\n print(\"Confirmed g_1dev_t2 -- small group with 1 deviancy and large group are different\")\n return True", "def successes(predictions,truth):\n\ttotal = len(predictions)\n\tcorrect = 0.0\n\tfor p in predictions:\n\t\tif p == truth:\n\t\t\tcorrect += 1\n\t\telse:\n\t\t\tprint truth,\"\\t\",p\n\treturn correct", "def true_positive(y_true, y_pred):\n \n # initialize\n tp = 0\n for yt, yp in zip(y_true, y_pred):\n if yt == 1 and yp == 1:\n tp += 1\n return tp", "def are_equal(self, sp1, sp2):\n 
return sp1 == sp2", "def compare(seq1, seq2):\n if seq1 == seq2:\n return 1\n len_diff = len(seq1) / len(seq2)\n if len_diff > 1:\n len_diff = 1 / len_diff\n\n ngrams1 = {tuple(ng) for ng in get_all_ngrams(seq1)}\n ngrams2 = {tuple(ng) for ng in get_all_ngrams(seq2)}\n\n overall = len(ngrams1 & ngrams2) / len(ngrams1 | ngrams2)\n if overall == 1 or overall == 0:\n return overall\n\n try:\n max_match = len(max(ngrams1 & ngrams2, key=len)) / len(seq1)\n except ValueError:\n return 0\n\n return (len_diff + max_match + overall) / 3", "def is_SNP(count):\n counts = sum(count)\n return counts and float(counts - max(count)) / counts > MAX_NOISE", "def compare_snp_calls(gt_call, vcf_call):\n if vcf_call == 'NA':\n print(vcf_call)\n return 'NA'\n\n if not set('ACGT ').issuperset(gt_call):\n return 'NA'\n\n allele_freq, ref_allele, alt_allele = vcf_call.split(':')\n\n if gt_call == '{0} {1}'.format(ref_allele, ref_allele):\n gt_call = 0\n\n elif gt_call == '{0} {1}'.format(alt_allele, alt_allele):\n gt_call = 1\n\n elif ref_allele not in gt_call and alt_allele not in gt_call:\n gt_call = 0.5\n\n # 'If alleles do not match, e.g. genotyping gives C/T and SNP calling A/G!'\n else:\n print('')\n print(gt_call)\n print('{0} {1}'.format(ref_allele, ref_allele))\n print('{0} {1}'.format(alt_allele, alt_allele))\n print('')\n return 'NA'\n\n diff = gt_call - float(allele_freq)\n return diff", "def kangaroo(x1, v1, x2, v2):\n # 0 3 4 2\n\n if v2 >= v1: # We know x1 is always less than x2, so a faster x2 kangaroo means they will never meet\n return \"NO\"\n else:\n d1 = x1\n d2 = x2\n t = 0\n\n while d1 < d2:\n t += 1 # good thing to track, in case it is needed in a real use case\n d1 += v1\n d2 += v2\n if d1 == d2:\n return \"YES\"\n\n return \"NO\"", "def __get_score(self):\n for pair in zip(self.nu[self.nu_idx:], self.sw[self.sw_idx:]):\n if pair[0] == pair[1]:\n self.score += 1\n else:\n break", "def compareAUTOR(offense1, offense2):\n \n if (offense1 == offense2):\n return 0\n elif (offense1 > offense2):\n return 1\n else:\n return -1", "def Sgn(num):\n n = float(num)\n if n < 0:\n return -1\n elif n == 0:\n return 0\n else:\n return 1", "def _check_RS(self,suits_frequency):\n if len(np.array(suits_frequency.loc[suits_frequency==5])) == 1:\n RS=1\n else:\n RS=0\n\n return RS", "def _cmp(x, y):\n if x[1].count > y[1].count:\n return CmpRelation.GREATER\n if x[1].count < y[1].count:\n return CmpRelation.LESS\n if x[1].ptn_length < y[1].ptn_length:\n return CmpRelation.GREATER\n if x[1].ptn_length > y[1].ptn_length:\n return CmpRelation.LESS\n return CmpRelation.EQUAL", "def check(s1):\n chars = [0] * 128\n for c in s1:\n chars[ord(c)]+=1\n\n counter = 0\n for i in range(len(chars)):\n if chars[i] %2 != 0:\n counter+=1\n \n return counter <= 1", "def check_prize(correct_num):", "def overUseRes(res1, res2):\n \n for i in range(len(res1)):\n if res1[i] > res2[i]:\n return True\n return False", "def jaccard_sim(self, s1, s2):\n set1, set2 = set(s1.split()), set(s2.split())\n num = len(set1.intersection(set2))\n denom = len(set1.union(set2))\n return num / denom if denom != 0 else 0", "def __le__(self, other):\n return int(self.rank) <= int(other.rank)", "def haveEncountered(self,mono1,mono2,eps): \n return self.distance(mono1,mono2) < eps", "def new_binomial_prefactor(s,l1,l2,PAx,PBx):\n with loops.Scope() as L:\n L.total = 0.\n L.t = 0\n for _ in L.while_range(lambda: L.t < s + 1):\n #TEMP TODO rewrite this. 
The cond_range causes a huge overhead.\n # Try Valeev implementation\n for _ in L.cond_range(((s - l1) <= L.t) & (L.t <= l2)):\n L.total += binomials[l1,s-L.t] * binomials[l2,L.t] * PAx[l1-s + L.t] * PBx[l2 - L.t]\n L.t += 1\n return L.total", "def firstMissingPositive(self, nums):\n nums.sort()\n res = 1\n for num in nums:\n if num == res:\n res += 1\n return res", "def SNR(op0, op1):\n result = len(op0)*np.abs(np.mean(op1) - np.mean(op0))**2/((np.var(op1)+np.var(op0))/2)\n \n return result", "def are_equal(self, sp1, sp2):\n for s1 in sp1.keys():\n spin1 = getattr(s1, \"spin\", 0)\n oxi1 = getattr(s1, \"oxi_state\", 0)\n for s2 in sp2.keys():\n spin2 = getattr(s2, \"spin\", 0)\n oxi2 = getattr(s2, \"oxi_state\", 0)\n if (s1.symbol == s2.symbol and oxi1 == oxi2 and\n spin2 == -spin1):\n break\n else:\n return False\n return True", "def compare(l1, l2):\n if link_weights[l1] < link_weights[l2]:\n return 1\n elif link_weights[l1] == link_weights[l2]:\n return 0\n else:\n return -1", "def fullCmpSets(s1, s2):\n if len(s1) != len(s2):\n return 1\n for s1i, s2i in map(None, s1, s2):\n f = s1i.fullCmp(s2i)\n if f:\n return f", "def arsenalResults(dat):\n arsScore = int(dat[0])\n othScore = int(dat[2])\n if arsScore > othScore:\n res = 1\n elif arsScore == othScore:\n res = 2\n else:\n res = 0\n return res", "def get_hand1_wins(p1_hand, p1_rank, p1_rank_value, p2_hand, p2_rank, p2_rank_value):\n if HAND_RANKS.index(p1_rank) > HAND_RANKS.index(p2_rank):\n return 1\n elif HAND_RANKS.index(p1_rank) < HAND_RANKS.index(p2_rank):\n return 0\n\n # Ranks are equal\n if p1_rank_value > p2_rank_value:\n return 1\n elif p1_rank_value < p2_rank_value:\n return 0\n\n # Ranks and rank values are equal, go by highest card until one hand wins\n for i in range(0, 5):\n val1 = VALUES[p1_hand[i][0]]\n val2 = VALUES[p2_hand[i][0]]\n if val1 > val2:\n return 1\n elif val1 < val2:\n return 0\n\n print \"WTF\"\n return 0", "def check(self):\n\n if (sum(self.game_state) == 0):\n return 1\n elif (self.game_state[-1] >=1 ):\n return -1\n else:\n return 0", "def compare(self, other):\n return len(self & other) / max(len(self | other), 1)", "def samesign ( a , b ) :\n return ( 0 < a and 0 < b ) or ( 0 > a and 0 > b )", "def evaluate_binary_consistency(self):\n\n change_rw = 0\n change_sm = 0\n th = [0.005]\n for threshold in th:\n raw_th = [self.rw_data[t] > threshold for t in range(0, self.T)]\n smooth_th = [self.smth_data[t] > 0 for t in range(0, self.T)]\n # print(\"Zeros rw:\", get_avg_zeros_per_row(raw_th))\n # print(\"Zeros sm:\", get_avg_zeros_per_row(self.smth_data))\n change_rw = change_rw + self.change_of_network_over_time(raw_th)\n change_sm = change_sm + self.change_of_network_over_time(smooth_th)\n\n change_rw = change_rw / len(th)\n change_sm = change_sm / len(th)\n\n return change_rw, change_sm", "def jaccard(sl, sr):\n\tunion = sl | sr\n\tif not union:\n\t\treturn 0.0\n\treturn len(sl & sr) / float(len(union))", "def compare (v1, v2):\n v1_norm = normalize(v1)\n v2_norm = normalize(v2)\n if v1_norm < v2_norm:\n return -1\n if v1_norm > v2_norm:\n return 1\n return 0", "def __le__(self, other):\n self.numerator=self.numerator*other.denominator\n other.numerator=self.denominator*other.numerator\n if(self.numerator<=other.numerator):\n return True\n else:\n return False", "def is_swap(score0, score1):\n # BEGIN PROBLEM 5\n s0_is_double_s1 = score0 // 2 == score1 and score0 % 2 == 0\n s1_is_double_s0 = score1 // 2 == score0 and score1 % 2 == 0\n return s0_is_double_s1 or s1_is_double_s0\n # END PROBLEM 5", "def 
check_equal_rsa_pub_key(sk2_, sk_):\n pub_n = sk_.public_numbers()\n pub_n2 = sk2_.public_numbers()\n\n self.assertEqual(pub_n2.e, pub_n.e)\n self.assertEqual(pub_n2.n, pub_n.n)", "def read_pairwise_ld(snp1, snp2):\n\tif snp1.rsID == snp2.rsID:\n\t\treturn 1\n\tif snp1.rsID in r2_cache and snp2.rsID in r2_cache:\n\t\treturn r2_cache[snp1.rsID][snp2.rsID]\n\telse:\n\t\treturn 0", "def is_over(self, state: StonehengeState) -> bool:\n total_result = state.hori_result + state.left_result + state.right_result\n total_line = len(total_result)\n p1_taken = 0\n p2_taken = 0\n # all_taken = True\n for item in total_result:\n if item == '1':\n p1_taken+=1\n elif item =='2':\n p2_taken += 1\n # else:\n # all_taken = False\n # print('p1 taken:' + str(p1_taken))\n # print('p2 taken:' + str(p2_taken))\n # print('p1_taken more than half?')\n # print(float(p1_taken) >= total_line/2)\n # print('p2_taken more than half?')\n # print(float(p2_taken) >= total_line/2)\n return float(p1_taken) >= total_line/2 or float(p2_taken) >= total_line/2", "def get_eqn(p0, p1):\n m = (p0[1] - p1[1]) / (p0[0] - p1[0])\n return (m, p0[1] - m * p0[0])", "def helper(s1,s2):\n # start with string of smaller len\n if len(s1) > len(s2): # swap\n temp = s1\n s1 = s2\n s2 = temp\n\n ans = 0\n\n # build two counters\n counter1 = collections.Counter(s1)\n counter2 = collections.Counter(s2)\n for item in counter1:\n ans += min(counter1[item],counter2[item])\n\n return ans", "def p(x):\n if x<0 or x>1:\n return 0\n else:\n return 1", "def compare(x, y):\n if x >= y:\n return 1.0\n else:\n return 0.0", "def fn(i, s0, s1, c0, c1):\n if s0 > n or s1 > n: return 0 # impossible \n if i == len(balls): return int(c0 == c1)\n ans = 0 \n for x in range(balls[i]+1): \n ans += fn(i+1, s0+x, s1+balls[i]-x, c0+(x > 0), c1+(x < balls[i])) * comb(balls[i], x)\n return ans", "def compare_sid(x_true, x_pred):\n x_true, x_pred = x_true.astype(np.float32), x_pred.astype(np.float32)\n N = x_true.shape[2]\n err = np.zeros(N)\n for i in range(N):\n err[i] = abs(np.sum(x_pred[:, :, i] * np.log10((x_pred[:, :, i] + 1e-3) / (x_true[:, :, i] + 1e-3))) +\n np.sum(x_true[:, :, i] * np.log10((x_true[:, :, i] + 1e-3) / (x_pred[:, :, i] + 1e-3))))\n return np.mean(err / (x_true.shape[1] * x_true.shape[0]))", "def lrt_2_pval(self, lrt):\n ind = (self.val < lrt)[::-1].argmax()\n pval = 1-self.cumprob[ind]\n return pval", "def compareVersion(self, version1, version2):\n v1 = version1.split('.')\n v2 = version2.split('.')\n\n for x, y in zip(v1, v2):\n if int(x) > int(y):\n return 1\n elif int(x) < int(y):\n return -1\n\n # all prefixes are equal\n if len(v1) > len(v2):\n for num in v1[len(v2):]:\n if int(num) > 0:\n return 1\n elif len(v1) < len(v2):\n for num in v2[len(v1):]:\n if int(num) > 0:\n return -1\n return 0", "def _psnr(img1, img2):\n mse = np.mean((img1 - img2) ** 2)\n if mse == 0:\n return 100\n PIXEL_MAX = 1\n return (20 * math.log10(PIXEL_MAX)) - (10 * math.log10(mse))", "def is_ppc(C1, C2, i):\n c1, c2 = sorted(C1), sorted(C2)\n for k in range(len(c1)):\n if i <= c2[k]:\n # return False\n break\n if c1[k] != c2[k]:\n return False\n return True", "def npcr(mat1, mat2):\n\tnpcr = 0\n\tw, h = mat1.shape\n\tif mat1.shape != mat2.shape:\n\t\treturn -1\n\tfor i in range(w):\n\t\tfor j in range(h):\n\t\t\tif mat1[i,j] != mat2[i,j]:\n\t\t\t\tnpcr += 1\n\tnpcr /= (w*h)\n\treturn npcr*100", "def winner(strategy0, strategy1):\n score0, score1 = play(strategy0, strategy1)\n if score0 > score1:\n return 0\n else:\n return 1", "def winner(strategy0, strategy1):\n 
score0, score1 = play(strategy0, strategy1)\n if score0 > score1:\n return 0\n else:\n return 1", "def winner(strategy0, strategy1):\n score0, score1 = play(strategy0, strategy1)\n if score0 > score1:\n return 0\n else:\n return 1", "def quality_data(self, s):\n known_symbols = np.mod(range(176),48)>=32\n print('quality_data',np.sum(np.real(s[known_symbols])<0))\n success = np.sum(np.real(s[known_symbols])<0) < 20\n return success,0 ## no doppler estimate for data frames", "def p_value(set1, set2):\n\ts, p = stats.ttest_ind(set1, set2)\n\treturn p", "def complementary_score(s1, s2) -> int:\n aligner = Align.PairwiseAligner()\n aligner.mode = 'local'\n aligner.open_gap_score = -1000 # Don't want any gaps!\n # score = the number of complementary nucleotides between s1 and s2.\n return aligner.score(str(s1), str(s2.complement()))", "def evaluate(self) :\n for inp in self.inStates :\n if inp.getState() == 1 : return 1\n return 0", "def sat(nums: List[int]):\n a, b, c, n = nums\n return (a ** n + b ** n == c ** n) and min(a, b, c) > 0 and n > 2", "def test_integer(self):\n esnA = ESN(N_in,N_out,random_state=1)\n esnB = ESN(N_in,N_out,random_state=1)\n self._compare(esnA,esnB,should_be=\"same\")", "def compare_sentences(s1,s2):\n\n total_score=0;\n num_total=0;\n for k in s1:\n num_total+=1\n if k in s2:\n if s1[k]==s2[k]:\n score = 1;\n else:\n score = 0;\n else:\n score = 0;\n total_score += score;\n \n for k in s2:\n if k not in s1:\n num_total+=1\n\n logging.info(\"Scored %f out of %d\" % (total_score, num_total));\n\n if num_total==0:\n return 1;\n return float(total_score)/float(num_total)", "def minerr_cls(p1, p2):\n metric = p1 / p2\n res = np.zeros_like(p1)\n res[metric > 1] = 0\n res[metric <= 1] = 1\n return res", "def computePairBetweenness(self,s0,s1=None):\r\n if s1==None:s1=s0[1];s0=s0[0]; #if given only one argument it should be a two tuple\r\n \r\n #calculate PB\r\n #Note that PB(u,v)=PB(v,u)\r\n #Every path is counted only once for s0!=s1 because :\r\n #delta(v,s0,s1)>0 => delta(v,s1,s0)=0\r\n #delta(v,s1,s0)>0 => delta(v,s0,s1)=0\r\n result = 0.0\r\n for u in range(self._n):\r\n #calculate Delta(u,{s0,s1},*)\r\n dd1 = self._deltaDot[u,s1] * self.getDelta(u,s0,s1)\r\n #paths from s0 and s1 already counted in deltaDot matrix\r\n result += dd1\r\n #when s0=s1 every path is counted twice (in both directions)\r\n #if s0==s1: self._PB[s0][s1] /= 2\r\n return result", "def comparison(self, other):\n comparison = 0\n if self.races > other.races:\n comparison += 1\n elif self.races < other.races:\n comparison -= 1\n if self.wins > other.wins:\n comparison += 1\n elif self.wins < other.wins:\n comparison -= 1\n if len(self.drivers_championships_years) > len(other.drivers_championships_years):\n comparison += 1\n elif len(self.drivers_championships_years) < len(other.drivers_championships_years):\n comparison -= 1\n if len(self.constructors_championships_years) > len(other.constructors_championships_years):\n comparison += 1\n elif len(self.constructors_championships_years) < len(other.constructors_championships_years):\n comparison -= 1\n return comparison", "def _canonical_order(node_chunk_a: node_chunk, node_chunk_b: node_chunk) -> int:\n na, prec_a, slotsA = node_chunk_a\n nb, prec_b, slotsB = node_chunk_b\n\n # compare based on node precedence\n if prec_a > prec_b:\n return -1\n elif prec_b > prec_a:\n return 1\n\n # compare based on slots\n else:\n # slots are equivalent\n if slotsA == slotsB:\n return 0\n\n # a is subset of b\n aWithoutB = slotsA - slotsB\n if not aWithoutB:\n 
return 1\n\n # b is subset of a\n bWithoutA = slotsB - slotsA\n if not bWithoutA:\n return -1\n\n # compare based on slots\n aMin = min(aWithoutB)\n bMin = min(bWithoutA)\n return -1 if aMin < bMin else 1", "def atiecorrect(rankvals):\r\n sorted,posn = ashellsort(N.array(rankvals))\r\n n = len(sorted)\r\n T = 0.0\r\n i = 0\r\n while (i<n-1):\r\n if sorted[i] == sorted[i+1]:\r\n nties = 1\r\n while (i<n-1) and (sorted[i] == sorted[i+1]):\r\n nties = nties +1\r\n i = i +1\r\n T = T + nties**3 - nties\r\n i = i+1\r\n T = T / float(n**3-n)\r\n return 1.0 - T", "def evaluate(self) :\n for inp in self.inStates :\n if inp.getState() == 0 : return 0\n return 1", "def __ge__(self, other):\n return int(self.rank) >= int(other.rank)" ]
[ "0.6195848", "0.61714214", "0.5978105", "0.59495175", "0.58783364", "0.58137816", "0.5750367", "0.57387525", "0.56986475", "0.56565875", "0.5649709", "0.55772007", "0.55763227", "0.557493", "0.55728525", "0.5571845", "0.557098", "0.5570138", "0.5562759", "0.55625623", "0.5560324", "0.55452406", "0.5534755", "0.5527516", "0.5506003", "0.55004406", "0.5492515", "0.54799116", "0.54799116", "0.5478314", "0.5471997", "0.5465939", "0.5459837", "0.54578406", "0.5442059", "0.54408354", "0.5436002", "0.5418299", "0.54160005", "0.5403953", "0.54028726", "0.54022384", "0.5396006", "0.5394935", "0.5390424", "0.53886276", "0.53824157", "0.5337867", "0.5334057", "0.53279436", "0.5322588", "0.5320954", "0.53192383", "0.5316079", "0.5307573", "0.529541", "0.5294255", "0.52935714", "0.5291914", "0.52865744", "0.5271816", "0.5271694", "0.5262424", "0.52513856", "0.5250089", "0.5249864", "0.52498007", "0.5240176", "0.5234246", "0.5233502", "0.5232174", "0.5231767", "0.52248853", "0.52232474", "0.5216304", "0.5213496", "0.52127934", "0.52114123", "0.52061987", "0.5203657", "0.520005", "0.5197558", "0.51965725", "0.5195507", "0.5190003", "0.5190003", "0.5190003", "0.5185989", "0.5178887", "0.51786333", "0.5175977", "0.51684964", "0.5167848", "0.51640224", "0.5155705", "0.5155318", "0.51550895", "0.5151632", "0.51505876", "0.5135912", "0.5130843" ]
0.0
-1
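The compare document above follows the cmp-style contract (negative / zero / positive), so a sorted ordering of variants can be built from it. A minimal usage sketch, assuming the compare function from the row above is in scope, that the global chrsort is set to 'natural', and using a hypothetical list of (chr, pos) tuples:

from functools import cmp_to_key

chrsort = 'natural'  # assumed global read by compare()

def compare_records(a, b):
    # adapt (chr, pos) record pairs to the four-argument comparator
    return compare(a[0], a[1], b[0], b[1])

records = [('chr2', '500'), ('chr1', '1200'), ('chr1', '300')]  # hypothetical data
records.sort(key=cmp_to_key(compare_records))
# records -> [('chr1', '300'), ('chr1', '1200'), ('chr2', '500')]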
Method which calculates the game score through pss (player standard stats)
def set_game_score(self):
    bx = self.get_standard_stats()
    tcInt = bx["t2p_int"] + bx["t3p_int"]
    tcConv = bx["t2p_conv"] + bx["t3p_conv"]
    ft = bx["tl_int"] - bx["tl_conv"]
    ptos = bx["t2p_conv"]*2 + bx["t3p_conv"]*3 + bx["tl_conv"]
    # With "%.2f" % round(x, 2), besides rounding to two decimal places, we keep the trailing zeros
    result = "%.2f" % round(float(ptos) + (float(0.4)*float(tcConv)) - (float(0.7)*float(tcInt)) - (float(0.4)*float(ft)) + (float(0.7)*float(bx["reb_of"])) + (float(0.3)*float(bx["reb_def"])) + float(bx["steals"]) + (float(0.7)*float(bx["assists"])) + (float(0.7)*float(bx["block_shots"])) - (float(0.4)*float(bx["fouls_cm"])) - float(bx["turnovers"]), 2)
    self.game_score = "%.2f" % round(Decimal(result)/bx["games"], 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scoreEvaluationFunction(gameState):\n return gameState.getScore()", "def scoreEvaluationFunction(currentGameState):\r\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return 
currentGameState.getScore()", "def scoreGame(self):\n # create valueLs[card1,card2,...], pass it to sumHandReturnPoints(valueLs) or twoCardReturnPoints(valueLs)\n scoreLs = []\n ### Score of row\n for rowKey in self.table:\n valueLs = self.table[rowKey]\n points = self.sumHandReturnPoints(valueLs)\n scoreLs.append(points)\n\n ### Score of 4-card column\n for offset in range(0,3): # 0,1,2\n tmpLs = []\n for rowKey in self.table:\n valueLs = self.table[rowKey]\n if len(valueLs) == 5:\n iterStart = 1\n else:\n iterStart = 0\n card = valueLs[iterStart+offset]\n tmpLs.append(card)\n points = self.sumHandReturnPoints(tmpLs)\n scoreLs.append(points) \n\n ### Score of 2-card column\n #(1) 1st column\n valueLs1 = self.table['row1']\n valueLs2 = self.table['row2']\n tmpLs = []\n tmpLs.append(valueLs1[0].get_rank())\n tmpLs.append(valueLs2[0].get_rank())\n points = self.twoCardReturnPoints(tmpLs)\n scoreLs.append(points)\n #(2) 5th column\n valueLs3 = self.table['row1']\n valueLs4 = self.table['row2']\n tmpLs = []\n tmpLs.append(valueLs3[-1].get_rank())\n tmpLs.append(valueLs4[-1].get_rank())\n points = self.twoCardReturnPoints(tmpLs)\n scoreLs.append(points) \n\n ### Add up scoreLs\n sumPoints = 0\n for points in scoreLs:\n sumPoints += points\n return sumPoints", "def custom_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player) / 8\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player)) / 8\n if opp_moves == 0:\n return float(\"inf\")\n\n move_ratio = (own_moves * 8) / (opp_moves * 8) / 8\n\n # Calculate centerness_score\n completeness = completeness_of_game(game)\n centerness_score = 0\n if completeness < 0.5:\n centerness_max = (game.width / 2.)**2 + (game.height / 2.)**2\n\n own_centerness = centerness(game, player) / centerness_max\n opp_centerness = centerness(game, game.get_opponent(player)) / centerness_max\n centerness_ratio = (own_centerness * centerness_max) / (centerness_max * opp_centerness + 0.1) / centerness_max\n\n centerness_score = -1 * own_centerness + opp_centerness - centerness_ratio\n\n return 2 * own_moves - 2 * opp_moves + 2 * move_ratio + centerness_score", "def scoreEvaluationFunction(currentGameState):\n\treturn currentGameState.getScore()", "def score( self ):\r\n result = 0.0\r\n for rr in self.ee.getRsrcs( ):\r\n value = self.scoreRsrc( rr )\r\n result += value\r\n print( \"INFO: Value for the schedule is %s \" % ( rr, result ) )\r\n return( result )", "def score(self):", "def custom_score(game, player):\n \n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n \n # return different between # of my agent's move and oppenent's\n return float(own_moves - opp_moves)", "def disp_score():", "def evaluationFunction(self, currentGameState, action):\n # Useful information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n newFood = successorGameState.getFood()\n newGhostStates = successorGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n\n \"*** YOUR CODE HERE ***\"\n return successorGameState.getScore()", "def evaluationFunction(self, currentGameState, action):\n # Useful information you can extract from a GameState (pacman.py)\n 
successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n newFood = successorGameState.getFood()\n newGhostStates = successorGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n\n \"*** YOUR CODE HERE ***\"\n return successorGameState.getScore()", "def evaluationFunction(self, currentGameState, action):\n # Useful information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n newFood = successorGameState.getFood()\n newGhostStates = successorGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n\n \"*** YOUR CODE HERE ***\"\n return successorGameState.getScore()", "def score(game):\r\n result = 0\r\n roll = 0\r\n game = game + [0]*(21 - len(game))\r\n\r\n for frame in range(0, 10):\r\n if is_strike(game, roll):\r\n result += _score_strike(game, roll)\r\n roll += 1\r\n elif is_spare(game, roll):\r\n result += _score_spare(game, roll)\r\n roll += 2\r\n else:\r\n result += _score_frame(game, roll)\r\n roll += 2\r\n\r\n return result", "def evaluationFunction(self, currentGameState, action):\n # Useful information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n newFood = successorGameState.getFood()\n newGhostStates = successorGameState.getGhostStates()\n newScaredTimes = [\n ghostState.scaredTimer for ghostState in newGhostStates]\n\n \"*** YOUR CODE HERE ***\"\n return successorGameState.getScore()", "def custom_score(game, player):\n \"\"\" custom_score heuristic function idea is to implement aggressive heuristic function \n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) # Calculate length of myPlayer moves\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player))) # Calculate length of opposite player moves same as custom score 2\n return float(length_my_player_moves - 1.5*length_opp_payer_moves)", "def custom_score(game, player):\n # return penalize_corners_heuristic(game, player)\n # return favor_run_away_heuristic(game, player)\n return look_ahead_heuristic(game, player)", "def custom_score(game, player):\n # TODO: finish this function!\n if game.is_winner(player): # check to see if player is in state winner\n #print(\"You win!\")\n return math.inf # abstraction of score, +inf equates to a win\n elif game.is_loser(player):\n #print(\"You lose!\")\n return -math.inf # abstraction of score, -inf equates to a loss\n\n # Opponent\n opponent = game.get_opponent(player)\n\n # Remaining spaces left on the board\n rem_spaces = len(game.get_blank_spaces())\n\n # number of agent's available moves\n no_moves = len(game.get_legal_moves(player))\n\n # number of opponent's available moves\n opp_moves = len(game.get_legal_moves(opponent))\n\n # evaluation of board \"goodness\"\n # using moves available to both players\n # Idea is player chooses moves with scores that maximise whilst minimizing\n # evaluate board states and positions as scores\n board_score = no_moves - opp_moves\n score = board_score/rem_spaces\n\n return float(score)", "def evaluationFunction(self, currentGameState, action):\n # Useful information you can extract from a GameState 
(pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n newFood = successorGameState.getFood()\n #newGhostStates = successorGameState.getGhostStates()\n #newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n\n \"*** YOUR CODE HERE ***\"\n score = 0.0\n base = 100.0\n\n #if get fodd then add score\n score += currentGameState.hasFood(newPos[0], newPos[1]) * base\n for food in newFood.asList():\n score -= base * (1 - math.exp(-1.0 * util.manhattanDistance(newPos, food)))\n \n #if too close to ghost then minus points\n GhostPos = successorGameState.getGhostState(1).getPosition()\n GhostScareTime = successorGameState.getGhostState(1).scaredTimer\n if util.manhattanDistance(newPos, GhostPos) < 2 and GhostScareTime ==0:\n score -= 1e100\n\n #Stop is not good for win so minus points\n if action == Directions.STOP:\n score -= base\n\n return score\n #return successorGameState.getScore()", "def custom_score(game, player):\n if game.is_loser(player):\n return -math.inf\n\n if game.is_winner(player):\n return math.inf\n\n opp_moves = game.get_legal_moves(game.get_opponent(player))\n own_moves = game.get_legal_moves(player)\n\n return len(own_moves) / max(len(opp_moves), 1e-6)", "def evaluationFunction(self, currentGameState, action):\n # Useful information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n oldFood = currentGameState.getFood()\n newGhostStates = successorGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n\n\n return successorGameState.getScore()", "def custom_score_3(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player)\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player))\n if opp_moves == 0:\n return float(\"inf\")\n\n #Between 1-8\n return own_moves / opp_moves", "def score(self):\n score_message = {\n 'Onewins': \"\\nThe Winner is Player 1!\",\n 'Twowins': \"\\nThe Winner is Player 2!\",\n 'Tie': \"\\nTie! 
Looks like everyone's a winner!\",\n 'Nowinner': \"\\nYikes, neither of you win!\"\n }\n if self.pone_score > self.ptwo_score:\n print(score_message['Onewins'])\n elif self.pone_score < self.ptwo_score:\n print(score_message['Twowins'])\n elif self.pone_score == 0 and self.ptwo_score == 0:\n print(score_message['Nowinner'])\n else:\n print(score_message['Tie'])", "def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player)\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player))\n if opp_moves == 0:\n return float(\"inf\")\n\n move_ratio = own_moves / opp_moves\n\n completeness = completeness_of_game(game)\n centerness_score = 0\n\n if completeness < 0.5:\n own_centerness = centerness(game, player)\n opp_centerness = centerness(game, game.get_opponent(player))\n centerness_ratio = own_centerness / opp_centerness + 0.1\n\n center_score = -1 * own_centerness + opp_centerness - centerness_ratio\n\n return 2 * own_moves - 2 * opp_moves + 2 * move_ratio + centerness_score", "def custom_score_2(game, player):\n # TODO: finish this function!\n if game.is_loser(player):\n #print(\"You lose!\")\n return -math.inf\n if game.is_winner(player):\n #print(\"You win\")\n return math.inf\n\n # center\n width = game.width / 2\n height = game.height / 2\n\n # Opponent\n opponent = game.get_opponent(player)\n opp_y_coord, opp_x_coord = game.get_player_location(opponent)\n opp_x_eval = (width - float(opp_x_coord)) ** 2\n opp_y_eval = (height - float(opp_y_coord)) ** 2\n opp_center_eval = float(opp_x_eval + opp_y_eval)\n\n # Remaining spaces left on the board\n rem_spaces = len(game.get_blank_spaces())\n\n # number of agent's available moves\n no_moves = len(game.get_legal_moves(player))\n\n # number of opponent's available moves\n opp_moves = len(game.get_legal_moves(opponent))\n\n # evaluation of board \"goodness\"\n # using moves available to both players\n # Idea is player chooses moves with scores that maximise whilst minimizing\n # evaluate board states and positions as scores\n opp_score = opp_moves * 2 - opp_center_eval\n score = no_moves - opp_score/rem_spaces\n return float(score)", "def score(entry):\n score = time_seconds(entry['Swim'])\n score += time_seconds(entry['Run'])\n score -= (int(entry['Push-ups']) * 2)\n score -= int(entry['Sit-ups'])\n score -= int(entry['Pull-ups']) * 6\n return score", "def compute_score(window, computer_piece):\n score = 0\n if window.count(computer_piece) == 4:\n score += 100\n elif window.count(computer_piece) == 3 and window.count(0) == 1:\n score += 5\n elif window.count(computer_piece) == 2 and window.count(0) == 2:\n score += 2\n if window.count(PLAYER_PIECE) == 2 and window.count(0) == 2:\n score -= 1\n if window.count(PLAYER_PIECE) == 3 and window.count(0) == 1:\n score -= 100\n return score", "def GetResult(self, playerjm):\n return self.score / len(self.scores)", "def score_game(self):\r\n players = self.player_control.get_players()\r\n ###game_control = self.game_control\r\n ###if game_control is not None:\r\n ### game_control.set_vals() # Update any changed game control settings\r\n if len(players) == 0:\r\n return # No players\r\n n_top_score = 0\r\n top_score = players[0].get_score()\r\n for player in players:\r\n if player.get_score() > top_score:\r\n top_score = player.get_score()\r\n for player in players:\r\n player_score = player.get_score()\r\n if player_score == 
top_score:\r\n n_top_score += 1\r\n \r\n for player in players:\r\n player_score = player.get_score()\r\n player_played = player.get_played()\r\n player_ties = player.get_ties()\r\n player_wins = player.get_wins()\r\n new_played = player_played+1\r\n player.set_played(new_played)\r\n player.set_prop(\"played\")\r\n if player_score == top_score:\r\n if n_top_score > 1:\r\n new_ties = player_ties + 1\r\n player.set_ties(new_ties)\r\n player.set_prop(\"ties\")\r\n else:\r\n new_wins = player_wins + 1\r\n player.set_wins(new_wins)\r\n player.set_prop(\"wins\")\r\n self.update_score_window()", "def pss(self):\n return (self.table[0, 0] * self.table[1, 1] - self.table[0, 1] * self.table[1, 0]) / \\\n ((self.table[0, 0] + self.table[1, 0]) * (self.table[0, 1] + self.table[1, 1]))", "def custom_score_7(game, player):\n \"\"\"custom_score_7 heuristic function also aims towards weighted chances heuristics\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) #Calculate length of available moves for myPlayer\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves for the oppositePlayer\n return float(1.5*length_my_player_moves*length_my_player_moves - length_opp_payer_moves*length_opp_payer_moves)", "def evaluationFunction(self, currentGameState, action):\n # Useful information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n oldFood = currentGameState.getFood()\n newGhostStates = successorGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n\n return successorGameState.getScore()", "def score(self,player, board):\r\n numPlayer = 0\r\n numOpp = 0\r\n for i in self.squares():\r\n if board[i] == player:\r\n numPlayer+= SQUARE_WEIGHTS[i]\r\n else:\r\n numOpp+=SQUARE_WEIGHTS[i]\r\n return numPlayer-numOpp", "def custom_score_2(game, player):\n \n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n \n # shortcut to definite state:\n # 1. my agent win -> return very high score\n if opp_moves == 0:\n return float(\"inf\")\n # 2. opponenent's agent win -> return very low score\n elif own_moves == 0:\n return float(\"-inf\")\n\n # score: avaliable moves ratio\n return float(own_moves/opp_moves)", "def custom_score(game, player):\n \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n if game.move_count < 15:\n return center_modified_score(game, player)\n\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves - opp_moves)", "def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n # First obtain the base information to calculate player & opponent\n # feature values\n player_legal_moves = game.get_legal_moves(player)\n opponent = game.get_opponent(player)\n opponent_legal_moves = game.get_legal_moves(opponent)\n if len(player_legal_moves) != len(opponent_legal_moves):\n return float(len(player_legal_moves) - len(opponent_legal_moves))\n \n # Get_center_coordinates and opponent. 
Then set the list of participants\n center_coordinates = center_y, center_x = get_center_coordinates(game)\n participants = [player, opponent]\n \n # Then, for each participant obtain his/her feature values \n for participant in participants:\n if participant == player:\n p_legal_moves = player_legal_moves\n player_either = player\n participant_coordinates = p_y, p_x = \\\n game.get_player_location(participant)\n player_legal_move_count, player_start_center_distance, \\\n player_total_next_moves, player_max_path_length, \\\n player_path_count, player_min_center_diff \\\n = \\\n get_player_feature_values(game, center_coordinates, \\\n player_either,participant_coordinates, p_legal_moves)\n else:\n p_legal_moves = opponent_legal_moves\n player_either = opponent\n participant_coordinates = p_y, p_x \\\n = game.get_player_location(participant)\n opponent_legal_move_count, opponent_start_center_distance, \\\n opponent_total_next_moves, opponent_max_path_length, \\\n opponent_path_count, opponent_min_center_diff \\\n = \\\n get_player_feature_values(game, center_coordinates, \\\n player_either, participant_coordinates, p_legal_moves)\n \n # Place each participant's feature values in a tuple/vector surrogate \n pro_player_vector = \\\n (player_legal_move_count, player_start_center_distance, \\\n player_total_next_moves, player_max_path_length, player_path_count, \\\n opponent_min_center_diff)\n pro_opponent_vector = \\\n (opponent_legal_move_count, opponent_start_center_distance, \\\n opponent_total_next_moves, opponent_max_path_length, \\\n opponent_path_count, player_min_center_diff)\n \n # Provide a weighting vector for the features \n weight_vector = (1.5,0.1,1.0,0.001,0.001,0.001)\n \n # Calculate the return value = weighted difference of players' features\n weighted_difference_dot_product = sum(p*(q-r ) for p,q,r \\\n in zip(weight_vector, pro_player_vector, pro_opponent_vector))\n \n return float(weighted_difference_dot_product)", "def custom_score_6(game, player):\n \"\"\"custom_score_6 heuristic function aims towards weighted chances heuristics\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) #Calculate length of available moves for myPlayer\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves for the oppositePlayer\n return float(length_my_player_moves*length_my_player_moves - 1.5*length_opp_payer_moves*length_opp_payer_moves)", "def utility(self, state, player):\n if state.isWin() or state.isLose():\n return state.getScore()\n\n # In case of cycle.\n if player == PACMAN:\n return INFINITY\n else:\n return -INFINITY", "def calculate_score(player_cards):\n score = sum(player_cards)\n return score", "def scoreEvaluationFunction(currentGameState, index):\n if currentGameState.isLose():\n return -float(\"inf\")\n \n if currentGameState.isWin():\n return float(\"inf\")\n\n # ghost function\n def ghostScore(gameState):\n if len(gameState.getGhostStates()) == 0:\n return 0\n score = 0\n newGhostPos = gameState.getGhostPositions()\n newGhostStates = gameState.getGhostStates()\n for pacman in gameState.getPacmanPositions():\n for i in range(len(newGhostPos)):\n if newGhostStates[i].scaredTimer > 0:\n score += ((max(4 - euclidDistance(pacman, newGhostPos[i]), 0)) ** 2)\n else:\n score -= ((max(4 - euclidDistance(pacman, newGhostPos[i]), 0)) ** 2)\n if manhattanDistance(pacman, newGhostPos[i]) < 2:\n return 
-float(\"inf\")\n \n return score\n \n # food function\n def foodScore(gameState):\n score = 0\n for pacman in gameState.getPacmanPositions():\n pacScore = []\n for foodCoord in gameState.getFood().asList():\n pacScore.append(euclidDistance(foodCoord, pacman))\n score = min(pacScore)\n \n score = score * -2\n score -= len(gameState.getFood().asList()) * 15\n return score\n\n\n totalScore = currentGameState.getScore()[0]\n totalScore += ghostScore(currentGameState) \n totalScore += foodScore(currentGameState)\n return totalScore", "def calculate_scores(players):\n scores = {}\n for player in players.tuple_:\n scores[player.id_] = player.score()\n return scores", "def evaluationFunction(self, currentGameState, action):\n # Useful information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n oldFood = currentGameState.getFood()\n newGhostStates = successorGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n\n \"*** YOUR CODE HERE ***\"\n\n score = 10000\n if successorGameState.isWin():\n return 100000000\n for ghost in newGhostStates:\n ghostPos = ghost.getPosition()\n if util.manhattanDistance(ghostPos, newPos) < 2:\n score -= 10000\n else:\n score += util.manhattanDistance(ghostPos, newPos) * 1\n \n nearFood = 1000\n farFood = 1000\n for foodPos in oldFood.asList():\n dist = util.manhattanDistance(foodPos, newPos)\n if (dist < nearFood):\n nearFood = dist\n if (dist > farFood):\n farFood = dist\n if (currentGameState.getNumFood() < successorGameState.getNumFood()):\n score += 5\n\n if action == Directions.WEST:\n score -= 1\n if action == Directions.STOP:\n score -= 2\n \n for scareTime in newScaredTimes:\n score += scareTime * 1\n\n score -= 2 * farFood\n score -= 5 * nearFood\n capsuleplaces = currentGameState.getCapsules()\n if successorGameState.getPacmanPosition() in capsuleplaces:\n score += 5\n return max(score, 0)\n \n #their original return\n #return successorGameState.getScore()", "def __get_score(self):\n for pair in zip(self.nu[self.nu_idx:], self.sw[self.sw_idx:]):\n if pair[0] == pair[1]:\n self.score += 1\n else:\n break", "def custom_score(game, player):\n \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n # First obtain the improved score\n player_legal_moves = game.get_legal_moves(player)\n opponent = game.get_opponent(player)\n opponent_legal_moves = game.get_legal_moves(opponent)\n improved = len(player_legal_moves) - len(opponent_legal_moves)\n if improved != 0:\n return float(improved)\n \n # Second get differences from center\n center_coordinates = center_y, center_x = get_center_coordinates(game)\n player_coordinates = game.get_player_location(player)\n opponent_coordinates = game.get_player_location(opponent)\n player_center_dist = get_distances_from_center(center_coordinates, player_coordinates)\n opponent_center_dist = get_distances_from_center(center_coordinates, opponent_coordinates)\n center_dist_diff = player_center_dist - opponent_center_dist\n \n # Third obtain next_moves\n player_next_moves = [get_next_moves(game, move, list(move)) for move in player_legal_moves]\n opponent_next_moves = [get_next_moves(game, move, list(move)) for move in opponent_legal_moves] \n improved_next = len(player_next_moves) - len(opponent_next_moves)\n \n # Put player and opponent feature differences in a tuple/vector surrogoate\n feature_diff_vector 
= (improved, center_dist_diff, improved_next)\n \n # Provide a weighting vector for the features of each player-participant\n weight_vector = (1.5,0.1,1.0)\n # Calculate the return value = weighted difference of players' features\n weighted_difference_dot_product = sum(p*q for p,q, \\\n in zip(feature_diff_vector, weight_vector))\n \n return float(weighted_difference_dot_product)", "def get_game_score(self):\n if self.game_is_tied():\n return 0\n elif self.is_game_won():\n my_available_steps = self.steps_available(self.loc)\n opp_available_steps = self.steps_available(self.opponent_loc)\n my_score = self.my_score - self.penalty_score if my_available_steps == 0 else self.my_score\n opp_score = self.opponent_score - self.penalty_score if opp_available_steps == 0 else self.opponent_score\n return (my_score - opp_score) / (abs(my_score) + abs(opp_score))\n else:\n if abs(self.my_score) + abs(self.opponent_score) == 0:\n return 0\n return (self.my_score - self.opponent_score) / (abs(self.my_score) + abs(self.opponent_score))", "def update_score():\n pass", "def game_score(self):\n score = self.score.quantize(Decimal('0.001'))\n return score if score > 0 else 0", "def getScore(self, gameState):\n\n if (self.red):\n return gameState.getScore()\n else:\n return gameState.getScore() * -1", "def stats(self):\n conversion, view = AB_test.unpack(self)\n prob_ctrl = self.control[0]/self.control[1]\n SE_ctrl = math.sqrt(prob_ctrl * (1 - prob_ctrl) / self.control[1])\n prob = np.zeros(len(conversion))\n SE = np.zeros(len(conversion))\n ZScore = np.zeros(len(conversion))\n pvalue = np.zeros(len(conversion))\n for i in range(len(conversion)):\n # probability is the conversion / view, \n prob[i] = conversion[i]/view[i]\n # SE is \\sqrt{ {p (1-p) \\over view} } in LaTeX\n SE[i] = math.sqrt(prob[i] * (1-prob[i]) / view[i])\n # Zscore is {p_{test} - p_{control} \\over SE_{test} ^ 2 + SE_{control} ^ 2}\n ZScore[i] = (prob[i] - prob_ctrl)/math.sqrt(SE[i] ** 2 + SE_ctrl **2)\n # pvalue is given as the cdf of the normal distribution funcion\n pvalue[i] = stats.norm.cdf(ZScore[i])\n self.prob = prob\n self.se = SE\n self.zscore = ZScore\n self.pvalue = pvalue\n return", "def calculate_scores(self):\n words = self.walk_board()\n player_scores = {}\n for word in words:\n player = word.get_owning_player()\n if player not in player_scores:\n player_scores[player] = 0\n player_scores[player] += word.get_score()\n return player_scores", "def custom_score_3(game, player):\n # TODO: finish this function!\n if game.is_winner(player): # check to see if player is in state winner\n #print(\"You win!\")\n return math.inf # abstraction of score, +inf equates to a win\n elif game.is_loser(player):\n #print(\"You lose!\")\n return -math.inf # abstraction of score, -inf equates to a loss\n\n # center\n width = game.width / 2\n height = game.height / 2\n\n # Opponent\n opponent = game.get_opponent(player)\n y_coord, x_coord = game.get_player_location(player)\n x_eval = (width - float(x_coord)) ** 2\n y_eval = (height - float(y_coord)) ** 2\n center_eval = float(x_eval + y_eval)\n\n # Remaining spaces left on the board\n rem_spaces = len(game.get_blank_spaces())\n\n # number of agent's available moves\n no_moves = len(game.get_legal_moves(player))\n\n # number of opponent's available moves\n opp_moves = len(game.get_legal_moves(opponent))\n\n # evaluation of board \"goodness\"\n # using moves available to both players\n # Idea is player chooses moves with scores that maximise whilst minimizing\n # evaluate board states and positions 
as scores\n opp_score = opp_moves - center_eval\n score = no_moves - opp_score\n return float(score)", "def getScore(data):\n return score", "def vanilaScore(self,attended,state,W):", "def custom_score(game, player):\n\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n return __heuristic3__(game, player)", "def evaluate(state):\r\n if wins(state, COMP):\r\n score = +1\r\n elif wins(state, HUMAN):\r\n score = -1\r\n else:\r\n score = 0\r\n\r\n return score", "def evaluationFunction(self, currentGameState, action):\n # Useful information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n newFood = successorGameState.getFood()\n newGhostStates = successorGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n \n \"*** YOUR CODE HERE ***\"\n\n final_score=0.0\n #for ghosts\n Ghost_dist= []\n for ghost in newGhostStates:\n Ghost_Position = ghost.getPosition() \n d=manhattanDistance(Ghost_Position, newPos)\n Ghost_dist.append(d)\n \n for i in Ghost_dist:\n factor=1\n if(i<=1):\n if(ghost.scaredTimer==0): \n final_score=final_score-200\n else:\n final_score=final_score + 1500\n factor=-1\n\n #for capsule\n capsule_state= currentGameState.getCapsules()\n capsule_dist= []\n for capsule in capsule_state:\n b=manhattanDistance(capsule,newPos)\n capsule_dist.append(b)\n\n for j in capsule_dist:\n if(b==0):\n final_score=final_score + 100\n else:\n final_score=final_score + (10.0/b)\n\n #for food\n Food= currentGameState.getFood() \n food_list = Food.asList()\n food_pos = []\n for k in food_list:\n a=manhattanDistance(k,newPos)\n food_pos.append(a)\n for i in food_pos:\n if(i==0):\n final_score=final_score + 100\n else:\n final_score=final_score + (1.0/(i**2))\n return final_score", "def _calculate_score(self):\n mul = self._check_board()\n if mul > 0:\n inc = 100 * mul + ((mul - 1) * 25)\n self.score += inc", "def custom_score_3(game, player):\n \n \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n player_legal_moves = game.get_legal_moves(player)\n opponent = game.get_opponent(player)\n opponent_legal_moves = game.get_legal_moves(opponent)\n player_legal_move_count, opponent_legal_move_count = \\\n len(player_legal_moves), len(opponent_legal_moves)\n move_count_difference = player_legal_move_count - opponent_legal_move_count\n # Find coordinates of center box\n h, w = get_center_coordinates(game)\n # Retrieve player's coordinates\n y, x = game.get_player_location(player)\n # Obtain coordinate further, closest to origin\n furthest_coord, closest_coord = max(h - y, w -x), min(h - y, w - x)\n # Return weighted, vector-valued length from origin / sum of weights\n weighted_distance_from_center = \\\n math.sqrt((closest_coord**2 + 2*(furthest_coord**2)))/3\n feature_vector = (move_count_difference, weighted_distance_from_center)\n \n weight_vector = (1.0,0.1)\n \n weighted_difference_dot_product = sum(p*q for p,q, \\\n in zip(weight_vector, feature_vector)) \n \n return float(weighted_difference_dot_product)", "def utility(state):\n return state.getScore()", "def get_score(self, game_state):\n if self.red:\n return game_state.get_score()\n else:\n return game_state.get_score() * -1", "def calculate_player_position_score(marbles: list):\n prime = Evaluator.prime_positions\n good = Evaluator.good_positions\n 
position_score = 0\n for marble in marbles:\n if marble in prime:\n position_score += 10\n elif marble in good:\n position_score += 5\n else:\n position_score -= 1\n return position_score", "def custom_score_general(game, player, constants=[]):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n v = []\n\n if constants[0] != 0 or constants[2] != 0:\n own_moves = number_moves(game, player) / 8\n\n if own_moves == 0:\n return float(\"-inf\")\n\n v.append(own_moves)\n\n if constants[1] != 0 or constants[2] != 0:\n opp_moves = number_moves(game, game.get_opponent(player)) / 8\n\n if opp_moves == 0:\n return float(\"inf\")\n\n v.append(opp_moves)\n\n if constants[2] != 0:\n move_ratio = (own_moves * 8) / (opp_moves * 8) / 8\n v.append(move_ratio)\n\n if constants[3] != 0 or constants[5] != 0:\n own_openness = nearby_openness(game, player) / 80\n v.append(own_openness)\n\n if constants[4] != 0 or constants[5] != 0:\n opp_openness = nearby_openness(game, game.get_opponent(player)) / 80\n v.append(opp_openness)\n\n if constants[5] != 0:\n openness_ratio = (own_openness * 80) / (opp_openness + 0.0001 * 80) /80\n v.append(openness_ratio)\n\n centerness_max = (game.width / 2.)**2 + (game.height / 2.)**2\n\n if constants[6] != 0 or constants[8] != 0:\n own_centerness = centerness(game, player) / centerness_max\n v.append(own_centerness)\n\n if constants[7] != 0 or constants[8] != 0:\n opp_centerness = centerness(game, game.get_opponent(player)) / centerness_max\n v.append(opp_centerness)\n\n if constants[8] != 0:\n centerness_ratio = (own_centerness * centerness_max) / (centerness_max * opp_centerness + 0.1) / centerness_max\n\n return sum([x * y for x, y in zip(constants, v)])" ]
[ "0.68148273", "0.68119067", "0.6762667", "0.6762667", "0.6762667", "0.6762667", "0.6762667", "0.6762667", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.6716563", "0.6654658", "0.6628369", "0.6567261", "0.65515983", "0.6538091", "0.6504703", "0.64821315", "0.64821315", "0.64821315", "0.6481259", "0.6478393", "0.64666617", "0.6465129", "0.6462271", "0.6431659", "0.6431099", "0.6430386", "0.64295894", "0.6427708", "0.642411", "0.64185905", "0.6415086", "0.63960457", "0.63758636", "0.6364931", "0.6363011", "0.63545364", "0.63506764", "0.6347378", "0.632407", "0.6323092", "0.6317552", "0.63041025", "0.63031983", "0.6302785", "0.62975925", "0.6287927", "0.6284152", "0.6282239", "0.6280579", "0.62714636", "0.6268819", "0.6255092", "0.62508017", "0.62475055", "0.62436897", "0.62414646", "0.62262577", "0.62148684", "0.62141883", "0.6210203", "0.61953425", "0.6177278", "0.6173667", "0.6162166", "0.6159499", "0.6158116", "0.6119973" ]
0.6767508
2
Method which calculates the DRE metric
def set_dre(self):
    bx = self.get_standard_stats()
    ptos = float(bx["t2p_conv"]*2 + bx["t3p_conv"]*3 + bx["tl_conv"])
    fga = float(bx["t2p_int"] + bx["t3p_int"])
    trb = float(bx["reb_def"] + bx["reb_of"])
    d1 = ptos + (0.2*trb) + (1.7*float(bx["steals"])) + (0.535*float(bx["block_shots"])) + (0.5*float(bx["assists"]))
    d2 = (0.9*fga) + (0.35*float(bx["tl_int"])) + (1.4*float(bx["turnovers"])) + (0.136*float(bx["minutes"]))
    result = "%.2f" % round(d1-d2, 2)
    self.dre = "%.2f" % round(Decimal(result)/bx["games"], 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_metrics(self):\n pass", "def calculate(self) -> float:", "def calculate(self):", "def calculate_dataset_metrics(self):\n pass", "def calculateDataRate(self):\n pass", "def getMeasures():", "def denominator(self, ???):", "def calculate(self):\r\n\r\n pass", "def calculate(self):\r\n pass", "def compute_statistics(self):", "def measure(self, recommender):", "def _core_calc_degrad(self,bd,Ld) :\n\t\tdegrad = np.dot(Ld,bd) # Do matrix multiplication \n\t\tdegrad = np.exp(degrad) # Exponentiate to convert log to real\n\t\treturn degrad", "def get_metric(self) -> GreedyDiarizationErrorRate:\n return GreedyDiarizationErrorRate(collar=0.0, skip_overlap=False)", "def cost(self) -> float:", "def calculate(self):\n pass", "def metric(x,y):\n sm = x + y\n df = x - y\n div = sm / df if df != 0 else 0\n return \"sum is %s \" %sm, \"difference is %s \" %df, \"division of difference to sum is %s\" %div", "def measure(self):\n pass", "def _get_eval_metric(self):\n raise NotImplementedError", "def get_metric(self, data_row: pd.Series) -> float:", "def calc(self):\n return None", "def compute(self) -> Tuple[float, float, float]:\n # ddp hotfix, could be done better\n # but metric must handle DDP on it's own\n if self._ddp_backend == \"xla\":\n self.statistics = {\n k: xm.mesh_reduce(k, v, np.sum) for k, v in self.statistics.items()\n }\n elif self._ddp_backend == \"ddp\":\n for key in self.statistics:\n value: List[int] = all_gather(self.statistics[key])\n value: int = sum(value)\n self.statistics[key] = value\n\n precision_value, recall_value, f1_value = get_binary_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n zero_division=self.zero_division,\n )\n return precision_value, recall_value, f1_value", "def metric(self, eles):\n point = eles.pop()\n dist = edist(point, self.soln)\n return dist", "def compute(self) -> Any:\n # ddp hotfix, could be done better\n # but metric must handle DDP on it's own\n if self._ddp_backend == \"xla\":\n device = get_device()\n for key in self.statistics:\n key_statistics = torch.tensor([self.statistics[key]], device=device)\n key_statistics = xm.all_gather(key_statistics).sum(dim=0).cpu().numpy()\n self.statistics[key] = key_statistics\n elif self._ddp_backend == \"ddp\":\n for key in self.statistics:\n value: List[np.ndarray] = all_gather(self.statistics[key])\n value: np.ndarray = np.sum(np.vstack(value), axis=0)\n self.statistics[key] = value\n\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n support=self.statistics[\"support\"],\n zero_division=self.zero_division,\n )\n if self.compute_per_class_metrics:\n return per_class, micro, macro, weighted\n else:\n return [], micro, macro, weighted", "def compute(self) -> Any:\n # ddp hotfix, could be done better\n # but metric must handle DDP on it's own\n if self._ddp_backend == \"xla\":\n device = get_device()\n for key in self.statistics:\n key_statistics = torch.tensor([self.statistics[key]], device=device)\n key_statistics = xm.all_gather(key_statistics).sum(dim=0).cpu().numpy()\n self.statistics[key] = key_statistics\n elif self._ddp_backend == \"ddp\":\n for key in self.statistics:\n value: List[np.ndarray] = all_gather(self.statistics[key])\n value: np.ndarray = np.sum(np.vstack(value), axis=0)\n self.statistics[key] = value\n\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n 
fn=self.statistics[\"fn\"],\n support=self.statistics[\"support\"],\n zero_division=self.zero_division,\n )\n if self.compute_per_class_metrics:\n return per_class, micro, macro, weighted\n else:\n return [], micro, macro, weighted", "def metric(x, y):\n d = x - y\n s = x + y\n print('difference is %g, sum is %g' % (d, s))\n if s == 0:\n return 0\n return d / s", "def calc_stat_values(self):", "def _compute_rmse(self, data):\n actual = data.rating.values\n pred = self._predict_all(data)\n rmse = np.sqrt(np.sum((actual - pred) **2) /len(pred))\n return rmse", "def make_metrics(self):\n num_batches = self.data_loader.number_of_batches()\n dose_score_vec = np.zeros(num_batches)\n\n # Only make calculations if data_loader is not empty\n if not self.data_loader.file_paths_list:\n print('No patient information was given to calculate metrics')\n else:\n # Change batch size to 1\n self.data_loader.batch_size = 1 # Loads data related to ground truth patient information\n if self.dose_loader is not None:\n self.dose_loader.batch_size = 1 # Loads data related to ground truth patient information\n\n for idx in tqdm.tqdm(range(num_batches)):\n # Get roi masks for patient\n self.get_constant_patient_features(idx)\n # Get dose tensors for reference dose and evaluate criteria\n reference_dose = self.get_patient_dose_tensor(self.data_loader)\n if reference_dose is not None:\n self.reference_dose_metric_df = self.calculate_metrics(self.reference_dose_metric_df, reference_dose)\n # If a dose loader was provided, calculate the score\n if self.dose_loader is not None:\n new_dose = self.get_patient_dose_tensor(self.dose_loader)\n # Make metric data frames\n self.new_dose_metric_df = self.calculate_metrics(self.new_dose_metric_df, new_dose)\n # Evaluate mean absolute error of 3D dose\n dose_score_vec[idx] = np.sum(np.abs(reference_dose - new_dose)) / np.sum(self.possible_dose_mask)\n # Save metrics at the patient level (this is a template for how DVH stream participants could save\n # their files\n # self.dose_metric_df.loc[self.patient_list[0]].to_csv('{}.csv'.format(self.patient_list[0]))\n\n if self.dose_loader is not None:\n dvh_score = np.nanmean(np.abs(self.reference_dose_metric_df - self.new_dose_metric_df).values)\n dose_score = dose_score_vec.mean()\n return dvh_score, dose_score\n else:\n print('No new dose provided. 
Metrics were only calculated for the provided dose.')", "def calculate(self):\n\n beta = 1 # or 0.5 or 2 can also calculate F2 or F0.5 measure\n\n beta_squared = beta * beta\n precision = Precision()\n precision.confusion_matrix = self.confusion_matrix\n precision = precision.calculate()\n recall = Recall()\n recall.confusion_matrix = self.confusion_matrix\n recall = recall.calculate()\n\n denominator = beta_squared * precision + recall\n\n if denominator != 0:\n return (1 + beta_squared) * ((precision * recall) / denominator)\n else:\n return 0", "def calculate_output(self):", "def accuracy(self):", "def advancedStats():", "def calculate(self):\n\n tp = self.confusion_matrix.tp\n fp = self.confusion_matrix.fp\n fn = self.confusion_matrix.fn\n\n return tp / (tp + fp + fn)", "def eval_error_metric(predt, dtrain: xgb.DMatrix):\n label = dtrain.get_label()\n r = np.zeros(predt.shape)\n gt = predt > 0.5\n if predt.size == 0:\n return \"CustomErr\", 0\n r[gt] = 1 - label[gt]\n le = predt <= 0.5\n r[le] = label[le]\n return 'CustomErr', np.sum(r)", "def calc_reduction_diesel_used (self):\n self.reduction_diesel_used = self.diesel_equiv_captured - \\\n self.loss_heat_recovery\n #~ print 'self.reduction_diesel_used',self.reduction_diesel_used", "def rmdspe(self) -> float:\n return float(np.sqrt(np.median(np.square(self._percentage_error()))) * 100.0)", "def getMeasure(unique_name):", "def getMeasure(unique_name):", "def compute(self) -> Tuple[float, float, float]:\n # @TODO: ddp hotfix, could be done better\n if self._is_ddp:\n for key in self.statistics:\n value: List[float] = all_gather(self.statistics[key])\n value: float = sum(value)\n self.statistics[key] = value\n\n precision_value, recall_value, f1_value = get_binary_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n zero_division=self.zero_division,\n )\n return precision_value, recall_value, f1_value", "def calc_D(state):\n\t\tif t < thresh:\n\t\t\tstate.D_g[t] = 0.5\n\t\t\tstate.D_n[t] = 0.5\n\t\telse:\n\t\t\tif mod == \"constant\":\n\t\t\t\tstate.D_g[t] = D\n\t\t\t\tstate.D_n[t] = 1-D\n\t\t\tif mod == \"value\":\n\t\t\t\t# NOTE: if rmag and lmag is 1/0, can just use V\n\t\t\t\t# average of two actions\n\t\t\t\tV = np.mean(1/2*(state.QG[t,:] - state.QN[t,:])) # state average(?) 
\n\t\t\t\tV = 1/(1 + np.exp(-V*k)) # translate between 0 and 1\n\t\t\t\tstate.D_g[t] = V \n\t\t\t\tstate.D_n[t] = 1 - V\n\t\treturn state", "def patrimony_total(self):\n pass", "def dfr(self):\n return self.table[1, 0] / (self.table[1, 0] + self.table[1, 1])", "def calculate(self):\n\n tp = self.confusion_matrix.tp\n fp = self.confusion_matrix.fp\n fn = self.confusion_matrix.fn\n\n return 1 - abs(fn - fp) / (2 * tp + fn + fp)", "def cost_derivative(self,output_results,y):\r\n\t\treturn (output_results-y)", "def compute_metric(inargs, exp_id, date):\n date_str = h.dt_to_yyyymmddhhmmss(date)\n fg_str = '_da' if inargs.fg else ''\n\n # Load presaved forecast data\n if exp_id in ['ey', 'rw']: # Make an exception for radar as exp_id\n radar_fn = (config.savedir_base + exp_id + '/prec_fields/' + exp_id +\n '_' + date_str + '.npy')\n fc_data = np.load(radar_fn)\n else:\n fc_fn = (config.savedir_base + exp_id + '/' +\n config.metric_dict[inargs.metric.split('-')[0]]['var'] +\n '_fields/' +\n config.metric_dict[inargs.metric.split('-')[0]]['det_or_ens'] +\n fg_str + '_' + date_str + '.npy')\n fc_data = np.load(fc_fn)\n\n if config.metric_dict[inargs.metric.split('-')[0]]['use_radar']:\n # Load presaved radar data\n radar_fn = (config.savedir_base + inargs.radar_comp + '/prec_fields/' +\n inargs.radar_comp + '_' + date_str + '.npy')\n radar_data = np.load(radar_fn)\n if inargs.combine_masks is not None:\n comb_fn = (config.savedir_base + inargs.combine_masks +\n '/prec_fields/' + inargs.combine_masks + '_' +\n date_str + '.npy')\n comb_data = np.load(comb_fn)\n else:\n comb_data = None\n radar_data, fc_data = h.handle_nans(radar_data, fc_data,\n inargs.radar_thresh,\n comb_data)\n\n if not inargs.upscale == 1:\n fc_data = h.upscale_fields(fc_data, inargs.upscale)\n if 'radar_data' in locals():\n radar_data = h.upscale_fields(radar_data, inargs.upscale)\n\n # Pass data to computation functions\n if inargs.metric in ['det_mean_prec', 'det_mean_cape', 'det_mean_cin']:\n m = h.compute_det_domain_mean(fc_data)\n elif inargs.metric in ['det_median_prec', 'det_median_cape',\n 'det_median_cin']:\n m = h.compute_det_domain_median(fc_data)\n elif inargs.metric == 'det_rmse':\n m = h.compute_det_rmse(radar_data, fc_data)\n elif 'det_sal' in inargs.metric:\n _, sal_thresh = inargs.metric.split('-')\n sal_thresh = float(sal_thresh) / 10.\n m = h.compute_det_sal(radar_data, fc_data, sal_thresh)\n elif 'det_fss' in inargs.metric:\n # Parse\n _, fss_thresh, fss_size = inargs.metric.split('-')\n fss_thresh = float(fss_thresh) / 10.\n fss_size = int(fss_size)\n # Update dictionary NOTE: This doesn't seem to work\n # config.metric_dict[inargs.metric.split('-')[0]]['ylabel'] = \\\n # 'FSS ' + str(fss_thresh) + 'mm/h ' + str(fss_size * 2.8) + 'km'\n m = h.compute_det_fss(radar_data, fc_data, fss_thresh, fss_size)\n elif inargs.metric == 'det_prec_hist':\n m = h.compute_det_prec_hist(fc_data)\n elif inargs.metric == 'ens_rmse':\n m = h.compute_ens_rmse(radar_data, fc_data)\n elif inargs.metric == 'ens_rmv':\n m = h.compute_ens_rmv(fc_data)\n elif inargs.metric == 'ens_crps':\n m = h.compute_ens_crps(radar_data, fc_data)\n elif 'ens_bs' in inargs.metric:\n # Parse\n s = inargs.metric.split('-')\n _, bs_thresh, bs_size = s\n bs_size = int(bs_size)\n bs_thresh = float(bs_thresh) / 10.\n # Update dictionary\n # config.metric_dict[inargs.metric.split('-')[0]]['ylabel'] = \\\n # 'BS ' + str(bs_thresh) + 'mm/h '\n m = h.compute_ens_bs(radar_data, fc_data, bs_thresh, bs_size)\n else:\n raise ValueError('Metric %s does not 
exist.' % inargs.metric)\n\n return m", "def performance_measure(self, x):\n # \"calculate performance measure\" \n pref = x.evaluate()\n return pref", "def evaluate(self):\n\n if self.opt['AccurateDFid']:\n DX = self.reconstruct()\n S = self.xstep.S\n dfd = (np.linalg.norm(self.xstep.W * (DX - S))**2) / 2.0\n if self.xmethod == 'fista':\n X = self.xstep.getcoef()\n else:\n X = self.xstep.var_y1()\n rl1 = np.sum(np.abs(X))\n return dict(DFid=dfd, RegL1=rl1,\n ObjFun=dfd + self.xstep.lmbda * rl1)\n else:\n return None", "def calculate_metrics(self):\n \n for cv in self.cat_vals:\n cat_inds = np.where(self.category_values == cv)[0]\n weighted_difference = (self.z[cat_inds]-self.mz[cat_inds])/self.weight_values[cat_inds]\n resid = np.sqrt(np.sum(np.square(weighted_difference))/(cat_inds.size))\n self.metric[str(cv)] = resid\n \n return self.metric", "def diarisationMetrics(reference, hypothesis, audioLength, collar_val = 0.5):\r\n with warnings.catch_warnings():\r\n warnings.simplefilter(\"ignore\")\r\n\r\n metrics = {}\r\n # for collar, skip_overlap, expName in [(0.0, False, 'NoCollarOverlap'),(collar_val, False, 'CollarOverlap'), (0.0, True, 'NoCollarNoOverlap'), (collar_val, True, 'CollarNoOverlap')]:\r\n for collar, skip_overlap, expName in [(collar_val, False, 'CollarOverlap'), (collar_val, True, 'CollarNoOverlap')]:\r\n diarizationErrorRate = DiarizationErrorRate(collar=collar, skip_overlap=skip_overlap)\r\n jaccardErrorRate = JaccardErrorRate(collar=collar, skip_overlap=skip_overlap)\r\n purity = DiarizationPurity(collar=collar, skip_overlap=skip_overlap)\r\n coverage = DiarizationCoverage(collar=collar, skip_overlap=skip_overlap)\r\n detectionAccuracy = DetectionAccuracy(collar=collar, skip_overlap=skip_overlap)\r\n\r\n print('*'*10, 'Collar: {}ms Skip Overlap: {}'.format(collar, skip_overlap), '*'*10)\r\n print(\"DER = {0:.5f}\".format(diarizationErrorRate(reference, hypothesis, uem=Segment(0, audioLength))))\r\n print(\"JER = {0:.5f}\".format(jaccardErrorRate(reference, hypothesis, uem=Segment(0, audioLength))))\r\n print(\"Optimal mapping = {}\".format(diarizationErrorRate.optimal_mapping(reference, hypothesis)))\r\n print(\"Purity = {0:.5f}\".format(purity(reference, hypothesis, uem=Segment(0, audioLength))))\r\n print(\"Coverage = {0:.5f}\".format(coverage(reference, hypothesis, uem=Segment(0, audioLength))))\r\n dtAcc = detectionAccuracy.compute_components(reference, hypothesis)\r\n # print(\"Detection Accuracy: FN = {:.5f}, FP = {:.5f}, TN = {:.5f}, TP = {:.5f}\\n\".format(dtAcc['false negative'],dtAcc['false positive'],dtAcc['true negative'],dtAcc['true positive']))\r\n\r\n metrics[expName]={}\r\n\r\n keys = ['DER','JER', 'mapping', 'purity', 'coverage', 'detectionAccuracy']\r\n values = diarizationErrorRate(reference, hypothesis, detailed=True, uem=Segment(0, audioLength)),\\\r\n jaccardErrorRate(reference, hypothesis, detailed=True, uem=Segment(0, audioLength)),\\\r\n diarizationErrorRate.optimal_mapping(reference, hypothesis),\\\r\n purity(reference, hypothesis, uem=Segment(0, audioLength)),\\\r\n coverage(reference, hypothesis, uem=Segment(0, audioLength)),\\\r\n detectionAccuracy.compute_components(reference, hypothesis)\r\n\r\n metrics[expName] = dict(zip(keys, list(values)))\r\n metrics = edict(metrics)\r\n return metrics", "def __dmr_analysis(self, debug=False):\n global hfAdcRange, adcCountRange\n if self.buffer is None or len(self.buffer) != self.bufsize:\n self.set_eval_error(f'Data length ({len(self.buffer)}) differs from expected ({self.bufsize})')\n return 
None\n\n It = self.buffer[:self.bufsize//2]\n Qt = self.buffer[self.bufsize//2:]\n\n self.results['ADC_I'] = hfAdcRange * (It * 2 / adcCountRange - 1)\n self.results['ADC_Q'] = hfAdcRange * (Qt * 2 / adcCountRange - 1)\n\n # subtract the DC component and convert to int32\n It = np.around(It-np.mean(It)).astype('int32')\n Qt = np.around(Qt-np.mean(Qt)).astype('int32')\n\n # find the bit error rate and constant symbol intervals\n maxlen = 20*200 # max length of returned Iref, Qref\n numerr, numbit, Iref, Qref, symlenref = get_ber(It, Qt, maxlen)\n\n if numerr is None or numbit is None:\n self.set_eval_error(f'Too small data length - {len(self.buffer)}')\n return None\n\n ber = numerr/numbit\n\n return ber", "def compute_metrics(self, results: list) -> dict:", "def compute_metric(df, preds):\n\n y = np.array(df['pressure'].values.tolist())\n\n # inspiratory phase\n mask = 1 - np.array(df['u_out'].values.tolist())\n\n # combine with mae calculusse\n mae = mask * np.abs(y - preds)\n mae = mae.sum() / mask.sum()\n\n return mae", "def calculate(self):\n\n specificity = self.confusion_matrix.tn / (self.confusion_matrix.tn + self.confusion_matrix.fp)\n\n false_positive_rate = 1 - specificity\n\n true_positive_rate = self.confusion_matrix.tp / (self.confusion_matrix.tp + self.confusion_matrix.fn)\n\n return (true_positive_rate - false_positive_rate + 1) / 2", "def calculate(self):\n\n tp = self.confusion_matrix.tp\n tn = self.confusion_matrix.tn\n fp = self.confusion_matrix.fp\n fn = self.confusion_matrix.fn\n n = self.confusion_matrix.n\n\n fn_tp = fn + tp\n fp_tp = fp + tp\n\n h1 = -((fn_tp / n) * math.log2(fn_tp / n) +\n (1 - fn_tp / n) * math.log2(1 - fn_tp / n))\n\n h2 = -((fp_tp / n) * math.log2(fp_tp / n) +\n (1 - fp_tp / n) * math.log2(1 - fp_tp / n))\n\n p00 = 1 if tn == 0 else (tn / n)\n p01 = 1 if fn == 0 else (fn / n)\n p10 = 1 if fp == 0 else (fp / n)\n p11 = 1 if tp == 0 else (tp / n)\n\n h12 = -((tn / n) * math.log2(p00) +\n (fn / n) * math.log2(p01) +\n (fp / n) * math.log2(p10) +\n (tp / n) * math.log2(p11))\n\n mi = h1 + h2 - h12\n\n vi = h1 + h2 - 2 * mi\n return vi", "def _derivativeTerm(self):\n\n\t\treturn self._Kd * (self._getErrorFunction() - self._previousError) / self._dt", "def calculations():\r\n\t\r\n\tpayload, avionics, booster = weight_input()\r\n\r\n\tdrogue_size, drogue_force = drogue_calc()\r\n\tmain_size, main_force = main_calc(avionics, booster, drogue_force) #total mass, payload detaches\r\n\r\n\tprint(\"Drogue is diameter is \" + str(drogue_size) + \" inches\")\r\n\tprint(\"Main is diameter is \" + str(main_size) + \" inches\")", "def compute_metrics(mat,language='English',method ='dimensional',output='data_frame'):\n language = language.lower()\n method = method.lower()\n if language == 'english':\n if method == 'dimensional':\n if output == 'data_frame':\n mat['NegCount'] = mat['DetectCount'] - mat['PosCount']\n mat['MeanNegVal'] = mat['NegVal'] / mat['NegCount']\n mat['MeanPosVal'] = mat['PosVal'] / mat['PosCount']\n mat['MeanArousal'] = mat['Arousal'] / mat['DetectCount']\n mat['MeanDominance'] = mat['Dominance'] / mat['DetectCount']\n mat['PosNegValDifference'] = mat['MeanPosVal'] - mat['MeanNegVal']\n mat['MeanValence'] = (mat['NegVal'] + mat['PosVal'])/ mat['DetectCount'] \n mat['AbsMeanNegVal'] = abs(mat['MeanNegVal'])\n mat['DetectPercent'] = mat['DetectCount'] / mat['TokenCount']\n mat['DensityValence'] =(mat['NegVal'] + mat['PosVal'])/ mat['TokenCount'] \n mat['DensityNegVal'] = mat['NegVal'] / mat['TokenCount']\n mat['DensityPosVal'] = mat['PosVal'] 
/ mat['TokenCount']\n mat['DensityArousal'] = mat['Arousal'] / mat['TokenCount']\n mat['DensityDominance'] = mat['Dominance'] / mat['TokenCount']\n mat['MeanSquaredValence'] = mat['ValSq'] / mat['DetectCount']\n mat['ValenceDeviation'] = np.sqrt(mat['MeanSquaredValence'])\n return(mat)\n elif output == 'array':\n out_dict = {}\n out_dict['PosVal'] = mat[:,:,0]\n out_dict['NegVal'] = mat[:,:,1]\n out_dict['Arousal'] = mat[:,:,2]\n out_dict['Dominance'] = mat[:,:,3]\n out_dict['PosCount'] = mat[:,:,4]\n out_dict['DetectCount'] = mat[:,:,5]\n out_dict['TokenCount'] = mat[:,:,6]\n out_dict['ValSq'] = mat[:,:,7]\n\n out_dict['DetectPercent'] = np.divide(out_dict['DetectCount'],out_dict['TokenCount'])\n out_dict['NegCount'] = np.subtract(out_dict['DetectCount'],out_dict['PosCount'])\n # Mean Values:\n out_dict['MeanValence'] = np.divide(np.add(out_dict['PosVal'],out_dict['NegVal']),out_dict['DetectCount'])\n out_dict['MeanNegVal'] = np.divide(out_dict['NegVal'],out_dict['NegCount'])\n out_dict['MeanPosVal'] = np.divide(out_dict['PosVal'],out_dict['PosCount'])\n out_dict['MeanArousal'] = np.divide(out_dict['Arousal'],out_dict['DetectCount'])\n out_dict['MeanDominance'] = np.divide(out_dict['Dominance'],out_dict['DetectCount'])\n out_dict['PosNegValDifference'] = np.subtract(out_dict['MeanPosVal'] ,out_dict['MeanNegVal'])\n # Percentages:\n out_dict['DetectPosPercent'] = np.divide(out_dict['PosCount'],out_dict['DetectCount'])\n out_dict['OverallPosPercent'] = np.divide(out_dict['PosCount'],out_dict['TokenCount'])\n out_dict['DetectNegPercent'] = np.divide(out_dict['NegCount'],out_dict['DetectCount'])\n out_dict['OverallNegPercent'] = np.divide(out_dict['NegCount'],out_dict['TokenCount'])\n out_dict['MeanSquaredValence'] = np.divide(out_dict['ValSq'],out_dict['DetectCount'])\n out_dict['ValenceDeviation'] = np.sqrt(out_dict['MeanSquaredValence'])\n return(out_dict)\n else:\n print(\"Error: Output Format not found!\")\n elif method == 'discrete':\n if output == 'data_frame':\n mat['function_Percent'] = mat.function / mat.TokenCount\n mat['pronoun_Percent'] = mat.pronoun / mat.TokenCount\n mat['ppron_Percent'] = mat.ppron / mat.TokenCount\n mat['i_Percent'] = mat.i / mat.TokenCount\n mat['we_Percent'] = mat.we / mat.TokenCount\n mat['you_Percent'] = mat.you / mat.TokenCount\n mat['shehe_Percent'] = mat.shehe / mat.TokenCount\n mat['they_Percent'] = mat.they / mat.TokenCount\n mat['ipron_Percent'] = mat.ipron / mat.TokenCount\n mat['article_Percent'] = mat.article / mat.TokenCount\n mat['prep_Percent'] = mat.prep / mat.TokenCount\n mat['auxverb_Percent'] = mat.auxverb / mat.TokenCount\n mat['adverb_Percent'] = mat.adverb / mat.TokenCount\n mat['conj_Percent'] = mat.conj / mat.TokenCount\n mat['negate_Percent'] = mat.negate / mat.TokenCount\n mat['verb_Percent'] = mat.verb / mat.TokenCount\n mat['adj_Percent'] = mat.adj / mat.TokenCount\n mat['compare_Percent'] = mat.compare / mat.TokenCount\n mat['interrog_Percent'] = mat.interrog / mat.TokenCount\n mat['number_Percent'] = mat.number / mat.TokenCount\n mat['quant_Percent'] = mat.quant / mat.TokenCount\n mat['affect_Percent'] = mat.affect / mat.TokenCount\n mat['posemo_Percent'] = mat.posemo / mat.TokenCount\n mat['negemo_Percent'] = mat.negemo / mat.TokenCount\n mat['anx_Percent'] = mat.anx / mat.TokenCount\n mat['anger_Percent'] = mat.anger / mat.TokenCount\n mat['sad_Percent'] = mat.sad / mat.TokenCount\n mat['social_Percent'] = mat.social / mat.TokenCount\n mat['family_Percent'] = mat.family / mat.TokenCount\n mat['friend_Percent'] = mat.friend / 
mat.TokenCount\n mat['female_Percent'] = mat.female / mat.TokenCount\n mat['male_Percent'] = mat.male / mat.TokenCount\n mat['cogproc_Percent'] = mat.cogproc / mat.TokenCount\n mat['insight_Percent'] = mat.insight / mat.TokenCount\n mat['cause_Percent'] = mat.cause / mat.TokenCount\n mat['discrep_Percent'] = mat.discrep / mat.TokenCount\n mat['tentat_Percent'] = mat.tentat / mat.TokenCount\n mat['certain_Percent'] = mat.certain / mat.TokenCount\n mat['differ_Percent'] = mat.differ / mat.TokenCount\n mat['percept_Percent'] = mat.percept / mat.TokenCount\n mat['see_Percent'] = mat.see / mat.TokenCount\n mat['hear_Percent'] = mat.hear / mat.TokenCount\n mat['feel_Percent'] = mat.feel / mat.TokenCount\n mat['bio_Percent'] = mat.bio / mat.TokenCount\n mat['body_Percent'] = mat.body / mat.TokenCount\n mat['health_Percent'] = mat.health / mat.TokenCount\n mat['sexual_Percent'] = mat.sexual / mat.TokenCount\n mat['ingest_Percent'] = mat.ingest / mat.TokenCount\n mat['drives_Percent'] = mat.drives / mat.TokenCount\n mat['affiliation_Percent'] = mat.affiliation / mat.TokenCount\n mat['achieve_Percent'] = mat.achieve / mat.TokenCount\n mat['power_Percent'] = mat.power / mat.TokenCount\n mat['reward_Percent'] = mat.reward / mat.TokenCount\n mat['risk_Percent'] = mat.risk / mat.TokenCount\n mat['focuspast_Percent'] = mat.focuspast / mat.TokenCount\n mat['focuspresent_Percent'] = mat.focuspresent / mat.TokenCount\n mat['focusfuture_Percent'] = mat.focusfuture / mat.TokenCount\n mat['relativ_Percent'] = mat.relativ / mat.TokenCount\n mat['motion_Percent'] = mat.motion / mat.TokenCount\n mat['space_Percent'] = mat.space / mat.TokenCount\n mat['time_Percent'] = mat.time / mat.TokenCount\n mat['work_Percent'] = mat.work / mat.TokenCount\n mat['leisure_Percent'] = mat.leisure / mat.TokenCount\n mat['home_Percent'] = mat.home / mat.TokenCount\n mat['money_Percent'] = mat.money / mat.TokenCount\n mat['relig_Percent'] = mat.relig / mat.TokenCount\n mat['death_Percent'] = mat.death / mat.TokenCount\n mat['informal_Percent'] = mat.informal / mat.TokenCount\n mat['swear_Percent'] = mat.swear / mat.TokenCount\n mat['netspeak_Percent'] = mat.netspeak / mat.TokenCount\n mat['assent_Percent'] = mat.assent / mat.TokenCount\n mat['nonflu_Percent'] = mat.nonflu / mat.TokenCount\n mat['filler_Percent'] = mat.filler / mat.TokenCount\n mat['Detect_Percent'] = mat.DetectCount / mat.TokenCount\n return(mat)\n elif output == 'array':\n out_dict = {}\n out_dict['Affect'] = mat[:,:,21]\n out_dict['Posemo'] = mat[:,:,22]\n out_dict['Negemo'] = mat[:,:,23]\n out_dict['Anx'] = mat[:,:,24]\n out_dict['Anger'] = mat[:,:,25]\n out_dict['Sad'] = mat[:,:,26]\n out_dict['Function'] = mat[:,:,0]\n out_dict['CogProc'] = mat[:,:,32]\n out_dict['DetectCount'] = mat[:,:,-2]\n out_dict['TokenCount'] = mat[:,:,-1]\n\n out_dict['DetectPosPercent'] = np.divide(out_dict['Posemo'], out_dict['DetectCount'])\n out_dict['OverallPosPercent'] = np.divide(out_dict['Posemo'], out_dict['TokenCount'])\n out_dict['DetectNegPercent'] = np.divide(out_dict['Negemo'], out_dict['DetectCount'])\n out_dict['OverallNegPercent'] = np.divide(out_dict['Negemo'], out_dict['TokenCount'])\n out_dict['EmoPosPercent'] = np.divide(out_dict['Posemo'],np.add(out_dict['Posemo'],out_dict['Negemo']))\n out_dict['DetectAnxPercent'] = np.divide(out_dict['Anx'], out_dict['DetectCount'])\n out_dict['OverallAnxPercent'] = np.divide(out_dict['Anx'], out_dict['TokenCount'])\n out_dict['DetectAngerPercent'] = np.divide(out_dict['Anger'], out_dict['DetectCount'])\n 
out_dict['OverallAngerPercent'] = np.divide(out_dict['Anger'], out_dict['TokenCount'])\n out_dict['DetectSadPercent'] = np.divide(out_dict['Sad'], out_dict['DetectCount'])\n out_dict['OverallSadPercent'] = np.divide(out_dict['Sad'], out_dict['TokenCount'])\n out_dict['DetectAffectPercent'] = np.divide(out_dict['Affect'], out_dict['DetectCount'])\n out_dict['OverallAffectPercent'] = np.divide(out_dict['Affect'], out_dict['TokenCount'])\n\n\n out_dict['DetectFunctionPercent'] = np.divide(out_dict['Function'], out_dict['DetectCount'])\n out_dict['OverallFunctionPercent'] = np.divide(out_dict['Function'], out_dict['TokenCount'])\n out_dict['DetectCogprocPercent'] = np.divide(out_dict['CogProc'], out_dict['DetectCount'])\n out_dict['OverallCogprocPercent'] = np.divide(out_dict['CogProc'], out_dict['TokenCount'])\n return(out_dict)\n else:\n print(\"Error: Output Format not found!\") \n else:\n print(\"Error: Method not found!\")\n elif language == 'german':\n if method == 'dimensional':\n if output == 'data_frame':\n mat['NegCount'] = mat['DetectCount'] - mat['PosCount']\n mat['MeanNegVal'] = mat['NegVal'] / mat['NegCount']\n mat['MeanPosVal'] = mat['PosVal'] / mat['PosCount']\n mat['MeanArousal'] = mat['Arousal'] / mat['DetectCount']\n mat['MeanDominance'] = mat['Dominance'] / mat['DetectCount']\n mat['MeanPotency'] = mat['Potency'] / mat['DetectCount']\n mat['PosNegValDifference'] = mat['MeanPosVal'] - mat['MeanNegVal']\n mat['MeanValence'] = (mat['NegVal'] + mat['PosVal'])/ mat['DetectCount'] \n mat['AbsMeanNegVal'] = abs(mat['MeanNegVal'])\n mat['DetectPercent'] = mat['DetectCount'] / mat['TokenCount']\n mat['DensityValence'] =(mat['NegVal'] + mat['PosVal'])/ mat['TokenCount'] \n mat['DensityNegVal'] = mat['NegVal'] / mat['TokenCount']\n mat['DensityPosVal'] = mat['PosVal'] / mat['TokenCount']\n mat['DensityArousal'] = mat['Arousal'] / mat['TokenCount']\n mat['DensityDominance'] = mat['Dominance'] / mat['TokenCount']\n mat['MeanSquaredValence'] = mat['ValSq'] / mat['DetectCount']\n mat['ValenceDeviation'] = np.sqrt(mat['MeanSquaredValence'])\n return(mat)\n elif output == 'array':\n out_dict = {}\n out_dict['PosVal'] = mat[:,:,0]\n out_dict['NegVal'] = mat[:,:,1]\n out_dict['Arousal'] = mat[:,:,2]\n out_dict['Dominance'] = mat[:,:,3]\n out_dict['PosCount'] = mat[:,:,4]\n out_dict['DetectCount'] = mat[:,:,5]\n out_dict['Imagine'] = mat[:,:,6]\n out_dict['Potency'] = mat[:,:,7]\n out_dict['DomPot_Count'] = mat[:,:,8]\n out_dict['TokenCount'] = mat[:,:,9]\n out_dict['ValSq'] = mat[:,:,10]\n\n out_dict['DetectPercent'] = np.divide(out_dict['DetectCount'],out_dict['TokenCount'])\n out_dict['NegCount'] = np.subtract(out_dict['DetectCount'],out_dict['PosCount'])\n # Mean Values:\n out_dict['MeanValence'] = np.divide(np.add(out_dict['PosVal'],out_dict['NegVal']),out_dict['DetectCount'])\n out_dict['MeanNegVal'] = np.divide(out_dict['NegVal'],out_dict['NegCount'])\n out_dict['MeanPosVal'] = np.divide(out_dict['PosVal'],out_dict['PosCount'])\n out_dict['MeanArousal'] = np.divide(out_dict['Arousal'],out_dict['DetectCount'])\n out_dict['MeanDominance'] = np.divide(out_dict['Dominance'],out_dict['DomPot_Count'])\n out_dict['MeanPotency'] = np.divide(out_dict['Potency'],out_dict['DomPot_Count'])\n out_dict['PosNegValDifference'] = np.subtract(out_dict['MeanPosVal'] ,out_dict['MeanNegVal'])\n # Percentages:\n out_dict['DetectPosPercent'] = np.divide(out_dict['PosCount'],out_dict['DetectCount'])\n out_dict['OverallPosPercent'] = np.divide(out_dict['PosCount'],out_dict['TokenCount'])\n 
out_dict['DetectNegPercent'] = np.divide(out_dict['NegCount'],out_dict['DetectCount'])\n out_dict['OverallNegPercent'] = np.divide(out_dict['NegCount'],out_dict['TokenCount'])\n out_dict['MeanSquaredValence'] = np.divide(out_dict['ValSq'],out_dict['DetectCount'])\n out_dict['ValenceDeviation'] = np.sqrt(out_dict['MeanSquaredValence'])\n return(out_dict)\n else:\n print(\"Error: Output Format not found!\")\n elif method == 'discrete':\n if output == 'data_frame':\n mat['Pronoun_Percent'] = mat.Pronoun / mat.TokenCount\n mat['I_Percent'] = mat.I / mat.TokenCount\n mat['We_Percent'] = mat.We / mat.TokenCount\n mat['Self_Percent'] = mat.Self / mat.TokenCount\n mat['You_Percent'] = mat.You / mat.TokenCount\n mat['Other_Percent'] = mat.Other / mat.TokenCount\n mat['Negate_Percent'] = mat.Negate / mat.TokenCount\n mat['Assent_Percent'] = mat.Assent / mat.TokenCount\n mat['Article_Percent'] = mat.Article / mat.TokenCount\n mat['Preps_Percent'] = mat.Preps / mat.TokenCount\n mat['Number_Percent'] = mat.Number / mat.TokenCount\n mat['Affect_Percent'] = mat.Affect / mat.TokenCount\n mat['Posemo_Percent'] = mat.Posemo / mat.TokenCount\n mat['Posfeel_Percent'] = mat.Posfeel / mat.TokenCount\n mat['Optim_Percent'] = mat.Optim / mat.TokenCount\n mat['Negemo_Percent'] = mat.Negemo / mat.TokenCount\n mat['Anx_Percent'] = mat.Anx / mat.TokenCount\n mat['Anger_Percent'] = mat.Anger / mat.TokenCount\n mat['Sad_Percent'] = mat.Sad / mat.TokenCount\n mat['Cogmech_Percent'] = mat.Cogmech / mat.TokenCount\n mat['Cause_Percent'] = mat.Cause / mat.TokenCount\n mat['Insight_Percent'] = mat.Insight / mat.TokenCount\n mat['Discrep_Percent'] = mat.Discrep / mat.TokenCount\n mat['Inhib_Percent'] = mat.Inhib / mat.TokenCount\n mat['Tentat_Percent'] = mat.Tentat / mat.TokenCount\n mat['Certain_Percent'] = mat.Certain / mat.TokenCount\n mat['Senses_Percent'] = mat.Senses / mat.TokenCount\n mat['See_Percent'] = mat.See / mat.TokenCount\n mat['Hear_Percent'] = mat.Hear / mat.TokenCount\n mat['Feel_Percent'] = mat.Feel / mat.TokenCount\n mat['Social_Percent'] = mat.Social / mat.TokenCount\n mat['Comm_Percent'] = mat.Comm / mat.TokenCount\n mat['Othref_Percent'] = mat.Othref / mat.TokenCount\n mat['Friends_Percent'] = mat.Friends / mat.TokenCount\n mat['Family_Percent'] = mat.Family / mat.TokenCount\n mat['Humans_Percent'] = mat.Humans / mat.TokenCount\n mat['Time_Percent'] = mat.Time / mat.TokenCount\n mat['Past_Percent'] = mat.Past / mat.TokenCount\n mat['Present_Percent'] = mat.Present / mat.TokenCount\n mat['Future_Percent'] = mat.Future / mat.TokenCount\n mat['Space_Percent'] = mat.Space / mat.TokenCount\n mat['Up_Percent'] = mat.Up / mat.TokenCount\n mat['Down_Percent'] = mat.Down / mat.TokenCount\n mat['Incl_Percent'] = mat.Incl / mat.TokenCount\n mat['Excl_Percent'] = mat.Excl / mat.TokenCount\n mat['Motion_Percent'] = mat.Motion / mat.TokenCount\n mat['Occup_Percent'] = mat.Occup / mat.TokenCount\n mat['School_Percent'] = mat.School / mat.TokenCount\n mat['Job_Percent'] = mat.Job / mat.TokenCount\n mat['Achieve_Percent'] = mat.Achieve / mat.TokenCount\n mat['Leisure_Percent'] = mat.Leisure / mat.TokenCount\n mat['Home_Percent'] = mat.Home / mat.TokenCount\n mat['Sports_Percent'] = mat.Sports / mat.TokenCount\n mat['TV_Percent'] = mat.TV / mat.TokenCount\n mat['Music_Percent'] = mat.Music / mat.TokenCount\n mat['Money_Percent'] = mat.Money / mat.TokenCount\n mat['Metaph_Percent'] = mat.Metaph / mat.TokenCount\n mat['Relig_Percent'] = mat.Relig / mat.TokenCount\n mat['Death_Percent'] = mat.Death / mat.TokenCount\n 
mat['Physcal_Percent'] = mat.Physcal / mat.TokenCount\n mat['Body_Percent'] = mat.Body / mat.TokenCount\n mat['Sexual_Percent'] = mat.Sexual / mat.TokenCount\n mat['Eating_Percent'] = mat.Eating / mat.TokenCount\n mat['Sleep_Percent'] = mat.Sleep / mat.TokenCount\n mat['Groom_Percent'] = mat.Groom / mat.TokenCount\n mat['Swear_Percent'] = mat.Swear / mat.TokenCount\n mat['Nonfl_Percent'] = mat.Nonfl / mat.TokenCount\n mat['Fillers_Percent'] = mat.Fillers / mat.TokenCount\n mat['Swiss_Percent'] = mat.Swiss / mat.TokenCount\n mat['Ideo_Percent'] = mat.Ideo / mat.TokenCount\n mat['Personalpronomina_Percent'] = mat.Personalpronomina / mat.TokenCount\n mat['Indefinitpronomina_Percent'] = mat.Indefinitpronomina / mat.TokenCount\n mat['AuxiliaryVerbs_Percent'] = mat.AuxiliaryVerbs / mat.TokenCount\n mat['Konjunktionen_Percent'] = mat.Konjunktionen / mat.TokenCount\n mat['Adverbien_Percent'] = mat.Adverbien / mat.TokenCount\n mat['Detect_Percent'] = mat.LIWC_Counter / mat.TokenCount\n mat['Bedrohung_Percent'] = mat.Bedrohung / mat.TokenCount\n return(mat)\n\n elif output == 'array':\n out_dict = {}\n out_dict['Affect'] = mat[:,:,11]\n out_dict['Posemo'] = mat[:,:,12]\n out_dict['Posfeel'] = mat[:,:,13]\n out_dict['Optim'] = mat[:,:,14]\n out_dict['Negemo'] = mat[:,:,15]\n out_dict['Anx'] = mat[:,:,16]\n out_dict['Anger'] = mat[:,:,17]\n out_dict['Sad'] = mat[:,:,18]\n out_dict['Function'] = mat[:,:,0]\n out_dict['CogProc'] = mat[:,:,32]\n out_dict['DetectCount'] = mat[:,:,-2]\n out_dict['TokenCount'] = mat[:,:,-1]\n\n out_dict['DetectPosPercent'] = np.divide(out_dict['Posemo'], out_dict['DetectCount'])\n out_dict['OverallPosPercent'] = np.divide(out_dict['Posemo'], out_dict['TokenCount'])\n out_dict['DetectPosfeelPercent'] = np.divide(out_dict['Posfeel'], out_dict['DetectCount'])\n out_dict['OverallPosfeelPercent'] = np.divide(out_dict['Posfeel'], out_dict['TokenCount'])\n out_dict['DetectOptimPercent'] = np.divide(out_dict['Optim'], out_dict['DetectCount'])\n out_dict['OverallOptimPercent'] = np.divide(out_dict['Optim'], out_dict['TokenCount'])\n out_dict['DetectNegPercent'] = np.divide(out_dict['Negemo'], out_dict['DetectCount'])\n out_dict['OverallNegPercent'] = np.divide(out_dict['Negemo'], out_dict['TokenCount'])\n out_dict['EmoPosPercent'] = np.divide(out_dict['Posemo'],np.add(out_dict['Posemo'],out_dict['Negemo']))\n out_dict['DetectAnxPercent'] = np.divide(out_dict['Anx'], out_dict['DetectCount'])\n out_dict['OverallAnxPercent'] = np.divide(out_dict['Anx'], out_dict['TokenCount'])\n out_dict['DetectAngerPercent'] = np.divide(out_dict['Anger'], out_dict['DetectCount'])\n out_dict['OverallAngerPercent'] = np.divide(out_dict['Anger'], out_dict['TokenCount'])\n out_dict['DetectSadPercent'] = np.divide(out_dict['Sad'], out_dict['DetectCount'])\n out_dict['OverallSadPercent'] = np.divide(out_dict['Sad'], out_dict['TokenCount'])\n\n out_dict['DetectAffectPercent'] = np.divide(out_dict['Affect'], out_dict['DetectCount'])\n out_dict['OverallAffectPercent'] = np.divide(out_dict['Affect'], out_dict['TokenCount'])\n out_dict['DetectFunctionPercent'] = np.divide(out_dict['Function'], out_dict['DetectCount'])\n out_dict['OverallFunctionPercent'] = np.divide(out_dict['Function'], out_dict['TokenCount'])\n out_dict['DetectCogprocPercent'] = np.divide(out_dict['CogProc'], out_dict['DetectCount'])\n out_dict['OverallCogprocPercent'] = np.divide(out_dict['CogProc'], out_dict['TokenCount'])\n return(out_dict)\n else:\n print(\"Error: Output Format not found!\") \n else:\n print(\"Error: Method not found!\") 
\n elif language == 'chinese':\n if method == 'dimensional':\n if output == 'data_frame':\n print(\"Error: This combination doesn't exist yet!\")\n elif output == 'array':\n print(\"Error: This combination doesn't exist yet!\")\n else:\n print(\"Error: Output Format not found!\")\n elif method == 'discrete':\n if output == 'data_frame':\n print(\"Error: This combination doesn't exist yet!\")\n elif output == 'array':\n out_dict = {}\n out_dict['Affect'] = mat[:,:,30]\n out_dict['Posemo'] = mat[:,:,31]\n out_dict['Negemo'] = mat[:,:,32]\n out_dict['Anx'] = mat[:,:,33]\n out_dict['Anger'] = mat[:,:,34]\n out_dict['Sad'] = mat[:,:,35]\n out_dict['Function'] = mat[:,:,0]\n out_dict['CogProc'] = mat[:,:,41]\n out_dict['DetectCount'] = mat[:,:,-2]\n out_dict['TokenCount'] = mat[:,:,-1]\n\n out_dict['DetectPosPercent'] = np.divide(out_dict['Posemo'], out_dict['DetectCount'])\n out_dict['OverallPosPercent'] = np.divide(out_dict['Posemo'], out_dict['TokenCount'])\n out_dict['DetectNegPercent'] = np.divide(out_dict['Negemo'], out_dict['DetectCount'])\n out_dict['OverallNegPercent'] = np.divide(out_dict['Negemo'], out_dict['TokenCount'])\n out_dict['EmoPosPercent'] = np.divide(out_dict['Posemo'],np.add(out_dict['Posemo'],out_dict['Negemo']))\n out_dict['DetectAnxPercent'] = np.divide(out_dict['Anx'], out_dict['DetectCount'])\n out_dict['OverallAnxPercent'] = np.divide(out_dict['Anx'], out_dict['TokenCount'])\n out_dict['DetectAngerPercent'] = np.divide(out_dict['Anger'], out_dict['DetectCount'])\n out_dict['OverallAngerPercent'] = np.divide(out_dict['Anger'], out_dict['TokenCount'])\n out_dict['DetectSadPercent'] = np.divide(out_dict['Sad'], out_dict['DetectCount'])\n out_dict['OverallSadPercent'] = np.divide(out_dict['Sad'], out_dict['TokenCount'])\n out_dict['DetectAffectPercent'] = np.divide(out_dict['Affect'], out_dict['DetectCount'])\n out_dict['OverallAffectPercent'] = np.divide(out_dict['Affect'], out_dict['TokenCount'])\n out_dict['DetectPercent'] = np.divide(out_dict['DetectCount'], out_dict['TokenCount'])\n\n out_dict['DetectFunctionPercent'] = np.divide(out_dict['Function'], out_dict['DetectCount'])\n out_dict['OverallFunctionPercent'] = np.divide(out_dict['Function'], out_dict['TokenCount'])\n out_dict['DetectCogprocPercent'] = np.divide(out_dict['CogProc'], out_dict['DetectCount'])\n out_dict['OverallCogprocPercent'] = np.divide(out_dict['CogProc'], out_dict['TokenCount'])\n return(out_dict)\n else:\n print(\"Error: Output Format not found!\") \n else:\n print(\"Error: Method not found!\") \n else:\n print(\"Error: Language not found!\")", "def calculate(self):\n\n tp = self.confusion_matrix.tp\n tn = self.confusion_matrix.tn\n fp = self.confusion_matrix.fp\n fn = self.confusion_matrix.fn\n\n n = tp + tn + fp + fn\n e1 = (fn * (fn + 2 * tp) / (tp + fn) + fp * (fp + 2 * tn) / (tn + fp)) / n\n e2 = (fp * (fp + 2 * tp) / (tp + fp) + fn * (fn + 2 * tn) / (tn + fn)) / n\n\n return min(e1, e2)", "def calculate(self):\n\n return 2 * self.confusion_matrix.tp / \\\n (2 * self.confusion_matrix.tp + self.confusion_matrix.fp + self.confusion_matrix.fn)", "def test_get_derived_metric(self):\n pass", "def get_dps(data_dict, R = 50):\n p1_ida = iterable_data_array(data_dict, 'p1')\n p2_ida = iterable_data_array(data_dict, 'p2')\n worker = data_array_builder()\n \n for p1, p2 in zip(p1_ida, p2_ida):\n worker.append((p1 - p2)/R) \n \n return {'time':data_dict['time'], 'dp':worker.build()}", "def evaluate_viz_metrics(y_emb, dataset, verbose=1):\n results = {}\n\n # results[\"trust\"] = 
trustworthiness(dataset.inputs, y_emb, n_neighbors=5, metric=distance_metric)\n results[\"One NN accuracy\"] = nearest_neighbours_generalisation_accuracy(y_emb, dataset.labels, 1)\n results[\"Avg graph distance\"], results[\"Avg feature distance\"] = combined_dist_metric(y_emb, dataset.inputs, dataset.adj_matrix, k=5)\n results['Total distance'] = results[\"Avg graph distance\"] + results[\"Avg feature distance\"]\n\n if verbose:\n for k, v in results.items():\n print(\"{} = {:.4f}\".format(k, v))\n return results", "def getDensityEstimate(self):\n return self.density", "def r_d(self, tl):\n\t return self.RD0*exp(self.HKR/(R*self.TO)*(1. - self.TO/tl))", "def ComputeEnergyConsumption(self):\r\n pass", "def total_sdram_requirements(self):", "def compute(dm,do):\n mae = MV.average(MV.absolute(MV.subtract(dm,do)))\n return float(mae)", "def _calculate(self):\n source = self.source\n res = {}\n l_cols = [[], [], [], []]\n r_lines = {}\n dateline=None\n ###delete the below code when fetch data from database(assume: data in database has been pretreatment)\n if source[t.ror].min() > -99.0:\n pass\n else:\n source[t.ror] = np.where(\n source[t.ror] > -99.0, source[t.ror], -99.0)\n ###\n for account in self.accounts:\n source_account = source[source[t.account] == account]\n source_account = source_account.reset_index(drop=True)\n dateline=source_account[t.effective_date]\n ror=source_account[t.ror]/100\n returns_cum = ROR.ror_cum_ann(source_account, self.annualized)\n # double_return_cum=round(double_return_cum,2)+1\n returns_cum = returns_cum + 1\n growth_amounts = returns_cum * self.starting_value\n returns_cum, growth_amounts = round(returns_cum - 1, 4), \\\n round(growth_amounts, 2)\n l_cols[0].append(growth_amounts.iloc[-1, 0])#account growth amount\n l_cols[1].append(growth_amounts.iloc[-1, 1])#bench growth amount\n l_cols[2].append(returns_cum.iloc[-1, 0])#account return\n l_cols[3].append(returns_cum.iloc[-1, 1])#bench return\n r_lines[account] = [list(returns_cum.iloc[:,0]), list(growth_amounts.iloc[:, 0]),#list(returns_cum.iloc[:, 0])\n list(growth_amounts.iloc[:, 1])]#account return, account growth amount, bench growth amount\n res['account_vs_benchmark'] = {'xAxis': self.accounts,\n 'series': l_cols}\n res['growth_of_unit'] = {'xAxis': list(dateline),\n 'series': r_lines}\n return res\n # ret_dict = self._ret(accounts, starting_value, source, annualized)\n # return ret_dict", "def calculate(self):\n\n return self.confusion_matrix.tp / (self.confusion_matrix.tp + self.confusion_matrix.fn)", "def metric(self, i1, i2):\n xx = self._try_cache(self.i2e[i1[0]])\n yy = self._try_cache(self.i2e[i2[0]])\n\n \"\"\"Don't need to cache the xy similarity because it doesn't have other uses\"\"\"\n xy = parasail.nw_stats(self.i2e[i1[0]], self.i2e[i2[0]], **self.paraParams).score\n\n D = xx + yy - 2 * xy\n return D", "def compute_dsc(self, original, shrinked):\n for i in range(len(original)):\n x, y = original[i]\n orig = y-x\n\n a, b = shrinked[i]\n new = b-a\n\n dsc = float(new/orig)\n\n return dsc", "def dset(self):\n\n a = 0.0\n b = 0.0\n sums = np.sum(self.descriptors, axis=0)\n for sum in sums:\n if sum > 0:\n if sum == self.d_length:\n b += 1.\n else:\n a += 1.\n return a / (a+b)", "def compute(self) -> Any:\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n support=self.statistics[\"support\"],\n zero_division=self.zero_division,\n )\n return per_class, micro, macro, weighted", "def 
calculateR(sapienses: list) -> float:\n r = 0\n for i in sapienses:\n r = r + i.numberInfected\n r=r/I0\n r = r*S/(S+R+D)\n return r", "def _compute_recons_metrics(cls, quant_module: StaticGridQuantWrapper, act_func, inp_data: torch.Tensor,\n out_data: torch.Tensor) -> Tuple[float, float]:\n adaround_quantizer = quant_module.param_quantizers['weight']\n\n # Enable hard rounding and get quantized wrapper module's output\n adaround_quantizer.use_soft_rounding = False\n out_data_hard = cls._compute_output_with_adarounded_weights(quant_module, inp_data)\n\n # Enable soft rounding and get quantized wrapper module's output\n adaround_quantizer.use_soft_rounding = True\n out_data_soft = cls._compute_output_with_adarounded_weights(quant_module, inp_data)\n\n # If followed by an activation function\n if act_func is not None:\n out_data = act_func(out_data)\n out_data_soft = act_func(out_data_soft)\n out_data_hard = act_func(out_data_hard)\n\n recons_err_soft = functional.mse_loss(out_data_soft, out_data)\n recons_err_hard = functional.mse_loss(out_data_hard, out_data)\n\n return float(recons_err_hard), float(recons_err_soft)", "def evaluate_de_res(de_res, de_genes, adj_p_cutoff=0.05):\n n_genes = de_res.shape[0]\n called_pos = np.logical_and(de_res['tested'], de_res['adjusted_p_value'] <= adj_p_cutoff)\n actual_pos = np.array([g in de_genes for g in xrange(n_genes)], dtype=bool)\n true_pos = np.logical_and(called_pos, actual_pos)\n sens = true_pos.sum().astype(float) / actual_pos.sum()\n ppv = true_pos.sum().astype(float) / called_pos.sum()\n\n return sens, ppv", "def calculate(self):\n\n tp = self.confusion_matrix.tp\n tn = self.confusion_matrix.tn\n fp = self.confusion_matrix.fp\n fn = self.confusion_matrix.fn\n n = self.confusion_matrix.n\n\n fn_tp = fn + tp\n fp_tp = fp + tp\n\n h1 = -((fn_tp / n) * math.log2(fn_tp / n) +\n (1 - fn_tp / n) * math.log2(1 - fn_tp / n))\n\n h2 = -((fp_tp / n) * math.log2(fp_tp / n) +\n (1 - fp_tp / n) * math.log2(1 - fp_tp / n))\n\n p00 = 1 if tn == 0 else (tn / n)\n p01 = 1 if fn == 0 else (fn / n)\n p10 = 1 if fp == 0 else (fp / n)\n p11 = 1 if tp == 0 else (tp / n)\n\n h12 = -((tn / n) * math.log2(p00) +\n (fn / n) * math.log2(p01) +\n (fp / n) * math.log2(p10) +\n (tp / n) * math.log2(p11))\n\n mi = h1 + h2 - h12\n return mi", "def theoretical_effective(dataset):\n return float(sum(dataset))/len(dataset)", "def get_fractional_degradation(bt):\n\n\n\n NomIch = 0.125 # Nominal charge current\n NomId = 0.25 # Nominal discharge current\n NomSoC = 0.5 # Nominal state of charge_mode\n NomDoD = 1.0 # Nominal depth of discharge\n B = 5 #Battery capacity\n qt = 5 * 0.5 # Amount of energy in the battery at the start\n # Determin charge of discharge\n if bt > 0:\n Id = bt/(B*1) # time interval differnece is 1\n Ich = NomIch\n else:\n Ich = bt/(B*1)\n Id = NomId\n\n #Calculate average State of Charge\n SoC = 100 * (qt - 0.5*bt)/B\n\n #Calculate Depth of Discharge\n DoD = 100 * bt /B\n\n # Functions\n nCL1 = (e * np.exp (f * Id) + g * np.exp(h * Id))/ (e * np.exp (f * NomId) + g * np.exp(h * NomId))\n nCL2 = (m * np.exp (n * Ich) + o * np.exp(p * Ich))/ (m* np.exp (n* NomIch) + o * np.exp(p * NomIch))\n nCL3 = get_CL4(DoD, SoC)/get_CL4(NomDoD, NomSoC)\n nCL = nCL1 * nCL2 * nCL3\n Fractional_D = (0.5/3650)/ nCL\n return Fractional_D", "def _calc_r2(self):\n sse = np.sum((self.data.y - self.predict(self.data.x))**2)\n sst = np.sum((self.data.y - self.data.y.mean())**2)\n return (1. 
- sse/sst)", "def calc_metrics(self, data, output):\n\n L1NormITAE = self.calcL1NormITAE(data)\n L1NormAbs = self.calcL1NormAbs(data)\n #\n # print 'ITAE score: ', errorIntegral\n print 'L1NormITAE: ', L1NormITAE\n print 'L1NormAbs: ', L1NormAbs\n print '\\n'\n output.update({'L1NormITAE': L1NormITAE, 'L1NormAbs': L1NormAbs})", "def _calculate_metrics(self):\n metrics = {}\n precision, recall = self.calc_precision_recall()\n metrics[\"precision\"] = precision\n metrics[\"recall\"] = recall\n metrics[\"entropy\"] = self.calc_entropy()\n metrics[\"component_entropy\"] = self.calc_component_entropy()\n metrics[\"num_comps\"] = len(self.get_components())\n metrics[\"num_diagnoses\"] = len(self.diagnoses)\n metrics[\"distinct_diagnoses_scores\"] = len(Counter(list(map(lambda x: x.probability, self.diagnoses))))\n metrics[\"num_tests\"] = len(self.get_tests())\n metrics[\"num_distinct_traces\"] = len(self.get_distinct_traces())\n metrics[\"num_failed_tests\"] = len(self._get_tests_by_error(1))\n metrics[\"num_passed_tests\"] = len(self._get_tests_by_error(0))\n passed_comps = set(self._get_components_by_error(0))\n failed_comps = set(self.get_components_in_failed_tests())\n metrics[\"num_failed_comps\"] = len(failed_comps)\n metrics[\"only_failed_comps\"] = len(failed_comps - passed_comps)\n metrics[\"only_passed_comps\"] = len(passed_comps - failed_comps)\n metrics[\"num_bugs\"] = len(self.get_bugs())\n metrics[\"wasted\"] = self.calc_wasted_components()\n metrics[\"top_k\"] = self.calc_top_k()\n metrics[\"num_comps_in_diagnoses\"] = len(self._get_comps_in_diagnoses())\n metrics[\"bugs_cover_ratio\"] = self._get_bugs_cover_ratio()\n metrics[\"average_trace_size\"] = self._get_average_trace_size()\n metrics[\"average_component_activity\"] = self._get_average_component_activity()\n metrics[\"average_diagnosis_size\"] = self._get_average_diagnosis_size()\n metrics[\"bugs_scores_average\"], metrics[\"bugs_scores_std\"], metrics[\"bugs_scores_entropy\"] = self._get_bugs_scores()\n metrics[\"non_bugs_scores_average\"], metrics[\"non_bugs_scores_std\"], metrics[\"non_bugs_scores_entropy\"] = self._get_non_bugs_scores()\n metrics.update(self.cardinality())\n # metrics[\"ochiai\"] = self.calc_ochiai_values()\n return metrics", "def calculate_batch_metrics(self):\n pass", "def calc_metric3(K_tilda):\n trace = np.trace(K_tilda)\n # determinant = np.linalg.det(K_tilda)\n _, log_determinant = np.linalg.slogdet(K_tilda)\n diff = trace - log_determinant\n print(trace, log_determinant, diff)\n return diff", "def evaluate(pred_file, ref_file):\n ref_dict, pred_dict, query_dict, id_dict = build_pred_ref_dict(ref_file, pred_file, ref_file)\n total, acc, scores = res_eval_with_type_acc(query_dict, pred_dict, ref_dict, id_dict, save=False)\n em = calculate_exact_match(pred_dict, ref_dict)\n print('Comp Acc: {:.3f}%\\tBleu-4: {:.3f}\\tRouge-L: {:.3f}'.format(acc, scores['Bleu-4'], scores['Rouge-L']))\n print('EM: {:.3f}%'.format(em))\n # calculate_sketch_type_acc(ref_file, pred_file)\n # calculate_exact_match_for_each_q_type(ref_file, pred_file)\n return total, acc, scores, em", "def calculate(self):\n\n tp = self.confusion_matrix.tp\n tn = self.confusion_matrix.tn\n fp = self.confusion_matrix.fp\n fn = self.confusion_matrix.fn\n n = self.confusion_matrix.n\n\n fp_tn = tn + fp\n tp_fn = fn + tp\n tn_fn = tn + fn\n tp_fp = fp + tp\n nis = tn_fn * tn_fn + tp_fp * tp_fp\n njs = fp_tn * fp_tn + tp_fn * tp_fn\n sum_of_squares = tp * tp + tn * tn + fp * fp + fn * fn\n\n a = (tp * (tp - 1) + fp * (fp - 1) + tn * (tn - 1) + 
fn * (fn - 1)) / 2.\n b = (njs - sum_of_squares) / 2.\n c = (nis - sum_of_squares) / 2.\n d = (n * n + sum_of_squares - nis - njs) / 2.\n\n x1 = a - ((a + c) * (a + b) / (a + b + c + d))\n x2 = ((a + c) + (a + b)) / 2.\n x3 = ((a + c) * (a + b)) / (a + b + c + d)\n denominator = x2 - x3\n\n if denominator != 0:\n return x1 / denominator\n else:\n return 0", "def dc(self):\n return np.array(self['dc'], dtype=np.float32) / 1000", "def precalculate():\n pass", "def precalculate():\n pass", "def _compute_metrics(hits_or_lcs: int, pred_len: int, target_len: int) ->Dict[str, Tensor]:\n precision = hits_or_lcs / pred_len\n recall = hits_or_lcs / target_len\n if precision == recall == 0.0:\n return dict(precision=tensor(0.0), recall=tensor(0.0), fmeasure=tensor(0.0))\n fmeasure = 2 * precision * recall / (precision + recall)\n return dict(precision=tensor(precision), recall=tensor(recall), fmeasure=tensor(fmeasure))", "def get_expected_cost(self):", "def measure(self):\n return self._measure", "def performace_measure(data,pred):\n true = data['clicks']\n weights = weighting(data)\n diff = true-pred.astype(int)\n return np.sqrt(np.inner(weights,diff*diff)/weights.sum())", "def compute_key_value(self) -> Dict[str, float]:\n # @TODO: ddp hotfix, could be done better\n if self._is_ddp:\n for key in self.statistics:\n value: List[np.ndarray] = all_gather(self.statistics[key])\n value: np.ndarray = np.sum(np.vstack(value), axis=0)\n self.statistics[key] = value\n\n per_class, micro, macro, weighted = self.compute()\n metrics = self._convert_metrics_to_kv(\n per_class=per_class, micro=micro, macro=macro, weighted=weighted\n )\n return metrics", "def calculate_metrics(self):\n sensitivity = TP + FN\n sensitivity = TP / sensitivity\n\n specificity = TN + FP\n specificity = TN / specificity\n\n accuracy = TP + FP + TN + FN\n divisor = TP + TN\n accuracy = divisor / accuracy\n\n positive_predictive = TP + FP\n positive_predictive = TP / positive_predictive\n\n negative_predictive = TN + FN\n negative_predictive = TN / negative_predictive\n\n # This is for format decimal in metrics\n sensitivity = float(\"{0:.4f}\".format(sensitivity))\n specificity = float(\"{0:.4f}\".format(specificity))\n accuracy = float(\"{0:.4f}\".format(accuracy))\n positive_predictive = float(\"{0:.4f}\".format(positive_predictive))\n negative_predictive = float(\"{0:.4f}\".format(negative_predictive))\n\n average = (sensitivity + specificity + accuracy + positive_predictive + negative_predictive) / 5\n\n average = float(\"{0:.4f}\".format(average))\n\n metrics = [sensitivity, specificity, accuracy,positive_predictive,negative_predictive, average]\n\n return metrics", "def tldiffusion(self, dt):\n\n # Reset erosion, depo, trans and flux_in to 0\n self.erosion[:] = 0.0\n self.depo[:] = 0.0\n self.trans[:] = 0.0\n self.flux_in[:] = 0.0\n\n # Downstream steepest slope at node:\n self.steepest = self.grid.at_node[\"topographic__steepest_slope\"]\n # On each node, node ID of downstream receiver node\n # (on node (i), ID of node that receives flow from node (i)):\n self.receiver = self.grid.at_node[\"flow__receiver_node\"]\n\n dx = self.grid.dx\n cores = self.grid.core_nodes\n\n # Calculate influx rate on node i = outflux of nodes\n # whose receiver is i\n for i in self.grid.core_nodes:\n self.flux_in[self.receiver[i]] += self.flux_out[i]\n\n # Calculate transport coefficient\n # When S ~ Scrit, d_coeff is set to \"infinity\", for stability and\n # so that there is no deposition\n if self.steepest[i] >= self.slope_crit:\n self.d_coeff[i] = 
1000000000.0\n else:\n self.d_coeff[i] = 1 / (\n 1 - (np.power(((self.steepest[i]) / self.slope_crit), 2))\n )\n\n # Calculate deposition rate on node\n self.depo[cores] = self.flux_in[cores] / self.d_coeff[cores]\n\n # Calculate erosion rate on node (positive value)\n # If S > Scrit, erosion is simply set for the slope to return to Scrit\n # Otherwise, erosion is slope times erodibility coefficent\n for i in self.grid.core_nodes:\n if self.steepest[i] > self.slope_crit:\n self.erosion[i] = dx * (self.steepest[i] - self.slope_crit) / (100 * dt)\n else:\n self.erosion[i] = self.k * self.steepest[i]\n\n # Update elevation\n self.elev[i] += (-self.erosion[i] + self.depo[i]) * dt\n\n # Calculate transfer rate over node\n self.trans[cores] = self.flux_in[cores] - self.depo[cores]\n\n # Calculate outflux rate\n self.flux_out[:] = self.erosion + self.trans", "def DW_cal(data, data_sm):\n n = len(data)\n numerator = 0\n denominator = 0\n for i in range(n):\n if i == 0:\n numerator = numerator + 0\n else:\n numerator = numerator + ((data[i] - data_sm[i]) - (data[i-1] - data_sm[i-1]))**2\n denominator = denominator + (data[i] - data_sm[i])**2\n return numerator/denominator*n/(n - 1)", "def computePRMeasures(self, targetLabels, actualLabels):\r\n if self.basicMeasures is None:\r\n self.basicMeasures = self.computeBasicStatistics(targetLabels, actualLabels)\r\n if self.basicMeasures[0] == 0:\r\n self.prMeasures = (0,0)\r\n else:\r\n self.prMeasures = ((0.0 + self.basicMeasures[0]) / (self.basicMeasures[0] + self.basicMeasures[1]),\r\n (0.0 + self.basicMeasures[0]) / (self.basicMeasures[0] + self.basicMeasures[3]))\r\n return self.prMeasures", "def test_calculate_supervisory_delta_call(self):\n SDC = calculate_supervisory_delta_call()\n \n self.assertEqual(SDC, 0.73)", "def _df_reg(self):\n return self.k" ]
[ "0.6594812", "0.65652555", "0.647981", "0.647346", "0.63362116", "0.62262213", "0.6224595", "0.618598", "0.61559206", "0.6152622", "0.61477774", "0.6140981", "0.61260253", "0.61076653", "0.6089313", "0.60319406", "0.60208917", "0.59696853", "0.5940918", "0.59328157", "0.59270597", "0.5926883", "0.590515", "0.590515", "0.5860092", "0.5846103", "0.582652", "0.5809938", "0.5807385", "0.5788959", "0.57857585", "0.5770826", "0.5766159", "0.5759158", "0.5757527", "0.5738876", "0.5727232", "0.5727232", "0.57269853", "0.5724303", "0.57215774", "0.57004845", "0.5694584", "0.5694022", "0.56929445", "0.5691002", "0.56899214", "0.5674154", "0.5671199", "0.566174", "0.5648299", "0.56385", "0.56251895", "0.56099033", "0.56099015", "0.56061935", "0.56050724", "0.5598219", "0.55930763", "0.55926406", "0.5591708", "0.5578496", "0.5576069", "0.55741394", "0.55596596", "0.5559042", "0.55467093", "0.55432326", "0.55430156", "0.5542387", "0.5542298", "0.5541264", "0.55404663", "0.553962", "0.55378175", "0.5536428", "0.5534574", "0.5530079", "0.552935", "0.5525403", "0.5510374", "0.5503211", "0.55012035", "0.5496375", "0.5494377", "0.54934275", "0.5491775", "0.54901385", "0.54901385", "0.54895866", "0.5487486", "0.5486207", "0.5484212", "0.54791605", "0.547835", "0.54772925", "0.5468326", "0.5466757", "0.5465491", "0.5462701" ]
0.56650126
49
Method which calculates TS Percentage metric for a player
def set_ts_percentage(self):
    bx = self.get_standard_stats()
    ptos = float(bx["t2p_conv"]*2 + bx["t3p_conv"]*3 + bx["tl_conv"])
    tcInt = float(bx["t2p_int"] + bx["t3p_int"])
    tsAttempts = float(tcInt + (0.44*float(bx["tl_int"])))
    result = 0.00
    if tsAttempts > 0.00:
        result = (ptos/(2*tsAttempts))*100
    self.ts_percentage = "%.2f" % round(result, 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pct(self):\n\t\treturn self.bottle.pct()", "def percentCheck(currentTimeLabel, totalTimeLabel):\n # Updated 11/19/16\n try:\n progPercent = float(currentTimeLabel) / float(totalTimeLabel) * 100\n except (ValueError , ZeroDivisionError):\n progPercent = 0\n \n return progPercent", "def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)", "def get_percent(self, n):\n controlled = 0.00\n for i in range(len(self.tile_contents)):\n if(self.tile_contents[i].player_number == n):\n controlled += 1.00\n \n return float(controlled / self.paint_blocks)", "def get_winpercent(self, game: 'Game' = None, player: 'Player' = None):\n if game and player:\n pass\n elif game:\n play_count = self.play_set.filter(game=game).count()\n win_count = self.play_set.filter(winner=self, game=game).count()\n return win_count / play_count * 100\n elif player:\n pass\n # play_count = self.play_set.filter(players__in=player).count()\n # win_count = self.play_set.filter(\n # winner=self, player=player).count()\n # return win_count / play_count * 100\n else:\n return self.winpercent", "def getPercent(*args):", "def getPercent(*args):", "def percentage_update(self):\n\n self.event_update()\n return self.percentage", "def update_calculated_stats(self, player_data):\n # Updates calculated statistics\n fga = player_data['FGA']\n fgm = player_data['FGM']\n pa3 = player_data['3FGA']\n pm3 = player_data['3FGM']\n try:\n player_data['FG%'] = fgm/fga\n except:\n player_data['FG%'] = 0.0\n try:\n player_data['3FG%'] = pm3/pa3\n except:\n player_data['3FG%'] = 0.0\n return(player_data)", "async def player_ratio(self, ctx):\r\n player = ctx.message.content.split(' ')[1]\r\n if os.environ.get(\"WoW_Token\") is None:\r\n return\r\n else:\r\n async with aiohttp.ClientSession().get('https://us.api.battle.net/wow/character/zul\\'jin/' + player + '?fields=pvp&locale=en_US&apikey=' + os.environ.get(\"WoW_Token\")) as res:\r\n if res.status == 200:\r\n data = await res.json()\r\n player_pvp_stats = data['pvp']['brackets']['ARENA_BRACKET_3v3']\r\n await ctx.message.channel.send(u\"Player: {:s}\").format(player)\r\n await ctx.message.channel.send(\"Rating: {:d}\".format(player_pvp_stats['rating']))\r\n await ctx.message.channel.send(\"Season Wins: {:d}\".format(player_pvp_stats['seasonWon']))\r\n await ctx.message.channel.send(\"Season Losses: {:d}\".format(player_pvp_stats['seasonLost']))\r\n\r\n if player_pvp_stats['seasonWon'] == 0 or player_pvp_stats['seasonLost'] == 0:\r\n await ctx.message.channel.send(\"Ratio: 0\")\r\n else:\r\n await ctx.message.channel.send(\"Ratio: {:.4f}\".format(\r\n float(player_pvp_stats['seasonWon'])/\r\n float(player_pvp_stats['seasonLost']))\r\n )", "def get_real_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_real_rating() / self.field.range)", "def as_percent(self):\n if self.dynamic:\n self._update_db_obj()\n return f\"{(self._db_obj.progress / self._db_obj.total) * 100}%\"", "def set_usg_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n tcInt = bx[\"t2p_int\"] + bx[\"t3p_int\"]\n a = tcInt + (Decimal('0.44')*bx[\"tl_int\"]) + bx[\"turnovers\"]\n b = team[\"minutes\"]/5\n c = (team[\"t2p_int\"] + team[\"t3p_int\"]) + (Decimal('0.44')*team[\"tl_int\"]) + team[\"turnovers\"]\n result = 0.00\n if bx[\"minutes\"] > 0:\n result = ((Decimal(a)*Decimal(b))/(bx[\"minutes\"]*c))*100\n self.usg_percentage = \"%.2f\" % round(result, 2)", "def compute_player_score():\n\n 
progress_bar = ProgressBar(label=\"Computing universes\")\n\n survivals_count = 0\n for i in range(PARALLEL_UNIVERSES_COUNT):\n if simulate_universe():\n survivals_count += 1\n progress_bar.set_progression((i + 1) / PARALLEL_UNIVERSES_COUNT)\n\n progress_bar.end(\"\\n\\n\")\n\n return survivals_count / PARALLEL_UNIVERSES_COUNT", "def set_assists_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n team_tc_conv = team[\"t2p_conv\"] + team[\"t3p_conv\"]\n player_tc_conv = bx[\"t2p_conv\"] + bx[\"t3p_conv\"]\n result = 0.00\n try:\n if bx[\"minutes\"] > 0:\n result = (bx[\"assists\"] / (((bx[\"minutes\"] / (team[\"minutes\"] / 5)) * team_tc_conv) - player_tc_conv))*100\n result = result if result <= 100 and result >= 0 else 0\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n except InvalidOperation:\n print(BCOLORS.WARNING + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n\n self.assists_percentage = \"%.2f\" % round(result, 2)", "def getPercent(self):\n if isinstance(self.score,numbers.Number) and self.getMaximum():\n return (1.0*self.score/self.getMaximum())\n return None", "def percentage(self):\n temp = self.cpu_freq_time_spent.copy()\n for i in self.cpu_freq_time_spent:\n total = 0\n for j in self.cpu_freq_time_spent[i]:\n total += self.cpu_freq_time_spent[i][j]\n for j in self.cpu_freq_time_spent[i]:\n if total != 0:\n temp[i][j] = self.cpu_freq_time_spent[i][j] * 100 / total\n else:\n temp[i][j] = 0\n return temp", "def get_score_percent(self, value):\n qs_related = RoundData.objects.prefetch_related(\n 'shotdata').select_related('shotdata')\n\n round_holes = int(self.round_type)\n\n if value == 'par':\n return round((qs_related.filter(shotdata__nr_strokes=F('shotdata__hole__par')).count()/round_holes), 2)\n if value == 'birdie_better':\n return round((qs_related.filter(shotdata__nr_strokes__lt=F('shotdata__hole__par')).count()/round_holes), 2)\n if value == 'tbogey_worse':\n return round((qs_related.filter(shotdata__nr_strokes__gte=F('shotdata__hole__par')+3).count()/round_holes), 2)\n if isinstance(value, int):\n return round((qs_related.filter(shotdata__nr_strokes=F('shotdata__hole__par') + value).count()/round_holes), 2)", "def winning_percentage(self):\n return float(len(self.wins))/len((self.wins+self.losses))", "def percent_passing(self) -> float:\n num_meas = Enumerable(self.mlc_meas).select_many(lambda m: m.passed).count()\n num_pass = (\n Enumerable(self.mlc_meas)\n .select_many(lambda m: m.passed)\n .count(lambda p: bool(p) is True)\n )\n return float(100 * num_pass / num_meas)", "def percentage(count, total):\n return count / total * 100", "def get_percent(self):\n return self.percent", "def get_percentage(self):\n return self.percentage", "def get_percentage(self):\n return self.percentage", "def percentage_progress(self):\n\n if self.total_progress != 0:\n return float(self.progress) / self.total_progress\n return self.progress", "def percentage_progress(self):\n\n if self.total_progress != 0:\n return float(self.progress) / self.total_progress\n return self.progress", "def get_win_percentage(self) -> float:\n if self.wins == 0:\n return 0.0\n else:\n return round((self.wins / (self.wins + self.losses)) * 100, 2)", "def as_counts_and_pcts(self):\n if self.e_pct is None:\n self.calculate_percentages()\n\n score_str = 'E: ' + str(self.e_score) + '(' + str(self.e_pct) + '%)/'\n score_str += 'I: ' + str(self.i_score) + '(' + str(self.i_pct) + '%) - '\n score_str += 'N: ' + str(self.n_score) + '(' 
+ str(self.n_pct) + '%)/'\n score_str += 'S: ' + str(self.s_score) + '(' + str(self.s_pct) + '%) - '\n score_str += 'F: ' + str(self.f_score) + '(' + str(self.f_pct) + '%)/'\n score_str += 'T: ' + str(self.t_score) + '(' + str(self.t_pct) + '%) - '\n score_str += 'J: ' + str(self.j_score) + '(' + str(self.j_pct) + '%)/'\n score_str += 'P: ' + str(self.p_score) + '(' + str(self.p_pct) + '%)'\n return score_str", "def update_percent(self):", "def p(party, vote_count, s):\n return t(party, vote_count) / d(s)", "def calcMetrics(TP, P, T, percent=True):\r\n precision = TP / P if P else 0\r\n recall = TP / T if T else 0\r\n FB1 = 2 * precision * recall / (precision + recall) if precision + recall else 0\r\n if percent:\r\n return 100 * precision, 100 * recall, 100 * FB1\r\n else:\r\n return precision, recall, FB1", "def calculate_progress_percentage(d):\n successcounter = 0\n for test in d:\n if d[test][\"status\"] != \"not yet run\":\n successcounter += 1\n totalcounter = 0\n for test in d:\n totalcounter += 1\n return int(successcounter / totalcounter * 100)", "def get_song_percent_remaining(result):\n return int((1 - (get_song_elapsed_milliseconds(result) / get_song_length_milliseconds(result))) * 100)", "def set_steals_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n poss = self.get_team_possessions()\n result = 0.00\n if bx[\"minutes\"] > 0:\n result = ((bx[\"steals\"] * (team[\"minutes\"]/Decimal('5'))) / Decimal(float(bx[\"minutes\"]) * poss)) * 100\n self.steals_percentage = \"%.2f\" % round(result, 2)", "def percent(self):\r\n return self._percent", "def value_to_percent(value):\n return ...", "def test_percentage(self):\n metric = self.metric()\n sources = [\n self.source(metric, value=\"10\", total=\"70\"),\n self.source(metric, value=\"20\", total=\"50\"),\n ]\n measurement = self.measurement(metric, sources=sources)\n self.assertEqual(\"25\", measurement[\"percentage\"][\"value\"])", "def pct_status(self):\r\n # DEPRECATED: self.info.n_answers will be removed\r\n # DEPRECATED: use self.t.n_answers instead\r\n if (self.info.get('n_answers')):\r\n self.n_answers = int(self.info['n_answers'])\r\n if self.n_answers != 0 and self.n_answers != None:\r\n return float(len(self.task_runs)) / self.n_answers\r\n else: # pragma: no cover\r\n return float(0)", "def per100_top_stat_players(game_type, stat, player_pk, excluded_pks, season_id=None):\n season = None\n if season_id:\n season = bmodels.Season.objects.get(id=season_id)\n\n if player_pk:\n players = bmodels.Player.objects.filter(pk=player_pk)\n else:\n players = bmodels.Player.objects.all().exclude(\n Q(first_name__contains=\"Team\") | Q(pk__in=excluded_pks))\n player_list = []\n for player in players:\n if season:\n result = player.statline_set.filter(game__game_type=game_type, game__date__range=(\n season.start_date, season.end_date)).aggregate(Sum(stat), Sum('off_pos'))\n else:\n result = player.statline_set.filter(\n game__game_type=game_type).aggregate(Sum(stat), Sum('off_pos'))\n if result['off_pos__sum'] and result['off_pos__sum'] is not 0:\n percentage = (result[stat + '__sum'] /\n result['off_pos__sum']) * 100\n else:\n percentage = 0.0\n player_list.append((player.first_name, percentage))\n return sorted(player_list, key=lambda x: x[1], reverse=True)", "def percentage(self) -> str:\n return ranged_value_to_percentage(\n self._device.fan_speed_limits, self._device.fan_speed\n )", "def pct_helper(self,k,d,total):\n if k in d:\n return 100.0*d[k]/total\n 
else:\n return -100.0", "def pulsewidth2pct(pw): \n shifted = pw - 500.0\n scaled = shifted / 2000.0 * 100.0\n pct = scaled\n return pct", "def percent_processed(self):\n try:\n return (self.pos / self.data_encap.size) * 100.0\n except ZeroDivisionError:\n return 100.0", "def per_hour(self):\n if self.is_salary():\n return 0.0\n return self.wage_cents / 100.0", "def percentage_used(self):\n return self.volume_used/self.total_volume * 100.0", "def percentage(a, b):\n return (a * 100.0) / b", "def percent_usage(value, total):\n if total:\n return float(value) * 100.0 / (float(total) + float(value))\n else:\n return 100.0", "def compute_stats(self):\n if self.stats is not None:\n return\n self.stats = np.zeros(STEPS_MAX + 1)\n for m in self.missions:\n m.compute_stats()\n self.stats += 100 * m.stats\n self.stats /= len(self.missions)", "def percentage(self):\n return sum(self.chunk_percentage) / self.total_steps", "def get_cpu_percent():\n return psutil.cpu_percent(interval=1, percpu=True)", "def calculate(self):\n\n s_sum = 0\n class_num = len(self.scores)\n \n for i in range(class_num):\n s_sum += self.scores[i]\n\n av = float(s_sum)/class_num\n if av >= 90:\n return 'O'\n elif av >= 80:\n return 'E'\n elif av >= 70:\n return 'A'\n elif av >= 55:\n return 'P'\n elif av >= 40:\n return 'D'\n else:\n return 'T'", "def percent_frequencies(self):\n word_count = 0\n local = self.frequencies()\n for key in local.keys():\n i = local[key]\n word_count += int(i)\n for key in local.keys():\n i = local[key]\n percentage = float(i) / float(word_count)\n local[key] = percentage\n return local", "def unit_of_measurement(self) -> Any:\n return PERCENTAGE", "def ucbScore(self,totalPlayedTimes):\n winRate = self.winRate()\n #print totalPlayedTimes\n #print self.playedTimes\n confidenceInterval = math.sqrt(2 * math.log(totalPlayedTimes,math.e) / self.playedTimes)\n \n return winRate + confidenceInterval", "def percent(value, total):\n if total:\n return float(value) * 100.0 / float(total)\n else:\n return 100.0", "def _percent(self, lines_total, lines_covered):\n\n if lines_total == 0:\n return '0.0'\n return str(float(float(lines_covered) / float(lines_total)))", "def walkout_percentage_average(df,start_year, end_year,bat_met, player_name):\n base_fields = ['PA']\n emp_list = check_base_fields(df,base_fields)\n\n if not emp_list:\n bb_val = round((pd.to_numeric(df['BB.'].str.split('%').str[0])/100)*df['PA'],0).sum()\n pa_total = df['PA'].fillna(0).sum()\n return \"{:.2%}\".format(bb_val / pa_total)\n else:\n df = original_dataframe(start_year,end_year,bat_met+emp_list,player_name)\n return walkout_percentage_average(df,start_year, end_year,bat_met, player_name)", "def get_player_stats() -> List[BaseStat]:\n return [BoostStat(),\n PositionalTendencies(),\n Averages(),\n BallDistanceStat(),\n ControlsStat(),\n SpeedTendencies(),\n CarryStat(),\n PerPossessionStat(),\n SpeedTendencies(),\n RumbleItemStat(),\n KickoffStat(),\n DropshotStats(),\n DemoStat()\n ]", "def GetResult(self, playerjm):\n return self.score / len(self.scores)", "def _get_cpu_percent(self):\n cpu_delta = None\n total_delta = None\n cpu_usage = 0\n try:\n cpu_usage2_time = time.time()\n cpu_usage2_usec = self._get_cgroups_cpu_usage_snapshot()\n if cpu_usage2_usec and self._cpu_usage1_usec:\n # elapsed cpu time our cgroup consumed in time period between measurements\n cpu_delta = cpu_usage2_usec - self._cpu_usage1_usec\n if self._cpu_usage1_time:\n time_delta = cpu_usage2_time - self._cpu_usage1_time\n # max possible cpu usage per one second 
adjusted to elapsed time between measurements\n total_delta = self._max_cpu_usage * time_delta\n if cpu_delta and total_delta:\n cpu_usage = round((cpu_delta / total_delta) * 100, 1)\n self._cpu_usage1_usec = cpu_usage2_usec\n self._cpu_usage1_time = cpu_usage2_time\n except BaseException:\n self._log.warning(f'Unable to determine cpu usage', exc_info=True)\n return cpu_usage", "def pulse_width_percent(self) -> float:", "def get_percentage_sf_votes(self):\n\n votes_f = self.get_num_f_votes()\n votes_sf = self.get_num_sf_votes()\n\n # avoid dividing by zero\n if votes_f + votes_sf == 0:\n return 0\n else:\n ratio = float(votes_sf)/(votes_f + votes_sf)\n return round(ratio * 100, 1)", "def set_total_reb_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n player_rebounds = bx[\"reb_def\"] + bx[\"reb_of\"]\n team_rebounds = team[\"reb_def\"] + team[\"reb_of\"]\n opp_team_rebounds = opp_team[\"reb_def\"] + opp_team[\"reb_of\"]\n result = 0.00\n try:\n if bx[\"minutes\"] > 0 and bx[\"minutes\"] > 0:\n result = ((player_rebounds * (team[\"minutes\"]/5)) / (bx[\"minutes\"] * (team_rebounds + opp_team_rebounds)))*100\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n\n self.total_reb_percentage = \"%.2f\" % round(result, 2)", "def calculatePercentChange(self, oldValue, newValue):\n return (((newValue - oldValue)/oldValue)*100)", "def percent_updated(self):\n return self.percent_complete - self.previous_percent_complete", "def cps(self):\n return self.datacounts / self.exptime", "def calculate_scores(self):\n words = self.walk_board()\n player_scores = {}\n for word in words:\n player = word.get_owning_player()\n if player not in player_scores:\n player_scores[player] = 0\n player_scores[player] += word.get_score()\n return player_scores", "def unit_of_measurement(self):\n return \"%\"", "def as_percentages(self):\n if self.e_pct is None:\n self.calculate_percentages()\n\n score_str = 'E/I: ' + str(self.e_pct) + '%/' + str(self.i_pct) + '%; '\n score_str += 'N/S: ' + str(self.n_pct) + '%/' + str(self.s_pct) + '%; '\n score_str += 'F/T: ' + str(self.f_pct) + '%/' + str(self.t_pct) + '%; '\n score_str += 'J/P: ' + str(self.j_pct) + '%/' + str(self.p_pct) + '%'\n return score_str", "def percentage(part, whole):\n return round((100 * float(part)/float(whole)),2)", "def get_percentage_f_votes(self):\n\n votes_f = self.get_num_f_votes()\n votes_sf = self.get_num_sf_votes()\n\n # avoid dividing by zero\n if votes_f + votes_sf == 0:\n return 0\n else:\n ratio = float(votes_f)/(votes_f + votes_sf)\n return round(ratio * 100, 1)", "def get_rate(timestamps):\n return (timestamps[1, 1] - timestamps[0, 1]) / (timestamps[1, 0])", "def percent_change(ts, ax=-1):\r\n ts = np.asarray(ts)\r\n\r\n return (ts / np.expand_dims(np.mean(ts, ax), ax) - 1) * 100", "def profile(self):\n return NumericStatsMixin.profile(self)", "def calc_stats(hits, misses):\n try:\n result = (float(misses) / float(hits)) * 100.0\n except ZeroDivisionError:\n if misses == 0:\n result = 0.0\n else:\n result = 100.0\n return result", "def _get_pace_percentage(pace: time) -> float:\n pace_values = list(reversed(\n [time(minute=x // 2, second=30 if isinstance(x / 2, float) else 0) for x in range(5, 16)]))\n\n if pace < pace_values[0]:\n return 1\n\n if pace > pace_values[-1]:\n return 2\n\n for i in range(1, len(pace_values)):\n if 
pace_values[i - 1] <= pace <= pace_values[i]:\n return 1 + i / 10", "def tsVs(self):\n self.__percentuale = self.ui.percentualeTs.value()", "def percentage(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"percentage\")", "def value(self):\n #import pdb; pdb.set_trace()\n return ((self.team1.get_cur_hp() / self.team1.get_total_hp()) - \n (self.team2.get_cur_hp() / self.team2.get_total_hp()))", "def represent_total_percent(self, length):\n numpkgs = self.totals['numpkgs']\n dlpkgs = self.totals['dlpkgs']\n return self.represent_percent(dlpkgs, numpkgs, length)", "def get_estimated_percentage(self):\n now_id = now_as_id()\n message_id = self.last_message_id\n if message_id >= now_id:\n return 100.0\n \n channel_id = self.source_channel.id\n if channel_id >= message_id:\n return 0.0\n \n if self.is_polling_done():\n return 100.0\n \n return (1.0 - (now_id - message_id) / (now_id - channel_id)) * 100.0", "def winRate(DF):\r\n df = DF[\"return\"]\r\n pos = df[df>1]\r\n neg = df[df<1]\r\n return (len(pos) / len(pos + neg)) * 100", "def percent_of(part, whole):\n return part * 100 / whole", "def get_percentage(self):\n return self.PotTax_percentage", "def get_online_price_diff_percent_method(self):\n try:\n if self.overclockerskz and self.overclockerskz.online_price:\n return int((self.get_online_price_diff_method() / self.overclockerskz.online_price) * 100)\n else:\n return 0\n except (TypeError, ValueError):\n return 0", "def test_percentage_is_100(self):\n metric = self.metric(direction=\">\")\n sources = [self.source(metric, value=\"0\", total=\"0\")]\n measurement = self.measurement(metric, sources=sources)\n self.assertEqual(\"100\", measurement[\"percentage\"][\"value\"])", "def _get_percentages(games_table: pd.DataFrame, stats_table: pd.DataFrame,\n grouping_column: str) -> pd.DataFrame:\n stats_table[\n [\n \"total_free_throws_achieved\",\n \"total_free_throws_attempted\",\n \"total_two_point_achieved\",\n \"total_two_point_attempted\",\n \"total_three_point_achieved\",\n \"total_three_point_attempted\",\n ]\n ] = (\n games_table[\n [\n grouping_column,\n \"free_throws_achieved\",\n \"free_throws_attempted\",\n \"two_point_achieved\",\n \"two_point_attempted\",\n \"three_point_achieved\",\n \"three_point_attempted\",\n ]\n ]\n .groupby(grouping_column)\n .sum()\n .reset_index()\n .drop(grouping_column, axis=1)\n )\n\n stats_table[\"free_throws_pct\"] = (\n stats_table[\"total_free_throws_achieved\"] / stats_table[\"total_free_throws_attempted\"]\n )\n stats_table[\"two_point_pct\"] = (\n stats_table[\"total_two_point_achieved\"] / stats_table[\"total_two_point_attempted\"]\n )\n stats_table[\"three_point_pct\"] = (\n stats_table[\"total_three_point_achieved\"] / stats_table[\"total_three_point_attempted\"]\n )\n return stats_table", "def pc_throughput_avg(self) -> \"float\":\n return _beamforming_swig.doaesprit_sptr_pc_throughput_avg(self)", "def get_servo_pct(pi, pin):\n return pulsewidth2pct(pi.get_servo_pulsewidth(pin))", "def calculate_percent(self, total_number, some_number):\n\t\treturn (some_number * 100) / total_number", "def pc_throughput_avg(self) -> \"float\":\n return _beamforming_swig.randomsampler_sptr_pc_throughput_avg(self)", "def displayed_percent(self):\n return (self.displayed_words / self.total_words) * 100", "def _calculate_hours_percent(used_hours, estimated_hours):\n percent = (used_hours * 100) / estimated_hours\n return percent", "def get_opinion_percent(self):\n return (self.get_percent()+100)/2", "def compute_drift_score(ref_col_prob, 
col_prob):\n\n return sum(abs(np.asarray(ref_col_prob) - np.array(col_prob)) * 100)", "def calculate_emission_prob(cls, w, t):\n #\n # p_w_t = cls._emission_counts[w, t]\n # p_t = cls._uni_transition_counts[t]\n\n return float(cls._emission_counts[w, t] / cls._uni_transition_counts[t])", "def cpu_percent(self):\n self.monitoring_object['cpu_percent'] = \\\n psutil.cpu_percent(interval=1, percpu=True)", "def get_vacuum_powerpercentact(self) -> int:\n\n return self.send(self.cmd.GET_VACUUM_POWERPERCENTACT)", "def score( self ):\r\n result = 0.0\r\n for rr in self.ee.getRsrcs( ):\r\n value = self.scoreRsrc( rr )\r\n result += value\r\n print( \"INFO: Value for the schedule is %s \" % ( rr, result ) )\r\n return( result )", "def set_effective_field_goal_percentage(self):\n bx = self.get_standard_stats()\n tcInt = float(bx[\"t2p_int\"] + bx[\"t3p_int\"])\n tcConv = float(bx[\"t2p_conv\"] + bx[\"t3p_conv\"])\n result = 0.00\n if tcInt > 0:\n result = ((tcConv + (0.5 * float(bx[\"t3p_conv\"]))) / tcInt) * 100\n self.effective_field_goal_percentage = \"%.2f\" % round(result, 2)" ]
[ "0.67912775", "0.67874295", "0.66820705", "0.6615735", "0.6549017", "0.65139776", "0.65139776", "0.6465816", "0.64634633", "0.6446837", "0.64173675", "0.64117974", "0.6390257", "0.63858217", "0.6356354", "0.6316925", "0.6298419", "0.62818795", "0.6281119", "0.6268235", "0.6221657", "0.62200534", "0.6174268", "0.6174268", "0.61564356", "0.61564356", "0.61437255", "0.6129586", "0.61163414", "0.6111541", "0.60880846", "0.6076288", "0.6063487", "0.60439956", "0.6023627", "0.6006266", "0.5997345", "0.5974105", "0.5964852", "0.5962612", "0.59543926", "0.59269553", "0.592581", "0.5922709", "0.59221375", "0.59043634", "0.590287", "0.58928597", "0.5870334", "0.5866686", "0.5859223", "0.5853675", "0.5846657", "0.5822112", "0.58171606", "0.5810143", "0.580316", "0.5796781", "0.5795917", "0.5795528", "0.5784395", "0.57841456", "0.5766687", "0.57578266", "0.5743189", "0.5740383", "0.57392", "0.5735312", "0.57305497", "0.5728495", "0.5719273", "0.5712585", "0.5706635", "0.56964844", "0.56941295", "0.568894", "0.5687575", "0.56737196", "0.56710786", "0.56656027", "0.56603515", "0.5659293", "0.5659241", "0.56558007", "0.5654754", "0.56520814", "0.5633315", "0.5631197", "0.5629281", "0.5623366", "0.56229657", "0.5622891", "0.5621765", "0.5619767", "0.5617763", "0.56176233", "0.5615678", "0.56156546", "0.56130385", "0.5603134" ]
0.745896
0
Method which calculates USG% for each player from each team
def set_usg_percentage(self):
    bx = self.get_standard_stats()
    team = self.get_team_stats()
    tcInt = bx["t2p_int"] + bx["t3p_int"]
    a = tcInt + (Decimal('0.44')*bx["tl_int"]) + bx["turnovers"]
    b = team["minutes"]/5
    c = (team["t2p_int"] + team["t3p_int"]) + (Decimal('0.44')*team["tl_int"]) + team["turnovers"]
    result = 0.00
    if bx["minutes"] > 0:
        result = ((Decimal(a)*Decimal(b))/(bx["minutes"]*c))*100
    self.usg_percentage = "%.2f" % round(result, 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_player_score():\n\n progress_bar = ProgressBar(label=\"Computing universes\")\n\n survivals_count = 0\n for i in range(PARALLEL_UNIVERSES_COUNT):\n if simulate_universe():\n survivals_count += 1\n progress_bar.set_progression((i + 1) / PARALLEL_UNIVERSES_COUNT)\n\n progress_bar.end(\"\\n\\n\")\n\n return survivals_count / PARALLEL_UNIVERSES_COUNT", "def update_calculated_stats(self, player_data):\n # Updates calculated statistics\n fga = player_data['FGA']\n fgm = player_data['FGM']\n pa3 = player_data['3FGA']\n pm3 = player_data['3FGM']\n try:\n player_data['FG%'] = fgm/fga\n except:\n player_data['FG%'] = 0.0\n try:\n player_data['3FG%'] = pm3/pa3\n except:\n player_data['3FG%'] = 0.0\n return(player_data)", "def simulate(team, N=100):\n\n total_score = 0.0\n for player in team:\n simulation_score = []\n for i in range(N):\n simulation_score.append(get_player_score(player))\n total_score += np.mean(simulation_score)\n\n return total_score", "def get_team_results(usrs, sched):\t\n\t\n\ttotal_consistency = 0\n\ttotal_completion = 0\n\tfor user in usrs:\n\t\tresult = get_consistency(user, sched)\n\t\t\n\t\ttotal_consistency += result[\"consistency\"]\n\t\ttotal_completion += result[\"completion\"]\n\t\n\tteam_consistency = 0\n\tteam_completion = 0\n\t\t\n\tif(len(usrs) != 0):\n\t\tteam_consistency = total_consistency / float(len(usrs))\n\t\tteam_completion = total_completion / float(len(usrs))\n\t\t\n\treturn { \"consistency\" : team_consistency, \"completion\" : team_completion }", "def getStats(population, masterList):\n for team in population:\n for i in range(13): #13 are the number of roster spots?\n team.totHr += masterList[team.roster[i]].hr\n team.totAvg += masterList[team.roster[i]].avg\n team.totRuns += masterList[team.roster[i]].runs\n team.totSb += masterList[team.roster[i]].sb\n team.totRbi += masterList[team.roster[i]].rbi\n if i == 12:\n team.totAvg = team.totAvg / 13\n return population", "def get_winpercent(self, game: 'Game' = None, player: 'Player' = None):\n if game and player:\n pass\n elif game:\n play_count = self.play_set.filter(game=game).count()\n win_count = self.play_set.filter(winner=self, game=game).count()\n return win_count / play_count * 100\n elif player:\n pass\n # play_count = self.play_set.filter(players__in=player).count()\n # win_count = self.play_set.filter(\n # winner=self, player=player).count()\n # return win_count / play_count * 100\n else:\n return self.winpercent", "def set_assists_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n team_tc_conv = team[\"t2p_conv\"] + team[\"t3p_conv\"]\n player_tc_conv = bx[\"t2p_conv\"] + bx[\"t3p_conv\"]\n result = 0.00\n try:\n if bx[\"minutes\"] > 0:\n result = (bx[\"assists\"] / (((bx[\"minutes\"] / (team[\"minutes\"] / 5)) * team_tc_conv) - player_tc_conv))*100\n result = result if result <= 100 and result >= 0 else 0\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n except InvalidOperation:\n print(BCOLORS.WARNING + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n\n self.assists_percentage = \"%.2f\" % round(result, 2)", "def main(simulations, userschoice):\n # The teams data are obtained from FIFA statistics\n # Team Name, Attack, Defence\n quarters = ['quarter1', 'quarter2', 'quarter3', 'quarter4', 'quarter5', 'quarter6', 'quarter7', 'quarter8']\n semifinalists = ['semifinalist1', 'semifinalist2', 'semifinalist3', 'semifinalist4']\n finalists = ['finalist1', 'finalist2']\n\n df = 
pd.read_csv('FifaRankings.csv', index_col=\"Ranking\")\n a_set = set()\n while True:\n a_set.add(randint(42, 85))\n if len(a_set) == 32:\n break\n lst1 = sorted(list(a_set), reverse=True)\n\n a_set = set()\n while True:\n a_set.add(randint(38, 83))\n if len(a_set) == 32:\n break\n lst2 = sorted(list(a_set), reverse=True)\n print(\"\\n\")\n df['Attack'] = lst1\n df['Defence'] = lst2\n a = list(df[\"Team\"])\n\n avgScored = 0\n avgConceded = 0\n avgScored = df['Attack'].sum()\n avgConceded = df['Defence'].sum()\n\n avgScored = avgScored / len(df)\n avgConceded = avgConceded / len(df)\n print(\"\\n\")\n avgattack = []\n avgdefense = []\n\n for i in range(1, 33):\n if df['Matches Played'][i] != 0:\n win_rate = (df['WorldCup Wins'][i] / df['Matches Played'][i])\n else:\n win_rate = 0\n avgattack.append((df['Attack'][i] / avgScored) + win_rate)\n avgdefense.append((df['Defence'][i] / avgConceded) + win_rate)\n\n df['Avg Attack'] = avgattack\n df['Avg Defense'] = avgdefense\n\n\n teamstats=[]\n for i in range(1,len(df)+1):\n teaminfo=[]\n teaminfo = (df[\"Team\"][i], df['Avg Attack'][i], df['Avg Defense'][i])\n teaminfo=list(teaminfo)\n teamstats.append(teaminfo)\n\n germany = WorldCupTeam(\"GERMANY\", teamstats)\n brazil = WorldCupTeam(\"BRAZIL\", teamstats)\n belgium = WorldCupTeam(\"BELGIUM\", teamstats)\n portugal = WorldCupTeam(\"PORTUGAL\", teamstats)\n argentina = WorldCupTeam(\"ARGENTINA\", teamstats)\n france = WorldCupTeam(\"FRANCE\", teamstats)\n switzerland = WorldCupTeam(\"SWITZERLAND\", teamstats)\n spain = WorldCupTeam(\"SPAIN\", teamstats)\n russia = WorldCupTeam(\"RUSSIA\", teamstats)\n japan = WorldCupTeam(\"JAPAN\", teamstats)\n polland=WorldCupTeam(\"POLLAND\", teamstats)\n korea_republic = WorldCupTeam(\"KOREA REPUBLIC\", teamstats)\n england = WorldCupTeam(\"ENGLAND\", teamstats)\n denmark= WorldCupTeam(\"DENMARK\", teamstats)\n peru= WorldCupTeam(\"PERU\", teamstats)\n tunisia=WorldCupTeam(\"TUNISIA\", teamstats)\n mexico = WorldCupTeam(\"MEXICO\", teamstats)\n colombia = WorldCupTeam(\"COLOMBIA\", teamstats)\n uruguay = WorldCupTeam(\"URUGUAY\", teamstats)\n croatia = WorldCupTeam(\"CROATIA\", teamstats)\n australia = WorldCupTeam(\"AUSTRALIA\", teamstats)\n iceland=WorldCupTeam(\"ICELAND\", teamstats)\n sweden=WorldCupTeam(\"SWEDEN\", teamstats)\n costa_rica = WorldCupTeam(\"COSTA RICA\", teamstats)\n senegal=WorldCupTeam(\"SENEGAL\", teamstats)\n serbia=WorldCupTeam(\"SERBIA\", teamstats)\n morrocco=WorldCupTeam(\"MORROCCO\", teamstats)\n egypt=WorldCupTeam(\"EGYPT\", teamstats)\n nigeria = WorldCupTeam(\"NIGERIA\", teamstats)\n saudi_arabia=WorldCupTeam(\"SAUDI ARABIA\", teamstats)\n panama=WorldCupTeam(\"PANAMA\", teamstats)\n iran = WorldCupTeam(\"IRAN\", teamstats)\n\n\n #INPUT USERS CHOICE FOR FIXED CHOICE\n choices= [\"random\", \"Random\", \"RANDOM\"]\n choicess = [\"fixed\", \"Fixed\", \"FIXED\"]\n if userschoice in choices:\n countries = [germany, brazil, belgium, portugal, argentina, france, switzerland, spain, russia, japan, polland,\n korea_republic, england, denmark, peru, tunisia, mexico, colombia, uruguay, croatia, australia,\n iceland, sweden, costa_rica, senegal, serbia, morrocco, egypt, nigeria, saudi_arabia, panama, iran]\n finalresults = {}\n\n GroupA, GroupB, GroupC, GroupD, GroupE, GroupF, GroupG, GroupH = ([] for i in range(8))\n\n Groups = [GroupA, GroupB, GroupC, GroupD, GroupE, GroupF, GroupG, GroupH]\n for i in Groups:\n for j in range(4):\n teamname = choice(countries)\n i.append(teamname)\n countries.remove(teamname)\n\n print(\"DRAWS for the 
WorldCup 2018 are:\")\n print(\"\\n\")\n for i in range(simulations):\n # Play first stage\n print(\"Result of\", i + 1, \"simulations\")\n print(\"--------------------------------------------\")\n print(\"This is GROUP STAGE\")\n print(\"\\n\")\n print(\"GROUP A RESULTS\")\n print(\"\\n\")\n groupA = TeamPool(Groups[0])\n print(\"\\n\")\n print(\"GROUP B RESULTS\")\n print(\"\\n\")\n groupB = TeamPool(Groups[1])\n print(\"\\n\")\n print(\"GROUP C RESULTS\")\n print(\"\\n\")\n groupC = TeamPool(Groups[2])\n print(\"\\n\")\n print(\"GROUP D RESULTS\")\n print(\"\\n\")\n groupD = TeamPool(Groups[3])\n print(\"\\n\")\n print(\"GROUP E RESULTS\")\n print(\"\\n\")\n groupE = TeamPool(Groups[4])\n print(\"\\n\")\n print(\"GROUP F RESULTS\")\n print(\"\\n\")\n groupF = TeamPool(Groups[5])\n print(\"\\n\")\n print(\"GROUP G RESULTS\")\n print(\"\\n\")\n groupG = TeamPool(Groups[6])\n print(\"\\n\")\n print(\"GROUP H RESULTS\")\n print(\"\\n\")\n groupH = TeamPool(Groups[7])\n\n # Play second stage\n print(\"\\n\")\n print(\"ROUND OF 16\")\n print(\"\\n\")\n r16 = [groupA.first_qualified, groupA.second_qualified, groupB.first_qualified, groupB.second_qualified,\n groupC.first_qualified, groupC.second_qualified, groupD.first_qualified, groupD.second_qualified,\n groupE.first_qualified, groupE.second_qualified, groupF.first_qualified, groupF.second_qualified,\n groupG.first_qualified, groupG.second_qualified, groupH.first_qualified, groupH.second_qualified]\n\n\n GroupP, GroupQ, GroupR, GroupS, GroupT, GroupU, GroupV, GroupW =([] for i in range(8))\n\n round16groups = [GroupP, GroupQ, GroupR, GroupS, GroupT, GroupU, GroupV, GroupW]\n\n for k in round16groups:\n for j in range(2):\n teamname = choice(r16)\n k.append(teamname)\n r16.remove(teamname)\n\n for i in range(8):\n quarters[i]=WorldCupMatch(round16groups[i][0], round16groups[i][1], False).winner\n\n # Quarters\n print(\"\\n\")\n print(\"QUARTER - FINALS\")\n print(\"\\n\")\n quarterfinal = [quarters[0], quarters[1], quarters[2], quarters[3], quarters[4], quarters[5], quarters[6],\n quarters[7]]\n GroupA1, GroupB1, GroupC1, GroupD1 = ([] for i in range(4))\n\n quarterfinalgroups = [GroupA1, GroupB1, GroupC1, GroupD1]\n\n i = 0\n for i in quarterfinalgroups:\n for j in range(2):\n teamname = choice(quarterfinal)\n i.append(teamname)\n quarterfinal.remove(teamname)\n\n for i in range(4):\n semifinalists[i] = WorldCupMatch(quarterfinalgroups[i][0], quarterfinalgroups[i][1], False).winner\n\n # Semifinals\n print(\"\\n\")\n print(\"SEMI - FINALS\")\n print(\"\\n\")\n\n semifinal = [semifinalists[0], semifinalists[1], semifinalists[2], semifinalists[3]]\n GroupP1, GroupQ1 = ([] for i in range(2))\n semifinalgroups = [GroupP1, GroupQ1]\n\n i = 0\n for i in semifinalgroups:\n for j in range(2):\n teamname = choice(semifinal)\n i.append(teamname)\n semifinal.remove(teamname)\n\n for i in range(2):\n finalists[i] = WorldCupMatch(semifinalgroups[i][0], semifinalgroups[i][1], False).winner\n # Finals\n print(\"\\n\")\n print(\"WORLD-CUP FINAL\")\n print(\"\\n\")\n winner = WorldCupMatch(finalists[0], finalists[1], False).winner\n print(\"\\n\")\n\n if winner.name in finalresults:\n finalresults[winner.name] += 1\n else:\n finalresults[winner.name] = 1\n\n for key in sorted(finalresults, key=finalresults.get, reverse=True):\n print(key + \": \" + str(finalresults[key] / simulations))\n ro=(finalresults[key] / simulations) * 100\n print(str(ro) + \"% chance of winning the worldcup\")\n print(\"\\n\")\n print(\"\\n\")\n\n\n elif userschoice in choicess:\n\n 
print(\"\\n\")\n finalresults = {}\n groupA1 = [russia , saudi_arabia,egypt, uruguay]\n groupB1 = [portugal, spain, morrocco, iran]\n groupC1 = [france, australia, peru, denmark]\n groupD1 = [argentina, iceland, croatia, nigeria]\n groupE1 = [brazil, switzerland, costa_rica, serbia]\n groupF1 = [germany, mexico, sweden, korea_republic]\n groupG1 = [belgium, panama, tunisia, england]\n groupH1 = [polland, senegal, colombia, japan]\n print(\"\\n\")\n for i in range(simulations):\n # Play first stage\n print(\"Result of\", i+1 ,\"simulations\")\n print(\"--------------------------------------------\")\n print(\"This is GROUP STAGE\")\n print(\"\\n\")\n print(\"GROUP A RESULTS\")\n print(\"\\n\")\n groupA = TeamPool(groupA1)\n print(\"\\n\")\n print(\"GROUP B RESULTS\")\n print(\"\\n\")\n groupB = TeamPool(groupB1)\n print(\"\\n\")\n print(\"GROUP C RESULTS\")\n print(\"\\n\")\n groupC = TeamPool(groupC1)\n print(\"\\n\")\n print(\"GROUP D RESULTS\")\n print(\"\\n\")\n groupD = TeamPool(groupD1)\n print(\"\\n\")\n print(\"GROUP E RESULTS\")\n print(\"\\n\")\n groupE = TeamPool(groupE1)\n print(\"\\n\")\n print(\"GROUP F RESULTS\")\n print(\"\\n\")\n groupF = TeamPool(groupF1)\n print(\"\\n\")\n print(\"GROUP G RESULTS\")\n print(\"\\n\")\n groupG = TeamPool(groupG1)\n print(\"\\n\")\n print(\"GROUP H RESULTS\")\n print(\"\\n\")\n groupH = TeamPool(groupH1)\n print(\"Qualifies teams:\", groupH.first_qualified.name)\n\n # Play second stage\n print(\"\\n\")\n print(\"ROUND OF 16\")\n print(\"\\n\")\n\n quarter1 = WorldCupMatch(groupA.first_qualified, groupA.second_qualified, False).winner\n quarter2 = WorldCupMatch(groupB.first_qualified, groupB.second_qualified, False).winner\n quarter3 = WorldCupMatch(groupC.first_qualified, groupC.second_qualified, False).winner\n quarter4 = WorldCupMatch(groupD.first_qualified, groupD.second_qualified, False).winner\n quarter5 = WorldCupMatch(groupE.first_qualified, groupE.second_qualified, False).winner\n quarter6 = WorldCupMatch(groupF.first_qualified, groupF.second_qualified, False).winner\n quarter7 = WorldCupMatch(groupG.first_qualified, groupG.second_qualified, False).winner\n quarter8 = WorldCupMatch(groupH.first_qualified, groupH.second_qualified, False).winner\n\n # Quarters\n print(\"\\n\")\n print(\"QUARTER - FINALS\")\n print(\"\\n\")\n\n semifinalist1 = WorldCupMatch(quarter1, quarter2, False).winner\n semifinalist2 = WorldCupMatch(quarter3, quarter4, False).winner\n semifinalist3 = WorldCupMatch(quarter5, quarter6, False).winner\n semifinalist4 = WorldCupMatch( quarter7, quarter8, False).winner\n\n # Semifinals\n print(\"\\n\")\n print(\"SEMI - FINALS\")\n print(\"\\n\")\n finalist1 = WorldCupMatch(semifinalist1, semifinalist2, False).winner\n finalist2 = WorldCupMatch(semifinalist3, semifinalist4, False).winner\n\n # Final\n print(\"\\n\")\n print(\"WORLD-CUP FINAL\")\n print(\"\\n\")\n winner = WorldCupMatch(finalist1, finalist2, False).winner\n print(\"\\n\")\n\n\n if winner.name in finalresults:\n finalresults[winner.name] += 1\n else:\n finalresults[winner.name] = 1\n\n for key in sorted(finalresults, key=finalresults.get, reverse=True):\n print(key + \": \" + str(finalresults[key] / simulations))\n rou = (finalresults[key] / simulations) * 100\n print(str(rou) + \"% chance of winning the worldcup\")\n print(\"\\n\")\n print(\"\\n\")\n else:\n print(\"Please enter correct input and try again\")\n pass", "def value(self):\n #import pdb; pdb.set_trace()\n return ((self.team1.get_cur_hp() / self.team1.get_total_hp()) - \n 
(self.team2.get_cur_hp() / self.team2.get_total_hp()))", "def get_team_stats(players: list[Player]) -> dict[int]:\n\n team_stats = {}\n\n total_reaction = 0\n total_mechanical_skill = 0\n total_tactical_skill = 0\n total_game_knowledge = 0\n total_xp = 0\n\n for player in players:\n total_reaction += player.reaction\n total_mechanical_skill += player.mechanical_skill\n total_tactical_skill += player.tactical_skill\n total_game_knowledge += player.game_knowledge\n total_xp += player.xp\n\n team_stats.update(\n {\"reaction\": total_reaction,\n \"mechanical_skill\": total_mechanical_skill,\n \"tactical_skill\": total_tactical_skill,\n \"game_knowledge\": total_game_knowledge,\n \"xp\": total_xp})\n\n return team_stats", "def winning_percentage(self):\n return float(len(self.wins))/len((self.wins+self.losses))", "def _get_percentages(games_table: pd.DataFrame, stats_table: pd.DataFrame,\n grouping_column: str) -> pd.DataFrame:\n stats_table[\n [\n \"total_free_throws_achieved\",\n \"total_free_throws_attempted\",\n \"total_two_point_achieved\",\n \"total_two_point_attempted\",\n \"total_three_point_achieved\",\n \"total_three_point_attempted\",\n ]\n ] = (\n games_table[\n [\n grouping_column,\n \"free_throws_achieved\",\n \"free_throws_attempted\",\n \"two_point_achieved\",\n \"two_point_attempted\",\n \"three_point_achieved\",\n \"three_point_attempted\",\n ]\n ]\n .groupby(grouping_column)\n .sum()\n .reset_index()\n .drop(grouping_column, axis=1)\n )\n\n stats_table[\"free_throws_pct\"] = (\n stats_table[\"total_free_throws_achieved\"] / stats_table[\"total_free_throws_attempted\"]\n )\n stats_table[\"two_point_pct\"] = (\n stats_table[\"total_two_point_achieved\"] / stats_table[\"total_two_point_attempted\"]\n )\n stats_table[\"three_point_pct\"] = (\n stats_table[\"total_three_point_achieved\"] / stats_table[\"total_three_point_attempted\"]\n )\n return stats_table", "def processed_overall(self):\n self.processed_overall = (\n self.combine_both_winning_losing_games_stats\n .rename(columns={\"WTeamID\":\"TeamID\"})\n .pipe(lambda x:x.assign(fgp = x.total_fgm/x.total_fga))\n .pipe(lambda x:x.assign(fg3p = x.total_fg3m/x.total_fg3a))\n .pipe(lambda x:x.assign(ftp = x.total_ftm/x.total_fta))\n [['Season','TeamID','win_rate','total_score','total_opponent_score','fgp','fg3p','ftp', 'total_rebounds','total_off_rebounds','total_def_rebounds',\n 'total_off_rebounds_percent','total_def_rebounds_percent','total_rebound_possession_percent','total_rebound_possessiongain_percent','total_blocks',\n 'total_assists','total_steals','total_turnover','total_personalfoul','total_block_opp_FGA_percent','total_assist_per_fgm','total_assist_turnover_ratio',\n 'expectation_per_game','avg_lose_score_by','avg_win_score_by']]\n )", "def get_player_stats_from_game(team, year, week):", "def compute_stats(self):\n if self.stats is not None:\n return\n self.stats = np.zeros(STEPS_MAX + 1)\n for m in self.missions:\n m.compute_stats()\n self.stats += 100 * m.stats\n self.stats /= len(self.missions)", "def cal_hit_ratio(self):\n full, top_k = self._subjects, self._top_k\n top_k = full[full['rank']<=top_k]\n score = 0.0\n # golden items hit in the top_K items\n score_1 = sum([(len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==1.0)])) for i,d in top_k.groupby('user')])\n score_2 = sum([(len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==0.0)])) for i,d in top_k.groupby('user')])\n score = score_1 - score_2\n return score/full['user'].nunique()", "def 
ucbScore(self,totalPlayedTimes):\n winRate = self.winRate()\n #print totalPlayedTimes\n #print self.playedTimes\n confidenceInterval = math.sqrt(2 * math.log(totalPlayedTimes,math.e) / self.playedTimes)\n \n return winRate + confidenceInterval", "def winning_games_stats(self):\n self.winning_games_up_to_2013 = (\n self.df\n .pipe(lambda x:x.assign(winning_num_counts = 1))\n .query(\"Season <= 2013\")\n .groupby(['Season','WTeamID'])\n .agg({\"WScore\":\"sum\",\"WFGM\":\"sum\",\"WFGA\":\"sum\",\"WFGM3\":\"sum\",\"WFGA3\":\"sum\",\"WFTM\":\"sum\",\"WFTA\":\"sum\",\"LScore\":\"sum\",\"winning_num_counts\":\"sum\",\n \"WOR\":\"sum\",\"WDR\":\"sum\",\"LFGM\":\"sum\",\"LFGA\":\"sum\",\n \"WAst\":\"sum\",\"WTO\":\"sum\",\"WStl\":\"sum\",\"WBlk\":\"sum\",\"WPF\":\"sum\"})\n .reset_index()\n .rename(columns={\"LScore\":\"losing_opponent_score\"})\n # rebounds\n .pipe(lambda x:x.assign(total_winning_rebounds = x.WOR + x.WDR))\n .pipe(lambda x:x.assign(winning_off_rebounds_percent = x.WOR/x.total_winning_rebounds))\n .pipe(lambda x:x.assign(winning_def_rebounds_percent = x.WDR/x.total_winning_rebounds))\n .pipe(lambda x:x.assign(team_missed_attempts = x.WFGA - x.WFGM))\n .pipe(lambda x:x.assign(opp_team_missed_attempts = x.LFGA - x.LFGM))\n .pipe(lambda x:x.assign(winning_rebound_possession_percent = x.WOR/x.team_missed_attempts))\n .pipe(lambda x:x.assign(winning_rebound_possessiongain_percent = x.WDR/x.opp_team_missed_attempts))\n # blocks, steals, assists and turnovers\n .pipe(lambda x:x.assign(winning_block_opp_FGA_percent = x.WBlk/x.LFGA))\n .pipe(lambda x:x.assign(winning_assist_per_fgm = x.WAst/x.WFGM))\n .pipe(lambda x:x.assign(winning_assist_turnover_ratio = x.WAst/x.WTO))\n # rename columns to prevent duplication when joining with losing stats. example: WFGM_x\n .rename(columns={\"LFGA\":\"LFGA_opp\",\"LFGM\":\"LFGM_opp\"})\n )", "def calculate_scores(self):\n words = self.walk_board()\n player_scores = {}\n for word in words:\n player = word.get_owning_player()\n if player not in player_scores:\n player_scores[player] = 0\n player_scores[player] += word.get_score()\n return player_scores", "def advancedStats():", "def set_total_reb_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n player_rebounds = bx[\"reb_def\"] + bx[\"reb_of\"]\n team_rebounds = team[\"reb_def\"] + team[\"reb_of\"]\n opp_team_rebounds = opp_team[\"reb_def\"] + opp_team[\"reb_of\"]\n result = 0.00\n try:\n if bx[\"minutes\"] > 0 and bx[\"minutes\"] > 0:\n result = ((player_rebounds * (team[\"minutes\"]/5)) / (bx[\"minutes\"] * (team_rebounds + opp_team_rebounds)))*100\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n\n self.total_reb_percentage = \"%.2f\" % round(result, 2)", "def get_stats(self):\n\n win_points = 0\n lose_points = 0\n\n for username in self.bets:\n bet_for_win, points = self.bets[username]\n if bet_for_win:\n win_points += points\n else:\n lose_points += points\n\n return win_points, lose_points", "def calculate_scores(players):\n scores = {}\n for player in players.tuple_:\n scores[player.id_] = player.score()\n return scores", "def get_percent(self, n):\n controlled = 0.00\n for i in range(len(self.tile_contents)):\n if(self.tile_contents[i].player_number == n):\n controlled += 1.00\n \n return float(controlled / self.paint_blocks)", "def calculateWinRate():\n times = 10\n winRate = 0.0\n 
for i in range(times):\n game = Game('user', 6, 6)\n winRate += game.play(5, False, True, False, False)\n winRate = winRate/times\n print \"Winrate:\", winRate", "def gamestats(self, table, curr_team):\n\n # Drop unneeded header \n tmp = table.iloc[1:,]\n # Fix the column names by reading line 0\n tmp.columns = [x.replace(\" \", \"\").replace(\"/\",\"\").replace(\".\",\"\") for x in tmp.iloc[0]]\n # Drop row zero which held the header row\n tmp = tmp.drop(tmp.index[0])\n # Forward fill the dates for defensive split later \n tmp['Date'].fillna(method='ffill', inplace = True)\n # Add in the team \n tmp['Team'] = curr_team\n # Create an offense/defense variable\n tmp['OffenseDefense'] = tmp['Opponent']\n # If it's not a defensive total then it's offense - set that in the offensedefense variable\n tmp['OffenseDefense'] = tmp['OffenseDefense'].apply(lambda x: \"Defense\" if x == \"Defensive Totals\" else \"Offense\")\n # Set the defensive totals in the opponent varaible to nullls\n tmp['Opponent'] = tmp['Opponent'].apply(lambda x: None if x == \"Defensive Totals\" else x)\n # Forward fill the opponents in for analysis later\n tmp['Opponent'].fillna(method='ffill', inplace = True)\n # Forward fill the results in for analysis later \n tmp['Result'].fillna(method='ffill', inplace = True)\n return tmp", "def calculate(self):\n\n s_sum = 0\n class_num = len(self.scores)\n \n for i in range(class_num):\n s_sum += self.scores[i]\n\n av = float(s_sum)/class_num\n if av >= 90:\n return 'O'\n elif av >= 80:\n return 'E'\n elif av >= 70:\n return 'A'\n elif av >= 55:\n return 'P'\n elif av >= 40:\n return 'D'\n else:\n return 'T'", "def test_get_team_strength(self):\n pass", "def update_mean_and_count(self, strat_profile, game_outcome):\n self.total_interactions += 1\n for k in range(self.G.n_players):\n self.mu[k][strat_profile] *= self.count[k][strat_profile]\n self.mu[k][strat_profile] += game_outcome[k]\n self.count[k][strat_profile] += 1\n self.mu[k][strat_profile] /= self.count[k][strat_profile]\n\n for s in self.V:\n self.count_history[s].append(self.count[0][s] /\n float(self.total_interactions))", "def stats(detections, faces):\n vp, fp, fn, vn = 0, 0, 0, 0\n max_label = np.max(faces[:, 0])\n for i in range(max_label + 1):\n detections_i = get_label_with_index(detections, i)\n faces_i = get_label_with_index(faces, i)\n local_vp = 0\n for face in faces_i:\n found = False\n for detection in detections_i:\n if intersection_ratio(face, detection) >= 0.5:\n found = True\n break\n if found:\n vp += 1\n local_vp += 1\n else:\n fn += 1\n fp += len(detections_i) - local_vp\n\n precision = vp / (vp + fp)\n rappel = vp / (vp + fn)\n f_score = 2 * ((precision * rappel) / (precision + rappel))\n\n return precision, rappel, f_score", "def find_pcts_multi(P, start_b = [], iter = 10000):\n assert len(P) >= 2\n wins_per_player = [0] * len(P)\n all_hole = reduce(lambda x,y: x+y, P)\n for i in range(iter):\n deck = Deck()\n need = 5 - len(start_b)\n b2 = draw_sure(deck, need, all_hole+start_b)\n s = [evaluator.evaluate(start_b+b2, h) for h in P]\n for i, e in enumerate(s):\n if e == min(s):\n wins_per_player[i] += 1\n return [float(x) / sum(wins_per_player) for x in wins_per_player]", "def cal_hit_gbratio(self):\n full, top_k = self._subjects, self._top_k\n top_k = full[full['rank']<=top_k]\n #print({d['user'].iloc[0]:d['ratings'].to_list() for i,d in top_k.groupby('user')})\n score = 0.0\n # golden items hit in the top_K items\n score_1 = 
{d['user'].iloc[0]:len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==1.0)]) for i,d in top_k.groupby('user')}\n score_2 = {d['user'].iloc[0]:len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==0.0)]) for i,d in top_k.groupby('user')} \n score_ratio = [(score_1[d]-score_2[d]/self._test_ratings[d]) if self._test_ratings[d]!=0 else 0 for d in self._test_ratings.keys()]\n\n #print(np.mean(score_ratio))\n #print(score_1)\n #score = score_1 + score_2\n return np.mean(score_ratio)", "def pct(self):\n\t\treturn self.bottle.pct()", "def per100_top_stat_players(game_type, stat, player_pk, excluded_pks, season_id=None):\n season = None\n if season_id:\n season = bmodels.Season.objects.get(id=season_id)\n\n if player_pk:\n players = bmodels.Player.objects.filter(pk=player_pk)\n else:\n players = bmodels.Player.objects.all().exclude(\n Q(first_name__contains=\"Team\") | Q(pk__in=excluded_pks))\n player_list = []\n for player in players:\n if season:\n result = player.statline_set.filter(game__game_type=game_type, game__date__range=(\n season.start_date, season.end_date)).aggregate(Sum(stat), Sum('off_pos'))\n else:\n result = player.statline_set.filter(\n game__game_type=game_type).aggregate(Sum(stat), Sum('off_pos'))\n if result['off_pos__sum'] and result['off_pos__sum'] is not 0:\n percentage = (result[stat + '__sum'] /\n result['off_pos__sum']) * 100\n else:\n percentage = 0.0\n player_list.append((player.first_name, percentage))\n return sorted(player_list, key=lambda x: x[1], reverse=True)", "def percentage_40(set_):\n db = TinyDB(CARD_DATA_FILE)\n card_data = db.table('card_data')\n total = card_data.count(where('set') == set_)\n q = Query()\n num_forties = card_data.count((q.set == set_) & (q.commons == 4) & (q.rares == 1))\n\n print(num_forties/total)", "def custom_score_3(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player)\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player))\n if opp_moves == 0:\n return float(\"inf\")\n\n #Between 1-8\n return own_moves / opp_moves", "async def player_ratio(self, ctx):\r\n player = ctx.message.content.split(' ')[1]\r\n if os.environ.get(\"WoW_Token\") is None:\r\n return\r\n else:\r\n async with aiohttp.ClientSession().get('https://us.api.battle.net/wow/character/zul\\'jin/' + player + '?fields=pvp&locale=en_US&apikey=' + os.environ.get(\"WoW_Token\")) as res:\r\n if res.status == 200:\r\n data = await res.json()\r\n player_pvp_stats = data['pvp']['brackets']['ARENA_BRACKET_3v3']\r\n await ctx.message.channel.send(u\"Player: {:s}\").format(player)\r\n await ctx.message.channel.send(\"Rating: {:d}\".format(player_pvp_stats['rating']))\r\n await ctx.message.channel.send(\"Season Wins: {:d}\".format(player_pvp_stats['seasonWon']))\r\n await ctx.message.channel.send(\"Season Losses: {:d}\".format(player_pvp_stats['seasonLost']))\r\n\r\n if player_pvp_stats['seasonWon'] == 0 or player_pvp_stats['seasonLost'] == 0:\r\n await ctx.message.channel.send(\"Ratio: 0\")\r\n else:\r\n await ctx.message.channel.send(\"Ratio: {:.4f}\".format(\r\n float(player_pvp_stats['seasonWon'])/\r\n float(player_pvp_stats['seasonLost']))\r\n )", "def update_stats(home_player: Player, away_player: Player,\n home_score: int, away_score: int) -> None:\n home_player.goals_for += home_score\n home_player.goals_against += away_score\n away_player.goals_for += away_score\n 
away_player.goals_against += home_score\n\n # home won\n if (home_score > away_score):\n home_player.wins += 1\n away_player.losses += 1\n # away won\n elif (home_score < away_score):\n home_player.losses += 1\n away_player.wins += 1\n # draw\n else:\n home_player.draws += 1\n away_player.draws += 1", "def calculate_uptake_rate(team_members, issues):\n team_uptake_rate = {name: len([issue for issue in issues.values() if issue['assigned_to'] == name])/\n float(len([issue for issue in issues.values() if issue['assigned_to']]))\n for name in team_members}\n return team_uptake_rate", "def getPercent(*args):", "def getPercent(*args):", "def percentage(count, total):\n return count / total * 100", "def add_player_derived_stats(pl_stats, team_stats, opp_stats):\n pl_stats['FGP'] = gen_derived_var(pl_stats['FG'], pl_stats['FGA'])\n pl_stats['FTP'] = gen_derived_var(pl_stats['FT'], pl_stats['FTA'])\n pl_stats['THRP'] = gen_derived_var(pl_stats['THR'], pl_stats['THRA'])\n pl_stats['EFGP'] = gen_derived_var(pl_stats['FG'] + 0.5 *\n pl_stats['THR'], pl_stats['FGA'])\n pl_stats['TSA'] = pl_stats['FGA'] + 0.44 * pl_stats['FTA']\n pl_stats['TSP'] = gen_derived_var(pl_stats['PTS'], 2 * pl_stats['TSA'])\n pl_stats['THRAr'] = gen_derived_var(pl_stats['THRA'], pl_stats['FGA'])\n pl_stats['FTAr'] = gen_derived_var(pl_stats['FTA'], pl_stats['FGA'])\n pl_stats['TWOAr'] = gen_derived_var(pl_stats['TWOA'], pl_stats['FGA'])\n pl_stats['TWOP'] = gen_derived_var(pl_stats['TWO'], pl_stats['TWOA'])\n pl_stats['ORBr'] = gen_derived_var(pl_stats['ORB'], pl_stats['TRB'])\n pl_stats['DRBr'] = gen_derived_var(pl_stats['DRB'], pl_stats['TRB'])\n pl_stats['AST_to_TOV'] = gen_derived_var(pl_stats['AST'], pl_stats['TOV'])\n pl_stats['STL_to_TOV'] = gen_derived_var(pl_stats['STL'], pl_stats['TOV'])\n pl_stats['FIC'] = (pl_stats['PTS'] + pl_stats['ORB'] + 0.75 * pl_stats['DRB'] +\n pl_stats['AST'] + pl_stats['STL'] + pl_stats['BLK'] - 0.75 *\n pl_stats['FGA'] - 0.375 * pl_stats['FTA'] -\n pl_stats['TOV'] - 0.5 * pl_stats['PF'])\n pl_stats['FT_to_FGA'] = gen_derived_var(pl_stats['FT'], pl_stats['FGA'])\n\n team_stats['OPOS'] = gen_possessions(pl_stats, opp_stats)\n team_stats['DPOS'] = gen_possessions(opp_stats, pl_stats)\n team_stats['PACE'] = 48 * ((team_stats['OPOS'] + team_stats['DPOS']) / (2 * (float(team_stats['MP']) / 5)))\n\n # test for None\n pl_stats['ORBP'] = 100.0 * (pl_stats['ORB'] * (team_stats['MP'] / 5)) / (float(pl_stats['MP']) * (team_stats['ORB'] + opp_stats['DRB']))\n pl_stats['DRBP'] = 100.0 * (pl_stats['DRB'] * (team_stats['MP'] / 5)) / (float(pl_stats['MP']) * (team_stats['DRB'] + opp_stats['ORB']))\n pl_stats['TRBP'] = 100.0 * (pl_stats['TRB'] * (team_stats['MP'] / 5)) / (float(pl_stats['MP']) * (team_stats['TRB'] + opp_stats['TRB']))\n pl_stats['ASTP'] = 100.0 * pl_stats['AST'] / (((float(pl_stats['MP']) / (team_stats['MP'] / 5)) * team_stats['FG']) - pl_stats['FG'])\n pl_stats['STLP'] = 100.0 * (pl_stats['STL'] * (team_stats['MP'] / 5)) / (float(pl_stats['MP']) * team_stats['DPOS'])\n pl_stats['BLKP'] = 100.0 * (pl_stats['BLK'] * (team_stats['MP'] / 5)) / (float(pl_stats['MP']) * (opp_stats['FGA'] - opp_stats['THRA']))\n try:\n pl_stats['TOVP'] = 100.0 * pl_stats['TOV'] / (pl_stats['FGA'] + 0.44 * pl_stats['FTA'] + pl_stats['TOV'])\n except ZeroDivisionError:\n pl_stats['TOVP'] = None\n pl_stats['HOB'] = gen_derived_var(pl_stats['FG'] + pl_stats['AST'], team_stats['FG'])\n # pl_stats['+/-'] = pl_stats['+/-'] / pl_stats['N']", "def __countPlayers(self, players):\n\n numLow = sum(map(lambda p: p.lowFps, 
players))\n numHigh = sum(map(lambda p: p.highFps, players))\n numMed = len(players) - numLow - numHigh\n\n return '%s, %s, %s' % (numLow, numMed, numHigh)", "def get_score_percent(self, value):\n qs_related = RoundData.objects.prefetch_related(\n 'shotdata').select_related('shotdata')\n\n round_holes = int(self.round_type)\n\n if value == 'par':\n return round((qs_related.filter(shotdata__nr_strokes=F('shotdata__hole__par')).count()/round_holes), 2)\n if value == 'birdie_better':\n return round((qs_related.filter(shotdata__nr_strokes__lt=F('shotdata__hole__par')).count()/round_holes), 2)\n if value == 'tbogey_worse':\n return round((qs_related.filter(shotdata__nr_strokes__gte=F('shotdata__hole__par')+3).count()/round_holes), 2)\n if isinstance(value, int):\n return round((qs_related.filter(shotdata__nr_strokes=F('shotdata__hole__par') + value).count()/round_holes), 2)", "def walkout_percentage_average(df,start_year, end_year,bat_met, player_name):\n base_fields = ['PA']\n emp_list = check_base_fields(df,base_fields)\n\n if not emp_list:\n bb_val = round((pd.to_numeric(df['BB.'].str.split('%').str[0])/100)*df['PA'],0).sum()\n pa_total = df['PA'].fillna(0).sum()\n return \"{:.2%}\".format(bb_val / pa_total)\n else:\n df = original_dataframe(start_year,end_year,bat_met+emp_list,player_name)\n return walkout_percentage_average(df,start_year, end_year,bat_met, player_name)", "def get_fool_ratio(self, test_acc, attack_accs):\n return [round(100*((test_acc - attack_acc) / test_acc), 2) for attack_acc in attack_accs]", "def calc_stats(hits, misses):\n try:\n result = (float(misses) / float(hits)) * 100.0\n except ZeroDivisionError:\n if misses == 0:\n result = 0.0\n else:\n result = 100.0\n return result", "def calculate_score(player_cards):\n score = sum(player_cards)\n return score", "def scoreTeams(curTeams, oppTeam, pokedex, league, minDistWanted):\n battleData, htmlData = loadBattleData(league)\n similarities = loadSims() \n \n #If not given an opponent team then simply randomly choose losers from the dataset to compare to.\n if len(oppTeam) == 0:\n picks = set([])\n while (len(picks) < NUMLOSINGTEAMS and (not len(picks) == len(battleData))):\n picks.add(random.randint(0,len(battleData)-1))\n\n losers = []\n loserDict = {}\n for i in picks:\n entry = battleData[i]\n winner,loser = determineWinner(entry)\n loserDict[str(loser)] = [winner]\n losers.append( (loser,0) )\n\n #Given opponent team then find similar teams\n else:\n oppTeam = [getSimPokemon(opp,similarities) for opp in oppTeam]\n\n #create dictionary from losers team to the team that beat them.\n loserDict = {}\n sims = []\n for d in battleData:\n winner, loser = determineWinner(d)\n\n wTeam = teamToArray(winner,pokedex)\n lTeam = np.array(teamToArray(loser, pokedex))\n\n score = 0\n for oppNp in oppTeam:\n score+= np.amax(lTeam*oppNp) \n\n if str(loser) in loserDict:\n loserDict[str(loser)].append(winner)\n else:\n #new to dictonary\n loserDict[str(loser)] = [winner]\n\n sims.append((loser, score))\n\n\n sims = sorted(sims, key = lambda x : x[1], reverse = True)\n\n cutoff = min(len(sims),NUMLOSINGTEAMS)\n losers = sims[:cutoff]\n\n #Gather winners to losing teams\n winnersComp = []\n for loser,_ in losers:\n for winner in loserDict[str(loser)]:\n winnersComp.append(teamToArray(winner,pokedex))\n \n topScore = len(winnersComp)*6 #pkmn team size\n\n results = []\n inverted_idx = {}\n\n existsSet = []\n\n #Creates inverted index for teams, while simoultaneously weeding out any teams that are exactly similar.\n for i in 
range(len(curTeams)):\n team = curTeams[i]\n results.append((team,0))\n sTeam = set(team)\n if not (sTeam in existsSet):\n existsSet.append(sTeam)\n for pkm in team:\n if pkm != EMPTY:\n if pkm in inverted_idx:\n inverted_idx[pkm].append(i)\n else:\n inverted_idx[pkm] = [i]\n \n #Giving the similiarity scores to the winners based off of the inverted index.\n for pkm in inverted_idx:\n for winner in winnersComp:\n wArr = np.array(winner)\n #tArr = getSimPokemon(pkm,similarities)\n tArr = similarities[pkm]\n \n vals = wArr * tArr\n\n score = np.amax(vals)\n\n for i in inverted_idx[pkm]:\n results[i] = (results[i][0],results[i][1]+(score/topScore))\n\n results = sorted(results, key = lambda x : x[1], reverse = True)\n\n if len(results) < NUMTEAMSRETURN:\n if len(results) == 0:\n returnTeams = [[] for x in range(NUMTEAMSRETURN)]\n teamScores = [0 for x in range(NUMTEAMSRETURN)]\n\n else:\n returnTeams = [result[0] for result in results]\n teamScores = [result[1] for result in results]\n else:\n firstResult, firstScore = results[0]\n returnTeams = [firstResult]\n teamScores = [round(firstScore*100,1)]\n returnSets = [set(firstResult)]\n \n i = 1\n\n #Loops through results and adds teams with the proper edit distance away.\n while(len(returnTeams) < NUMTEAMSRETURN and minDistWanted > 0):\n teamToConsider,teamToConsiderScore = results[i]\n \n considerSet = set(teamToConsider)\n add = True\n ##checks the edit distance of teams is above wanted\n for team in returnSets:\n if len(team.union(considerSet)) < len(team)+minDistWanted:\n add = False\n\n ##If indeed above wanted levels then add\n if add:\n returnTeams.append(teamToConsider)\n returnSets.append(considerSet)\n teamScores.append(round(teamToConsiderScore*100,1))\n \n i+=1\n\n if i >= len(results):\n i = 1\n minDistWanted -= 1 \n \n winHtmls = []\n if htmlData != None:\n for team,_ in losers:\n for winner in loserDict[str(team)]:\n winHtmls.extend(htmlData[str(sorted(winner))])\n \n\n return returnTeams, teamScores, winHtmls", "def updateSuit(self, playersView: Player, ofPlayer: Player, suit: Suit):\n deck = [Card(suit, val) for val in range(2, 14 + 1)]\n\n playersProb = self[playersView, ofPlayer, :]\n\n for otherPlayer in Player:\n if otherPlayer != playersView and otherPlayer != ofPlayer:\n for card in deck:\n self[playersView, otherPlayer, card] += (playersProb[card.__hash__()] / 2)\n\n for card in deck:\n self[playersView, ofPlayer, card] = 0", "def marcels_players(goalie, date, df):\n # 0 = that year, 1 is year b4 ....\n marcel_weights = [.36, .29, .21, .14]\n reg_const = 2000\n reg_avg = 0 # Where to regress to\n\n # Use past 3 season to weight games played -> Just take weighted average\n gp_weights = [8, 4, 2, 0]\n\n season = int(helpers.get_season(date))\n\n weighted_goals_sum, weighted_fen_sum, weighted_xg_sum, weights_marcel_sum = 0, 0, 0, 0\n weighted_gp_sum, weights_gp_sum = 0, 0\n\n # Past 4 Seasons\n for i in range(0, 4):\n if season - i > 2006:\n # Subset from stats df\n df_goalie = df[(df['player'] == goalie) & (df['season'] == (season - i))]\n\n # Sanity Check\n if df_goalie.shape[0] > 1:\n print(\"Too many rows!!!!!!!\")\n exit()\n\n # If he played that year\n if not df_goalie.empty:\n weighted_goals_sum += df_goalie.iloc[0]['goals_a'] * marcel_weights[i]\n weighted_fen_sum += df_goalie.iloc[0]['fenwick_a'] * marcel_weights[i]\n weighted_xg_sum += df_goalie.iloc[0]['xg_a'] * marcel_weights[i]\n weighted_gp_sum += df_goalie.iloc[0]['games'] * gp_weights[i]\n\n # -> To divide by at end...normalize everything\n 
weights_marcel_sum += marcel_weights[i]\n weights_gp_sum += gp_weights[i]\n\n # Normalize weighted sums\n weighted_xg_sum = weighted_xg_sum / weights_marcel_sum if weights_marcel_sum != 0 else 0\n weighted_goals_sum = weighted_goals_sum / weights_marcel_sum if weights_marcel_sum != 0 else 0\n weighted_fen_sum = weighted_fen_sum / weights_marcel_sum if weights_marcel_sum != 0 else 0\n\n # Get Regressed fsv%\n if weighted_fen_sum != 0:\n weighted_adj_fsv = ((1 - weighted_goals_sum / weighted_fen_sum) - (1 - weighted_xg_sum / weighted_fen_sum)) * 100\n else:\n weighted_adj_fsv = 0\n reg_adj_fsv = weighted_adj_fsv - ((weighted_adj_fsv - reg_avg) * (reg_const / (reg_const + weighted_fen_sum)))\n\n # Get weighted gp\n weighted_gp_sum = weighted_gp_sum / weights_gp_sum if weights_gp_sum != 0 else 0\n\n return {'fsv': reg_adj_fsv, 'gp': weighted_gp_sum}", "def set_steals_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n poss = self.get_team_possessions()\n result = 0.00\n if bx[\"minutes\"] > 0:\n result = ((bx[\"steals\"] * (team[\"minutes\"]/Decimal('5'))) / Decimal(float(bx[\"minutes\"]) * poss)) * 100\n self.steals_percentage = \"%.2f\" % round(result, 2)", "def set_tichu_percentage(threshold):\n tichu_threshold = threshold\n tichu_cnt = 0\n deck = Deck()\n players = [Player(id=0), Player(id=1), Player(id=2), Player(id=3)]\n for i in range(100):\n myhands = deck.shuffle_and_deal()\n for idx in range(4):\n players[idx].assign_hand(myhands[idx])\n score = players[idx].hand_rating\n if score > tichu_threshold:\n tichu_cnt += 1\n players[idx].hand.show()\n print('Player calls Tichu with a hand rating of {:.1f}.'.format(score))\n print('\\n')\n print('Tichu percentage: {:.2f}'.format(tichu_cnt/100))", "def get_effective_team_stats(team_stats: dict) -> int:\n\n # TODO: Problem effective stats <-> value\n # value = int((reaction * 0.5 + technical_skill * 1.5 + tactical_skill * 1.5 + game_knowledge) * xp)\n\n stats_sum = 0\n\n for value in team_stats.values():\n stats_sum += value * random.uniform(0.5, 1.5)\n\n return stats_sum", "def get_scores(self, tournament: Tournament):\n self.model.eval()\n # collate_fn = lambda x: collate_teams(x, tournament.max_members)\n dl_rank = DataLoader(tournament.ranking, num_workers=self.jobs, batch_size=self.bs, shuffle=False)\n iterator = tqdm(dl_rank, position=0, desc=f'{tournament.tournament_id} ranking', disable=True)\n scores = []\n for i, team in enumerate(iterator):\n score = self.model.get_team_score(team.to(self.device))\n scores.append(score.cpu().numpy())\n\n scores = np.concatenate(scores)\n return scores.flatten()", "def get_team_scores(self, team, include_home=True, include_away=True):\n if include_away:\n away_games = list(filter(lambda g: team == g.AwayTeam, self.games))\n else:\n away_games = []\n\n if include_home:\n home_games = list(filter(lambda g: team == g.HomeTeam, self.games))\n else:\n home_games = []\n\n scored_h = [g.FTHG for g in home_games]\n scored_a = [g.FTAG for g in away_games]\n\n conceded_h = [g.FTAG for g in home_games]\n conceded_a = [g.FTHG for g in away_games]\n\n try:\n mean_gd = mean(scored_h + scored_a) - mean(conceded_h + conceded_a)\n home_gd = mean(scored_h) - mean(conceded_h)\n home_adv = home_gd - mean_gd\n except Exception:\n home_adv = 0\n\n return {\n \"scored_xg\": scored_h + scored_a,\n \"conceded_xg\": conceded_h + conceded_a,\n \"home_adv\": home_adv,\n \"expected_points\": self.get_table(metric='points')[team] /\n len(home_games + 
away_games)\n }", "def set_assists_per_turnover(self):\n bx = self.get_standard_stats()\n ratio = bx[\"assists\"]\n if bx[\"turnovers\"] > 0:\n ratio = bx[\"assists\"] / bx[\"turnovers\"]\n self.assists_per_turnover = \"%.2f\" % round(ratio, 2)", "def custom_score(game, player):\n \n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n \n # return different between # of my agent's move and oppenent's\n return float(own_moves - opp_moves)", "def get_new_ratings(players, teams):\n nb_players_team0 = len(teams[0])\n nb_players_team1 = len(teams[1])\n winner = players[teams[0][0]]\n loser = players[teams[1][0]]\n if nb_players_team0 == 1 and nb_players_team1 == 1:\n new_r1, new_r3 = rate_1vs1(winner,loser)\n elif nb_players_team0 == 1 and nb_players_team1 > 1:\n team_loser = [loser, players[teams[1][1]]]\n (new_r1), (new_r3, new_r4) = rate([winner, team_loser], ranks=[0, 1]) \n elif nb_players_team0 > 1 and nb_players_team1 == 1:\n team_winner = [winner, players[teams[0][1]]]\n (new_r1, new_r2), (new_r3) = rate([team_winner, loser], ranks=[0, 1]) \n else:\n team_loser = [loser, players[teams[1][1]]]\n team_winner = [winner, players[teams[0][1]]]\n (new_r1, new_r2), (new_r3, new_r4) = rate([team_winner, team_loser], ranks=[0, 1]) \n player1 = {'name': teams[0][0], 'mu': new_r1.mu, 'sigma': new_r1.sigma}\n player3 = {'name': teams[1][0], 'mu': new_r3.mu, 'sigma': new_r3.sigma}\n if nb_players_team0 > 1:\n player2 = {'name': teams[0][1], 'mu': new_r2.mu, 'sigma': new_r2.sigma}\n if nb_players_team1 > 1:\n player4 = {'name': teams[1][1], 'mu': new_r4.mu, 'sigma': new_r4.sigma}\n if nb_players_team0 > 1:\n return [player1, player2, player3, player4]\n return [player1, player2, player4]\n return [player1, player3]", "def U_Function(currentPlayer, oppositePlayer, N, maxEntity):\n EndPointPlayer1 = Posisi.Posisi(N - 1, N - 1)\n EndPointPlayer2 = Posisi.Posisi(0, 0)\n \n sumPionPlayer1 = 0\n sumPionPlayer2 = 0\n\n if (currentPlayer.noPlayer == 1):\n for Pion in currentPlayer.arrayPion:\n sumPionPlayer1 += Pion.currentPosition.euclidean(EndPointPlayer1)\n for Pion in oppositePlayer.arrayPion:\n sumPionPlayer2 += Pion.currentPosition.euclidean(EndPointPlayer2)\n \n if (currentPlayer.noPlayer == 2):\n for Pion in currentPlayer.arrayPion:\n sumPionPlayer2 += Pion.currentPosition.euclidean(EndPointPlayer2)\n for Pion in oppositePlayer.arrayPion:\n sumPionPlayer1 += Pion.currentPosition.euclidean(EndPointPlayer1)\n\n if (maxEntity == 1):\n return -sumPionPlayer1 + sumPionPlayer2\n else:\n return -sumPionPlayer2 + sumPionPlayer1", "def add_team_derived_stats(stats, opp_stats):\n stats['FGP'] = gen_derived_var(stats['FG'], stats['FGA'])\n stats['FTP'] = gen_derived_var(stats['FT'], stats['FTA'])\n stats['THRP'] = gen_derived_var(stats['THR'], stats['THRA'])\n stats['EFGP'] = gen_derived_var(stats['FG'] + 0.5 *\n stats['THR'], stats['FGA'])\n stats['TSA'] = stats['FGA'] + 0.44 * stats['FTA']\n stats['TSP'] = gen_derived_var(stats['PTS'], 2 * stats['TSA'])\n stats['THRAr'] = gen_derived_var(stats['THRA'], stats['FGA'])\n stats['FTAr'] = gen_derived_var(stats['FTA'], stats['FGA'])\n stats['TWOAr'] = gen_derived_var(stats['TWOA'], stats['FGA'])\n stats['TWOP'] = gen_derived_var(stats['TWO'], stats['TWOA'])\n stats['ORBr'] = gen_derived_var(stats['ORB'], stats['TRB'])\n stats['DRBr'] = gen_derived_var(stats['DRB'], stats['TRB'])\n stats['AST_to_TOV'] = gen_derived_var(stats['AST'], stats['TOV'])\n 
stats['STL_to_TOV'] = gen_derived_var(stats['STL'], stats['TOV'])\n stats['FIC'] = (stats['PTS'] + stats['ORB'] + 0.75 * stats['DRB'] +\n stats['AST'] + stats['STL'] + stats['BLK'] - 0.75 *\n stats['FGA'] - 0.375 * stats['FTA'] -\n stats['TOV'] - 0.5 * stats['PF'])\n stats['FT_to_FGA'] = gen_derived_var(stats['FT'], stats['FGA'])\n\n stats['OPOS'] = gen_possessions(stats, opp_stats)\n stats['DPOS'] = gen_possessions(opp_stats, stats)\n stats['PACE'] = 48 * ((stats['OPOS'] + stats['DPOS']) / (2 * (float(stats['MP']) / 5)))\n\n stats['ORBP'] = stats['ORB'] / (stats['ORB'] + opp_stats['DRB'])\n stats['DRBP'] = stats['DRB'] / (stats['DRB'] + opp_stats['ORB'])\n stats['TRBP'] = stats['TRB'] / (stats['TRB'] + opp_stats['TRB'])\n stats['ASTP'] = stats['AST'] / stats['FG']\n stats['STLP'] = stats['STL'] / stats['DPOS']\n stats['BLKP'] = stats['BLK'] / opp_stats['TWOA']\n stats['TOVP'] = stats['TOV'] / stats['OPOS']\n # stats['+/-'] = stats['+/-'] / stats['N']", "def score(self,player, board):\r\n numPlayer = 0\r\n numOpp = 0\r\n for i in self.squares():\r\n if board[i] == player:\r\n numPlayer+= SQUARE_WEIGHTS[i]\r\n else:\r\n numOpp+=SQUARE_WEIGHTS[i]\r\n return numPlayer-numOpp", "def calc_adv_U(self):\n num_U = 0\n adv_U = numpy.zeros((3,3), float)\n\n for atm in self:\n ## use the atom's U matrix if it exists, otherwise use the\n ## temperature factor\n\n if atm.U is not None:\n adv_U += atm.U\n num_U += 1\n\n return adv_U / num_U", "def recalculate_popularity(self):\n self.voters = 0\n for x in self.votes:\n self.voters += 1\n if x.good:\n self.popularity += 1\n else:\n self.popularity -= 1", "def find_pcts(p1, p2, start_b = [], iter = 10000):\n win_record = []\n for i in range(iter):\n deck = Deck()\n need = 5 - len(start_b)\n b2 = draw_sure(deck, need, p1+p2+start_b)\n win_record.append(_who_wins(start_b + b2, p1, p2, printout = False))\n return [win_record.count(1) / float(len(win_record)), \n win_record.count(2) / float(len(win_record))\n ]", "def _compute_global_stats():\n global_stats = []\n \n wmt16_group = Group.objects.filter(name='WMT16')\n wmt16_users = _get_active_users_for_group(wmt16_group)\n \n # Check how many HITs have been completed. We now consider a HIT to be\n # completed once it has been annotated by one or more annotators.\n #\n # Before we required `hit.users.count() >= 3` for greater overlap.\n hits_completed = HIT.objects.filter(mturk_only=False, completed=True).count()\n \n # Check any remaining active HITs which are not yet marked complete.\n for hit in HIT.objects.filter(active=True, mturk_only=False, completed=False):\n if hit.users.count() >= 1:\n hits_completed = hits_completed + 1\n hit.completed = True\n hit.save()\n \n # Compute remaining HITs for all language pairs.\n hits_remaining = HIT.compute_remaining_hits()\n \n # Compute number of results contributed so far.\n ranking_results = RankingResult.objects.filter(\n item__hit__completed=True, item__hit__mturk_only=False)\n \n from math import factorial\n system_comparisons = 0\n for result in ranking_results:\n result.reload_dynamic_fields()\n # TODO: this implicitly counts A=B comparisons for multi systems.\n # Basically, inflating the number of pairwise comparisons... 
Fix!\n combinations = factorial(result.systems)/(factorial(result.systems-2) * 2) if result.systems > 2 else 0\n system_comparisons = system_comparisons + combinations\n \n # Aggregate information about participating groups.\n groups = set()\n for user in wmt16_users:\n for group in _identify_groups_for_user(user):\n groups.add(group)\n \n # Compute average/total duration over all results.\n durations = RankingResult.objects.all().values_list('duration', flat=True)\n total_time = sum([datetime_to_seconds(x) for x in durations])\n avg_time = total_time / float(hits_completed or 1)\n avg_user_time = total_time / float(3 * hits_completed or 1)\n \n global_stats.append(('Users', len(wmt16_users)))\n global_stats.append(('Groups', len(groups)))\n global_stats.append(('HITs completed', '{0:,}'.format(hits_completed)))\n global_stats.append(('HITs remaining', '{0:,}'.format(hits_remaining)))\n global_stats.append(('Ranking results', '{0:,}'.format(ranking_results.count())))\n global_stats.append(('System comparisons', '{0:,}'.format(system_comparisons)))\n global_stats.append(('Average duration (per HIT)', seconds_to_timedelta(avg_time)))\n global_stats.append(('Average duration (per task)', seconds_to_timedelta(avg_user_time)))\n global_stats.append(('Total duration', seconds_to_timedelta(total_time)))\n \n # Create new status data snapshot\n TimedKeyValueData.update_status_if_changed('users', str(len(wmt16_users)))\n TimedKeyValueData.update_status_if_changed('groups', str(len(groups)))\n TimedKeyValueData.update_status_if_changed('hits_completed', str(hits_completed))\n TimedKeyValueData.update_status_if_changed('hits_remaining', str(hits_remaining))\n TimedKeyValueData.update_status_if_changed('ranking_results', str(ranking_results.count()))\n TimedKeyValueData.update_status_if_changed('system_comparisons', str(system_comparisons))\n TimedKeyValueData.update_status_if_changed('duration_per_hit', str(seconds_to_timedelta(avg_time)))\n TimedKeyValueData.update_status_if_changed('duration_per_task', str(seconds_to_timedelta(avg_user_time)))\n TimedKeyValueData.update_status_if_changed('duration_total', str(seconds_to_timedelta(total_time)))\n \n return global_stats", "def get_team_stats() -> List[BaseStat]:\n return [PossessionStat(),\n TeamTendencies(),\n RelativeTendencies(),\n PerPossessionStat(),\n RumbleItemStat(),\n PreRumbleGoals(),\n DropshotStats()\n ]", "def evaluate_power(soldier_list: List[Soldier]):\n inf_count = 0\n inf_avg_weapon = 0.0\n inf_avg_armor = 0.0\n arc_count = 0\n arc_avg_weapon = 0.0\n arc_avg_armor = 0.0\n cvl_count = 0\n cvl_avg_weapon = 0.0\n cvl_avg_armor = 0.0\n \n for soldier in soldier_list:\n ################################# YOUR CODE HERE #################################\n if soldier.typecode == \"ARC\":\n arc_count += 1\n arc_avg_armor += soldier.armor\n arc_avg_weapon += soldier.weapon\n elif soldier.typecode == \"INF\":\n inf_count += 1\n inf_avg_armor += soldier.armor\n inf_avg_weapon += soldier.weapon\n elif soldier.typecode == \"CVL\":\n cvl_count += 1\n cvl_avg_armor += soldier.armor\n cvl_avg_weapon += soldier.weapon\n if arc_count != 0:\n arc_avg_armor /= arc_count\n arc_avg_weapon /= arc_count\n\n if cvl_count != 0:\n cvl_avg_armor /= cvl_count\n cvl_avg_weapon /= cvl_count\n\n if inf_count != 0:\n inf_avg_armor /= inf_count\n inf_avg_weapon /= inf_count\n ##################################################################################\n return (inf_count, inf_avg_weapon, inf_avg_armor), (arc_count, arc_avg_weapon, arc_avg_armor), (cvl_count, 
cvl_avg_weapon, cvl_avg_armor)", "def __calculateNormalizedScores(self):\n year_scores = {0 : []}\n for venue in self.venue_scores:\n v_scores = []\n for year in self.venue_scores[venue]:\n v_scores.append(self.venue_scores[venue][year])\n if year not in year_scores:\n year_scores[year] = []\n year_scores[year].append(self.venue_scores[venue][year])\n x_year = np.average(np.array(v_scores))\n self.venue_scores[venue][0] = x_year\n year_scores[0].append(x_year)\n \n ##for standardization\n #year_metrics = {x : (np.average(np.array(year_scores[x])), np.std(np.array(year_scores[x]))) for x in year_scores}\n ##for normalization\n year_metrics = {x: (max(year_scores[x]), min(year_scores[x])) for x in year_scores}\n \n #print year_metrics\n \n for venue in self.venue_scores:\n self.normalized_scores[venue] = dict()\n for year in self.venue_scores[venue]:\n #self.standard_scores[venue][year] = round((self.venue_scores[venue][year] - year_metrics[year][0]) / year_metrics[year][1],5)\n #self.normalized_scores[venue][year] = (self.venue_scores[venue][year] - year_metrics[year][1]) / (year_metrics[year][0] - year_metrics[year][1]) + eps\n self.normalized_scores[venue][year] = (self.venue_scores[venue][year] - year_metrics[year][1] + self.epsilon) / (year_metrics[year][0] - year_metrics[year][1] + self.epsilon)", "def get_total_health(self,obs):\n total_health = 0\n for unit in obs.observation.raw_units:\n if(unit.alliance == PlayerRelative.SELF):\n total_health += unit[FeatureUnit.health]\n return total_health", "def percentage(self):\n temp = self.cpu_freq_time_spent.copy()\n for i in self.cpu_freq_time_spent:\n total = 0\n for j in self.cpu_freq_time_spent[i]:\n total += self.cpu_freq_time_spent[i][j]\n for j in self.cpu_freq_time_spent[i]:\n if total != 0:\n temp[i][j] = self.cpu_freq_time_spent[i][j] * 100 / total\n else:\n temp[i][j] = 0\n return temp", "def calculate_profit(self):", "def get_list_team_scores(self):\n scores = defaultdict(lambda: {\n \"scored_xg\": [],\n \"conceded_xg\": [],\n \"home_adv\": 0,\n \"expected_points\": 0\n })\n\n for g in self.games:\n scores[g.HomeTeam][\"scored_xg\"].append(g.FTHG)\n scores[g.HomeTeam][\"conceded_xg\"].append(g.FTAG)\n scores[g.AwayTeam][\"scored_xg\"].append(g.FTAG)\n scores[g.AwayTeam][\"conceded_xg\"].append(g.FTHG)\n\n for team in scores.keys():\n scores[team][\"expected_points\"] = (self.get_table(metric='points')[team] /\n len(scores[team][\"scored_xg\"]))\n\n return scores", "def get_percentage_sf_votes(self):\n\n votes_f = self.get_num_f_votes()\n votes_sf = self.get_num_sf_votes()\n\n # avoid dividing by zero\n if votes_f + votes_sf == 0:\n return 0\n else:\n ratio = float(votes_sf)/(votes_f + votes_sf)\n return round(ratio * 100, 1)", "def set_offensive_ratio(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n if bx[\"minutes\"] > 0 and (bx[\"t2p_int\"] + bx[\"t3p_int\"]) > 0:\n fgm = bx[\"t2p_conv\"] + bx[\"t3p_conv\"]\n fga = bx[\"t2p_int\"] + bx[\"t3p_int\"]\n team_fgm = team[\"t2p_conv\"] + team[\"t3p_conv\"]\n team_fga = team[\"t2p_int\"] + team[\"t3p_int\"]\n team_points = team[\"t2p_conv\"]*2 + team[\"t3p_conv\"]*3 + team[\"tl_conv\"]\n points = bx[\"t2p_conv\"]*2 + bx[\"t3p_conv\"]*3 + bx[\"tl_conv\"]\n\n try:\n qAST = (Decimal(bx[\"minutes\"] / (team[\"minutes\"] / 5)) * (Decimal('1.14') * Decimal((team[\"assists\"] - bx[\"assists\"]) / team_fgm))) + \\\n Decimal((((team[\"assists\"] / team[\"minutes\"]) * bx[\"minutes\"] * 5 - bx[\"assists\"]) / ((team_fgm / 
team[\"minutes\"]) * bx[\"minutes\"] * 5 - fgm)) * (1 - (bx[\"minutes\"] / (team[\"minutes\"] / 5))))\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n qAST = 1\n except InvalidOperation:\n print(BCOLORS.WARNING + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n qAST = 1\n\n fg_part = fgm * (1 - Decimal('0.5') * Decimal((points - bx[\"tl_conv\"]) / (2 * fga)) * qAST)\n\n try:\n ast_part = Decimal('0.5') * Decimal(((team_points - team[\"tl_conv\"]) - (points - bx[\"tl_conv\"])) / (2*(team_fga - fga))) * bx[\"assists\"]\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n ast_part = 0\n\n if bx[\"tl_int\"] > 0:\n ft_part = Decimal(1 - (1 - (bx[\"tl_conv\"] / bx[\"tl_int\"]))**2) * Decimal('0.4') * bx[\"tl_int\"]\n else:\n ft_part = 0\n team_scoring_poss = Decimal(team_fgm + Decimal(1 - (1 - (team[\"tl_conv\"] / team[\"tl_int\"]))**2) * team[\"tl_int\"] * Decimal('0.4'))\n try:\n team_orb_percentage = Decimal(team[\"reb_of\"] / (team[\"reb_of\"] + ((opp_team[\"reb_def\"] + opp_team[\"reb_of\"]) - opp_team[\"reb_of\"])))\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n team_orb_percentage = 0\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n team_orb_percentage = 0\n\n team_play_percentage = Decimal(team_scoring_poss / (team_fga + team[\"tl_int\"] * Decimal('0.4') + team[\"turnovers\"]))\n try:\n team_orb_weight = ((1 - team_orb_percentage) * team_play_percentage) / ((1 - team_orb_percentage) * team_play_percentage + team_orb_percentage * (1 - team_play_percentage))\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n team_orb_weight = 0\n\n orb_part = bx[\"reb_of\"] * team_orb_weight * team_play_percentage\n\n fg_x_poss = (fga - fgm) * (1 - Decimal('1.07') * team_orb_percentage)\n if bx[\"tl_conv\"] > 0:\n ft_x_poss = Decimal((1 - (bx[\"tl_conv\"] / bx[\"tl_int\"]))**2) * Decimal('0.4') * bx[\"tl_int\"]\n else:\n ft_x_poss = Decimal(1 - (bx[\"tl_conv\"] / 1)**2) * Decimal('0.4') * bx[\"tl_int\"]\n try:\n sc_poss = (fg_part + ast_part + ft_part) * (1 - (team[\"reb_of\"] / team_scoring_poss) * team_orb_weight * team_play_percentage) + orb_part\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n sc_poss =0\n\n tot_poss = sc_poss + fg_x_poss + ft_x_poss + bx[\"turnovers\"]\n\n pprod_fg_part = 2 * (fgm + Decimal('0.5') * bx[\"t3p_conv\"]) * (1 - Decimal('0.5') * Decimal((points - bx[\"tl_conv\"]) / (2 * fga)) * qAST)\n\n try:\n pprod_ast_part = 2 * ((team_fgm - fgm + Decimal('0.5') * (team[\"t3p_conv\"] - bx[\"t3p_conv\"])) / (team_fgm - fgm)) * Decimal('0.5') * Decimal(((team_points - team[\"tl_conv\"]) - (points - bx[\"tl_conv\"])) / (2 * (team_fga - fga))) * bx[\"assists\"]\n except:\n pprod_ast_part = 0\n\n pprod_orb_part = bx[\"reb_of\"] * team_orb_weight * team_play_percentage * (team_points / (team_fgm + Decimal(1 - (team[\"tl_conv\"] / team[\"tl_int\"])**2) * Decimal('0.4') * team[\"tl_int\"]))\n try:\n pprod = (pprod_fg_part + pprod_ast_part + bx[\"tl_conv\"]) * (1 - (team[\"reb_of\"] / team_scoring_poss) * team_orb_weight * team_play_percentage) + pprod_orb_part\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n pprod = 0\n\n try:\n result = 100 * (pprod / tot_poss)\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n 
result = 0\n\n # print(\"fgm: \" + str(fgm))\n # print(\"fga: \" + str(fga))\n # print(\"team_fgm: \" + str(team_fgm))\n # print(\"team_fga: \" + str(team_fga))\n # print(\"team_points: \" + str(team_points))\n # print(\"points: \" + str(points))\n # print(\"qAST: \" + str(qAST))\n # print(\"fg_part: \" + str(fg_part))\n # print(\"ast_part: \" + str(ast_part))\n # print(\"ft_part: \" + str(ft_part))\n # print(\"team_scoring_poss: \" + str(team_scoring_poss))\n # print(\"team_orb_percentage: \" + str(team_orb_percentage))\n # print(\"team_play_percentage: \" + str(team_play_percentage))\n # print(\"team_orb_weight: \" + str(team_orb_weight))\n # print(\"orb_part: \" + str(orb_part))\n # print(\"fg_x_poss: \" + str(fg_x_poss))\n # print(\"ft_x_poss: \" + str(ft_x_poss))\n # print(\"sc_poss: \" + str(sc_poss))\n # print(\"tot_poss: \" + str(tot_poss))\n # print(\"pprod_fg_part: \" + str(pprod_fg_part))\n # print(\"pprod_ast_part: \" + str(pprod_ast_part))\n # print(\"pprod_orb_part: \" + str(pprod_orb_part))\n # print(\"pprod: \" + str(pprod))\n # print(\"result: \" + str(result) + \"\\n\")\n else:\n result = 0.00\n\n self.ortg = \"%.2f\" % round(result, 2)\n if Decimal(self.ortg) < 0 or Decimal(self.ortg) >= 1000:\n \"\"\"For one game, maybe we've got a negative result or one so big, so, for just only a game, we get the ORTG \n using team's formula\"\"\"\n print(BCOLORS.OKBLUE + \"ORTG negativo o superior a 1000 para jugadora => recalculamos a través de la fórmula de equipo\" + BCOLORS.ENDC)\n bx = self.get_standard_stats()\n result = round((bx[\"t2p_conv\"]*2 + bx[\"t3p_conv\"]*3 + bx[\"tl_conv\"])/self.get_team_possessions(), 2)\n self.ortg = \"%.2f\" % result", "def cumulative_stats_for_teams_each_year(self):\n self.cumulative_stats_for_team_each_year = (\n self.combine_both_winning_losing_games_stats\n .sort_values(['WTeamID','Season'])\n .groupby(['WTeamID'])\n .cumsum()\n .pipe(lambda x:x.assign(Season = self.combine_both_winning_losing_games_stats.Season.values))\n .pipe(lambda x:x.assign(TeamID = self.combine_both_winning_losing_games_stats.WTeamID.values))\n .drop(['LTeamID','win_rate'],1)\n .pipe(lambda x:x.assign(win_rate = x.winning_num_counts/(x.winning_num_counts + x.losing_num_counts)))\n .pipe(lambda x:x.assign(WFGP = x.WFGM/x.WFGA))\n .pipe(lambda x:x.assign(WFG3P = x.WFGM3/x.WFGA3))\n .pipe(lambda x:x.assign(WFTP = x.WFTM/x.WFTA))\n .pipe(lambda x:x.assign(LFGP = x.LFGM/x.LFGA))\n .pipe(lambda x:x.assign(LFG3P = x.LFGM3/x.LFGA3))\n .pipe(lambda x:x.assign(LFTP = x.LFTM/x.LFTA))\n .pipe(lambda x:x.assign(fgp = x.total_fgm/x.total_fga))\n .pipe(lambda x:x.assign(fg3p = x.total_fg3m/x.total_fg3a))\n .pipe(lambda x:x.assign(ftp = x.total_ftm/x.total_fta))\n # rebounds cumsum stats\n .pipe(lambda x:x.assign(total_def_rebounds_percent = x.total_def_rebounds/x.total_rebounds))\n .pipe(lambda x:x.assign(total_off_rebounds_percent = x.total_off_rebounds/x.total_rebounds))\n .pipe(lambda x:x.assign(total_rebound_possession_percent = x.total_off_rebounds/x.total_team_missed_attempts))\n .pipe(lambda x:x.assign(total_rebound_possessiongain_percent = x.total_def_rebounds/x.total_opp_team_missed_attempts))\n # assists, turnovers, steals, blocks and personal fouls\n .pipe(lambda x:x.assign(total_block_opp_FGA_percent = x.total_blocks/x.total_opp_fga))\n .pipe(lambda x:x.assign(total_assist_per_fgm = x.total_assists/x.total_fgm))\n .pipe(lambda x:x.assign(total_assist_turnover_ratio = x.total_assists/x.total_turnover))\n # win or lose by how many points\n .pipe(lambda x:x.assign(lose_rate 
= 1-x.win_rate))\n .pipe(lambda x:x.assign(win_score_by = x.WScore - x.losing_opponent_score))\n .pipe(lambda x:x.assign(lose_score_by = x.LScore - x.winning_opponent_score))\n .pipe(lambda x:x.assign(expectation_per_game = x.win_rate * x.win_score_by/x.winning_num_counts + x.lose_rate * x.lose_score_by/x.losing_num_counts))\n .pipe(lambda x:x.assign(avg_win_score_by = x.win_score_by/x.winning_num_counts))\n .pipe(lambda x:x.assign(avg_lose_score_by = x.lose_score_by/x.losing_num_counts))\n )", "def __call__(self, outcome):\n if not hasattr(self, 'total'):\n self.total = sum(self.values())\n return self[outcome] / self.total", "def _compute_user_stats():\n user_stats = []\n \n wmt16_group = Group.objects.filter(name='WMT16')\n wmt16_users = _get_active_users_for_group(wmt16_group)\n \n for user in wmt16_users:\n _user_stats = HIT.compute_status_for_user(user)\n _name = user.username\n _avg_time = seconds_to_timedelta(_user_stats[1])\n _total_time = seconds_to_timedelta(_user_stats[2])\n _data = (_name, _user_stats[0], _avg_time, _total_time)\n \n if _data[0] > 0:\n user_stats.append(_data)\n \n # Sort by total number of completed HITs.\n user_stats.sort(key=lambda x: x[1])\n user_stats.reverse()\n \n return user_stats", "def mutual_info_score(self):\n _, _, I_CK = self._entropies()\n return I_CK / self.grand_total", "def score( self ):\r\n result = 0.0\r\n for rr in self.ee.getRsrcs( ):\r\n value = self.scoreRsrc( rr )\r\n result += value\r\n print( \"INFO: Value for the schedule is %s \" % ( rr, result ) )\r\n return( result )", "def expected_value(held_dice, num_die_sides, num_free_dice):\n list_scores = []\n die_sides = [die for die in range(1, num_die_sides + 1)]\n possible_seq = gen_all_sequences(die_sides, num_free_dice)\n for item in possible_seq:\n list_scores.append(score(held_dice + item))\n \n return float(sum(list_scores)) / len(list_scores)", "def getTeamStat(self, year = 2014):\r\n \r\n year_next = (year % 100) + 1\r\n season = str(year) + '-' + str(year_next)\r\n \r\n stat_url = 'http://stats.nba.com/stats/leaguedashteamstats?Conference=&'\\\r\n 'DateFrom=&DateTo=&Division=&GameScope=&GameSegment=&'\\\r\n 'LastNGames=0&LeagueID=00&Location=&MeasureType=Base&'\\\r\n 'Month=0&OpponentTeamID=0&Outcome=&PORound=0&PaceAdjust=N&'\\\r\n 'PerMode=PerGame&Period=0&PlayerExperience=&PlayerPosition=&'\\\r\n 'PlusMinus=N&Rank=N&Season=' + season + '&SeasonSegment=&'\\\r\n 'SeasonType=Regular+Season&ShotClockRange=&StarterBench=&'\\\r\n 'TeamID=0&VsConference=&VsDivision='\r\n \r\n response = requests.get(stat_url)\r\n data = json.loads(response.text)\r\n \r\n headers = data['resultSets'][0]['headers']\r\n stat_data = data['resultSets'][0]['rowSet']\r\n df = pd.DataFrame(stat_data,columns=headers) \r\n \r\n team_df = df[[\"TEAM_ID\",\"TEAM_NAME\",\"GP\",\"W\",\"L\",\"W_PCT\",\"MIN\",\"FGM\",\r\n \"FGA\",\"FG_PCT\",\"FG3M\",\"FG3A\",\"FG3_PCT\",\"FTM\",\"FTA\",\"FT_PCT\",\r\n \"OREB\",\"DREB\",\"REB\",\"AST\",\"TOV\",\"STL\",\"BLK\",\"BLKA\",\"PF\",\r\n \"PFD\",\"PTS\",\"PLUS_MINUS\"]]\r\n \r\n return team_df", "def as_counts_and_pcts(self):\n if self.e_pct is None:\n self.calculate_percentages()\n\n score_str = 'E: ' + str(self.e_score) + '(' + str(self.e_pct) + '%)/'\n score_str += 'I: ' + str(self.i_score) + '(' + str(self.i_pct) + '%) - '\n score_str += 'N: ' + str(self.n_score) + '(' + str(self.n_pct) + '%)/'\n score_str += 'S: ' + str(self.s_score) + '(' + str(self.s_pct) + '%) - '\n score_str += 'F: ' + str(self.f_score) + '(' + str(self.f_pct) + '%)/'\n score_str += 'T: ' + 
str(self.t_score) + '(' + str(self.t_pct) + '%) - '\n score_str += 'J: ' + str(self.j_score) + '(' + str(self.j_pct) + '%)/'\n score_str += 'P: ' + str(self.p_score) + '(' + str(self.p_pct) + '%)'\n return score_str", "def collect_stats(games: List[BaseGame], date_min = None, date_max = None):\n if not games: games = self.games\n\n under2_5 = len(list(filter(lambda g: g.is_total_under(), games)))\n under3_5 = len(list(filter(lambda g: g.is_total_under(3.5), games)))\n under1_5 = len(list(filter(lambda g: g.is_total_under(1.5), games)))\n\n home_score = sum([g.FTHG for g in games])\n away_score = sum([g.FTAG for g in games])\n\n home_wins = sum(1 for _ in filter(lambda g: g.is_home_win(), games))\n away_wins = sum(1 for _ in filter(lambda g: g.is_away_win(), games))\n draws = sum(1 for _ in filter(lambda g: g.is_draw(), games))\n\n return {\n 'under2.5': float(under2_5) / len(games),\n 'under3.5': float(under3_5) / len(games),\n 'under1.5': float(under1_5) / len(games),\n 'avgScoredHome': float(home_score) / len(games),\n 'avgScoredAway': float(away_score) / len(games),\n \"home_wins\": float(home_wins) / len(games),\n \"away_wins\": float(away_wins) / len(games),\n \"draws\": float(draws) / len(games),\n }", "def _calculate_result(found, total):\n return (found * 100) / total", "def play_game(game,standings_):\n rand_nmr = random.random()\n\n standings_.loc[standings_.TEAMS==game['Home'],'MP'] += 1\n standings_.loc[standings_.TEAMS==game['Away'],'MP'] += 1\n\n if rand_nmr < game['Prob Home']:\n n_goals = goals() # a random number of goals is added to the goal tally, all games and in 1-0,2-0,3-0 or 4-0. This can be improved\n standings_.loc[standings_.TEAMS==game['Home'],'W'] += 1\n standings_.loc[standings_.TEAMS==game['Home'],'F'] += n_goals\n standings_.loc[standings_.TEAMS==game['Away'],'L'] += 1\n standings_.loc[standings_.TEAMS==game['Away'],'A'] += n_goals\n standings_.loc[standings_.TEAMS==game['Home']][\"h2h\"].apply(lambda x:x.append(game['Away']))\n\n return 0\n\n elif rand_nmr < game['Prob Home'] + game['Prob Draw']:\n # all draws end in 0-0 this can be improved\n standings_.loc[standings_.TEAMS==game['Home'],'D'] += 1\n standings_.loc[standings_.TEAMS==game['Away'],'D'] += 1\n\n return 1\n\n else:\n n_goals = goals() # a random number of goals is added to the goal tally, all games and in 1-0,2-0,3-0 or 4-0. 
This can be improved\n standings_.loc[standings_.TEAMS==game['Away'],'W'] += 1\n standings_.loc[standings_.TEAMS==game['Away'],'F'] += n_goals\n standings_.loc[standings_.TEAMS==game['Home'],'A'] += 1\n standings_.loc[standings_.TEAMS==game['Home'],'L'] += n_goals\n standings_.loc[standings_.TEAMS==game['Away']][\"h2h\"].apply(lambda x:x.append(game['Home']))\n\n return 2", "def display_stats(self):\n print(\"Simulation took: {:.2f} seconds to execute\".format(time.time() - self.start_time))\n for i, win in enumerate(self.wins):\n average = 0\n if win:\n average = float(self.tries[i]) / win\n print(\"Player {} wins: {} with (average number of rounds: {:.2f})\".format(i+1, win, average))", "def _compute_winrates(synergy, counter, heroes_released):\n for i in range(heroes_released):\n for j in range(heroes_released):\n if i != j and i != 23 and j != 23:\n if synergy['games'][i, j] != 0:\n synergy['winrate'][i, j] = synergy['wins'][i, j] / \\\n float(synergy['games'][i, j])\n\n if counter['games'][i, j] != 0:\n counter['winrate'][i, j] = counter['wins'][i, j] / \\\n float(counter['games'][i, j])", "def grade_inst(inst, population):\n \n # Initialize to keep track of score \n inst_score = 0\n \n # Battle against each instance in population\n for i in range(len(population)):\n battle_result = inst.battle(population[i])\n inst_score += battle_result[0]\n \n # Also record the score in the instance\n inst.set_score(inst_score)\n\n return inst_score", "def calculate_scores():\n all_people = models.Leaderboard.query.order_by(\n models.Leaderboard.score.desc()).all()\n print(all_people)\n users = []\n scores = []\n for person in all_people:\n users.append(person.username)\n scores.append(person.score)\n return users, scores", "def winrate(matches):\n if not matches:\n print('no matches')\n return None\n\n win_loss = [match['result'] for match in matches]\n return sum(win_loss)/len(win_loss)", "def set_total_reb_of_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n result = 0.00\n try:\n if bx[\"reb_of\"] > 0 and bx[\"minutes\"] > 0:\n result = ((bx[\"reb_of\"] * (team[\"minutes\"]/5)) / (bx[\"minutes\"] * (team[\"reb_of\"] + opp_team[\"reb_def\"])))*100\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n self.total_reb_of_percentage = \"%.2f\" % round(result, 2)", "def update_scores(self):\r\n totals = [0, 0, 0, 0]\r\n for player in range(0, 4):\r\n for round_num in range(0, 17):\r\n try:\r\n bid = int(self.spin_bids[player][round_num].get())\r\n tricks = int(self.spin_tricks[player][round_num].get())\r\n except ValueError:\r\n bid = -1\r\n tricks = -1\r\n score = calc_score(min(round_num+1, 13), bid, tricks)\r\n self.lbl_scores[player][round_num].configure(text=str(score))\r\n totals[player] += score\r\n for player in range(0, 4):\r\n self.lbl_totals[player].configure(text=str(totals[player]))\r\n return totals[0] + totals[1] + totals[2] + totals[3]", "def TeamScores(level,team_N):\r\n \r\n groupresults = Combined_Non_Compound_Results(level).groupby('Club') \r\n # groups clubs together in a big list just for NMR\r\n # will need to generalise for all categories\r\n\r\n LoR = [ frame for LoRs, frame in groupresults ]\r\n \r\n TeamTable = pd.DataFrame({},columns=['Club','Total Score', # initial empty\r\n 'Total Golds', 'Total Hits']) # dataframe\r\n \r\n# Uni = pd.DataFrame({},columns=['Name','Club','Score','Golds', 'Hits'])\r\n TeamComposition = [[],[],[],[]]\r\n for j in range(4): # 
only four clubs in the dataframe\r\n\r\n Uni = LoR[j][0:team_N] # jth club in index, gets top team_N archers\r\n Uni = Uni.reset_index(drop=True) # resets the index for UCL sublist\r\n UniName = Uni.loc[0,'Club']\r\n\r\n Scores=0\r\n Golds=0\r\n Hits=0\r\n \r\n TeamComposition[j].append(UniName)\r\n\r\n for i in range(team_N): # sums the score,golds and hits for uni club j\r\n Scores += Uni.loc[i,'Score']\r\n Golds += Uni.loc[i,'Golds']\r\n Hits += Uni.loc[i,'Hits']\r\n\r\n TeamComposition[j].append(Uni.loc[i,'Name'])\r\n \r\n TeamTable2 = pd.DataFrame({'Club': [UniName], \r\n 'Total Score': [Scores],\r\n 'Total Golds': [Golds], \r\n 'Total Hits': [Hits]},\r\n columns=['Club','Total Score', \r\n 'Total Golds', 'Total Hits'])\r\n \r\n TeamTable = TeamTable.append(TeamTable2) # appends each club data\r\n\r\n TeamTable = TeamTable.sort_values(['Total Score','Total Golds',\r\n 'Total Hits'],ascending=[False,False,\r\n False],na_position='last')\r\n TeamTable = TeamTable.reset_index(drop=True)\r\n print()\r\n print(TeamTable)\r\n print()\r\n \r\n \r\n FinalList = [[],[],[],[]]\r\n \r\n for h in range(4):\r\n for g in range(4):\r\n if TeamTable.iloc[h,0] == TeamComposition[g][0]:\r\n FinalList[h] = TeamComposition[g]\r\n\r\n \r\n for k in range(4):\r\n print(FinalList[k])\r\n print()\r\n\r\n if level == NovCategories:\r\n \r\n return print(\"----- End of Novice Team Scores -----\")\r\n \r\n if level == AllCategories:\r\n \r\n return print(\"----- End of Experienced Team Scores -----\")", "def percenter(rank, max_rank):\n\treturn 100 * (rank/(max_rank or 1))", "def expected_value(held_dice, num_die_sides, num_free_dice):\n result = 0\n outcomes = range(1, num_die_sides + 1)\n possible = sorted(gen_all_sequences(outcomes, num_free_dice))\n for hand in possible:\n result += score(held_dice + hand)\n return float(result)/len(possible)", "def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)", "def calc_stat_values(self):", "def rate(self, neighbors, labels):\n num = 0\n den = 0\n for neighbor in neighbors:\n lable = self.labels[neighbor[1]]\n dest_to_neighbor = neighbor[0]\n num += lable / dest_to_neighbor\n den += 1 / dest_to_neighbor\n return num/den" ]
[ "0.67434204", "0.6500815", "0.6359718", "0.63065845", "0.63019216", "0.6301762", "0.6299715", "0.6148283", "0.6126874", "0.6103746", "0.59502584", "0.59436804", "0.59398586", "0.5932481", "0.58808863", "0.58741057", "0.58336693", "0.58299714", "0.57860583", "0.5785277", "0.57851714", "0.5772505", "0.5765083", "0.5754176", "0.57456493", "0.5744941", "0.57435244", "0.57411337", "0.5740637", "0.57328707", "0.57090527", "0.5703763", "0.5695928", "0.5695037", "0.56833076", "0.5667735", "0.56660813", "0.5661647", "0.5658868", "0.564245", "0.564245", "0.5640655", "0.563955", "0.56324834", "0.56269974", "0.56265515", "0.56263846", "0.56194395", "0.56164384", "0.56158626", "0.56150824", "0.5611808", "0.5603649", "0.5599148", "0.5596763", "0.55725795", "0.5567675", "0.5562651", "0.55621547", "0.5558683", "0.55546826", "0.55500245", "0.5549437", "0.5549317", "0.55458724", "0.5544281", "0.554114", "0.5537532", "0.55233663", "0.55194324", "0.5516322", "0.5514533", "0.5513997", "0.5510753", "0.55095863", "0.5501924", "0.5501501", "0.5501018", "0.55000347", "0.5497445", "0.549627", "0.5494147", "0.54901165", "0.54873544", "0.5487157", "0.5484148", "0.5483676", "0.5482826", "0.54821", "0.54820836", "0.54816294", "0.54772824", "0.54760695", "0.54697263", "0.54645133", "0.5460016", "0.5458558", "0.54511875", "0.54451436", "0.54448134" ]
0.6911841
0
Method which calculates Total Rebound Percentage
def set_total_reb_percentage(self):
    bx = self.get_standard_stats()
    team = self.get_team_stats()
    opp_team = self.get_opp_team_stats()
    player_rebounds = bx["reb_def"] + bx["reb_of"]
    team_rebounds = team["reb_def"] + team["reb_of"]
    opp_team_rebounds = opp_team["reb_def"] + opp_team["reb_of"]
    result = 0.00
    try:
        if bx["minutes"] > 0 and bx["minutes"] > 0:
            result = ((player_rebounds * (team["minutes"]/5)) / (bx["minutes"] * (team_rebounds + opp_team_rebounds)))*100
    except ZeroDivisionError:
        print(BCOLORS.FAIL + "Error: División por cero" + BCOLORS.ENDC)
    except InvalidOperation:
        print(BCOLORS.FAIL + "Error: Invalid Operation" + BCOLORS.ENDC)
    self.total_reb_percentage = "%.2f" % round(result, 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_total_reb_of_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n result = 0.00\n try:\n if bx[\"reb_of\"] > 0 and bx[\"minutes\"] > 0:\n result = ((bx[\"reb_of\"] * (team[\"minutes\"]/5)) / (bx[\"minutes\"] * (team[\"reb_of\"] + opp_team[\"reb_def\"])))*100\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n self.total_reb_of_percentage = \"%.2f\" % round(result, 2)", "def get_real_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_real_rating() / self.field.range)", "def set_total_reb_def_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n result = 0.00\n try:\n if bx[\"minutes\"] > 0 and bx[\"minutes\"] > 0:\n result = ((bx[\"reb_def\"] * (team[\"minutes\"]/5)) / (bx[\"minutes\"] * (team[\"reb_def\"] + opp_team[\"reb_of\"])))*100\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n\n self.total_reb_def_percentage = \"%.2f\" % round(result, 2)", "def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)", "def represent_total_percent(self, length):\n numpkgs = self.totals['numpkgs']\n dlpkgs = self.totals['dlpkgs']\n return self.represent_percent(dlpkgs, numpkgs, length)", "def pct(self):\n\t\treturn self.bottle.pct()", "def percentage(count, total):\n return count / total * 100", "def pct_bust(data):\n return round((data[\"new_total\"] > 21).sum() / len(data), 3)", "def calculate_percent(self, total_number, some_number):\n\t\treturn (some_number * 100) / total_number", "def pct_helper(self,k,d,total):\n if k in d:\n return 100.0*d[k]/total\n else:\n return -100.0", "def percent_raised(self):\n total_cost = self.total_cost()\n if total_cost:\n return round(self.total_raised() * 100 / total_cost, 2)\n else:\n return 0", "def _calculate_result(found, total):\n return (found * 100) / total", "def __call__(self, relsSortedByScores, qrelDict):\n result = 0.\n postQty = len(qrelDict)\n\n pos = 0\n for i, rel in enumerate(relsSortedByScores):\n if rel > RELEVANCE_THRESHOLD:\n pos += 1.\n result += pos / (i + 1.)\n\n return result / postQty", "def rF(count, total):\n\treturn float(count)/float(total)", "def getPercent(*args):", "def getPercent(*args):", "def update_percent(self):", "def compute_total_customs_duty(self):\n for rec in self:\n total = 0.0\n extra_duty = 0.0\n price_total = rec.quantity * rec.unit_price\n# total = (price_total * duty_percentage)/100\n rec.price_total = price_total\n# for hts in rec.hts_ids:\n# if hts.extra_duty_applicable:\n# extra_duty += ((rec.quantity/hts.quantity) * hts.extra_duty)\n# rec.total = total + extra_duty\n\n return True", "def patrimony_total(self):\n pass", "def percent_usage(value, total):\n if total:\n return float(value) * 100.0 / (float(total) + float(value))\n else:\n return 100.0", "def _prorata_rate(self, days_used, days_in_month):\n return (100 * days_used // days_in_month) / 100.0", "def grand_total(self):\n return sum(self.grid[pos][1] for pos in assignable_positions if self.grid[pos][0]) + self.grid[\"nb\"][1]", "def percentage_update(self):\n\n self.event_update()\n return self.percentage", "def CalculateBoundProbability(self, psi):\n\n\t\t_, _, _, boundTotal = 
self.CalculateBoundDistribution(psi)\n\n\t\treturn boundTotal", "def get_free_set_percentage(self, params):\n raise NotImplementedError()", "def percentage(self):\n temp = self.cpu_freq_time_spent.copy()\n for i in self.cpu_freq_time_spent:\n total = 0\n for j in self.cpu_freq_time_spent[i]:\n total += self.cpu_freq_time_spent[i][j]\n for j in self.cpu_freq_time_spent[i]:\n if total != 0:\n temp[i][j] = self.cpu_freq_time_spent[i][j] * 100 / total\n else:\n temp[i][j] = 0\n return temp", "def fractionPassing(self):\n return self.cut.entries / self.entries", "def robbins(counts):\n return float(singles(counts))/counts.sum()", "def norm_percent(raw):\n if sum(raw) != 0:\n return [float(i)/sum(raw)*100 for i in raw]\n else:\n return [0 for i in raw]", "def rate(self, neighbors, labels):\n num = 0\n den = 0\n for neighbor in neighbors:\n lable = self.labels[neighbor[1]]\n dest_to_neighbor = neighbor[0]\n num += lable / dest_to_neighbor\n den += 1 / dest_to_neighbor\n return num/den", "def calculate_profit(self):", "def calculate_finalscore(self):\n\n if self.count!=0:\n print(self.count)\n print(self.badGuess)\n self.finalScore=(self.total/self.count)- ((self.total/self.count)*(10*self.badGuess)/100)\n\n\n else:\n self.finalScore=self.total", "def get_percentage(self):\n return self.PotTax_percentage", "def compute_bound(self, sstats, totals):\n w = self.vocab_len\n t = self.num_time_slices\n\n term_1 = 0\n term_2 = 0\n term_3 = 0\n\n val = 0\n ent = 0\n\n chain_variance = self.chain_variance\n # computing mean, fwd_mean\n self.mean, self.fwd_mean = \\\n (np.array(x) for x in zip(*(self.compute_post_mean(w, self.chain_variance) for w in range(w))))\n self.zeta = self.update_zeta()\n\n val = sum(self.variance[w][0] - self.variance[w][t] for w in range(w)) / 2 * chain_variance\n\n logger.info(\"Computing bound, all times\")\n\n for t in range(1, t + 1):\n term_1 = 0.0\n term_2 = 0.0\n ent = 0.0\n for w in range(w):\n\n m = self.mean[w][t]\n prev_m = self.mean[w][t - 1]\n\n v = self.variance[w][t]\n\n # w_phi_l is only used in Document Influence Model; the values are always zero in this case\n # w_phi_l = sslm.w_phi_l[w][t - 1]\n # exp_i = np.exp(-prev_m)\n # term_1 += (np.power(m - prev_m - (w_phi_l * exp_i), 2) / (2 * chain_variance)) -\n # (v / chain_variance) - np.log(chain_variance)\n\n term_1 += \\\n (np.power(m - prev_m, 2) / (2 * chain_variance)) - (v / chain_variance) - np.log(chain_variance)\n term_2 += sstats[w][t - 1] * m\n ent += np.log(v) / 2 # note the 2pi's cancel with term1 (see doc)\n\n term_3 = -totals[t - 1] * np.log(self.zeta[t - 1])\n val += term_2 + term_3 + ent - term_1\n\n return val", "def percent_updated(self):\n return self.percent_complete - self.previous_percent_complete", "def calculScore(self):\n for cell in self.notComputeRouter:\n if(cell.isCovered==True):\n self.score += 1000\n self.score += self.budget", "def percent(value, total):\n if total:\n return float(value) * 100.0 / float(total)\n else:\n return 100.0", "def life_insurance_to_recive_total(self):\n pass", "def mask_percentage(self):\n return 100 - self.tissue_percentage", "def proper_annulus_centres(self) -> Quantity:\n return self._proper_ann_centres", "def usage_percent(used, total, _round=None):\r\n try:\r\n ret = (used / total) * 100\r\n except ZeroDivisionError:\r\n ret = 0\r\n if _round is not None:\r\n return round(ret, _round)\r\n else:\r\n return ret", "def stretch_pct(cube,out,pct):\n\n try:\n isis.percent(from_=cube, to=out, percentage=pct)\n val = isis.getkey(from_=out, grpname=\"Results\", 
keyword=\"Value\").decode().replace('\\n', '')\n except ProcessError as e:\n val = None\n if val:\n return float(val)\n else:\n return None", "def get_percentage(self):\n return self.percentage", "def get_percentage(self):\n return self.percentage", "def total_rewards(self) -> float:\n return self.__total_rewards", "def _percent(self, lines_total, lines_covered):\n\n if lines_total == 0:\n return '0.0'\n return str(float(float(lines_covered) / float(lines_total)))", "def calc_stat_values(self):", "def get_gst_subtotals(self):\n self.__subtotal_gst = 0\n for current_item in self.__items_list:\n self.__subtotal_gst += current_item.calculate_gst()\n return self.__subtotal_gst", "def ComputeNrb(self):\r\n pass", "def __calculate_statistics(self, candidates):\n pdf = {}\n for candidate in candidates:\n neighbors = list(self.G.neighbors(candidate))\n capacity = sum([self.G.get_edge_data(candidate, n)[\"satoshis\"] for n in neighbors])\n average = capacity / len(neighbors)\n pdf[candidate] = average\n cumsum = sum(pdf.values())\n pdf = {k:v/cumsum for k,v in pdf.items()}\n w = 0.7\n print(\"percentage smoothed percentage capacity numchannels alias\")\n print(\"----------------------------------------------------------------------\")\n res_pdf = {}\n for k,v in pdf.items():\n neighbors = list(self.G.neighbors(k))\n capacity = sum([self.G.get_edge_data(k, n)[\"satoshis\"] for n in neighbors])\n name = k\n if \"alias\" in self.G.node[k]:\n name = self.G.node[k][\"alias\"]\n print(\"{:12.2f} \".format(100*v), \"{:12.2f} \".format(100*(w * v + (1-w)/len(candidates))) ,\"{:10} {:10} \".format( capacity, len(neighbors)), name)\n res_pdf[k] = (w * v + (1-w)/len(candidates))\n return res_pdf", "def percent_b(self) -> float:\n return self._percent_b", "def percentage(part, whole):\n return round((100 * float(part)/float(whole)),2)", "def calc_pct_to_save_as_doublets(self):\n x, y = load_expected_doublet_rates( # pylint: disable=invalid-name\n \"/Users/austinhartman/Desktop/doublet-caller/src/expected_doublet_rates.csv\"\n )\n r = calculate_expected_doublet_rate(x, y) # pylint: disable=invalid-name\n return self.num_cells * r[\"coefficient\"] + r[\"intercept\"]", "def total(self) -> float:\n\n remained_to_be_taxed = self.income\n # taxed = list()\n self.tax_amounts = []\n start_tax_range = 0\n end_tax_range = self.bracket\n\n for i, b in enumerate(self.bracket):\n\n amount_to_tax = b.end - start_tax_range\n t = Taxed(min(amount_to_tax, remained_to_be_taxed), b.rate,\n min(amount_to_tax, remained_to_be_taxed) * b.rate)\n self.tax_amounts.append(t)\n # print(i, start_t ax_range, b.end, amount_to_tax, b.rate)\n\n remained_to_be_taxed -= amount_to_tax\n # print(remained_to_be_taxed)\n\n if b.end > self.income:\n break\n\n start_tax_range = b.end\n\n # print(taxed)\n return sum([t.tax for t in self.tax_amounts])", "def calc_percentage(count, total_count, percent_factor=100):\n percentage = Decimal(float(count) * float(percent_factor) / total_count)\n percentage = round(percentage, 2)\n return percentage", "def evaluate_percentage_of_class_for_each_flight(df_flights_info):\n df_new = df_flights_info[['FlightId', 'FareClass', 'Booking']]\n df_new = df_new.pivot(index='FlightId', columns='FareClass', values='Booking').astype(float)\n df_new.loc[:, 'Total'] = df_new.sum(axis=1).astype(float)\n for row, col in df_new.iterrows():\n for item in list(df_new):\n number_booking = df_new.loc[row, item]\n total_booking = df_new.loc[row]['Total']\n percentage = float(number_booking / total_booking)\n df_new.at[row, item] = 
percentage\n df_new = df_new.drop(columns=['Total'])\n df_new = df_new.stack()\n df_new = df_new.reset_index(level=[0, 1])\n df_flights_info = pandas.merge(df_flights_info, df_new, how='left', on=['FlightId', 'FareClass'])\n df_flights_info.rename(columns={0: 'Percentage'}, inplace=True)\n return df_flights_info", "def percentage(self):\n return sum(self.chunk_percentage) / self.total_steps", "def private_pension_total(self):\n pass", "def relative_change(nr1, nr2):\n\n return float(((nr2 - nr1) / nr1) * 100)", "def adv_ratio(self): # XXX\r\n bw = StatsRouter.global_bw_mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/bw", "def calculatePercentChange(self, oldValue, newValue):\n return (((newValue - oldValue)/oldValue)*100)", "def get_load_factor(self):\n # Your code here\n return self.total_items / self.capacity", "def getClassBalance(pshapes, bounds, proj):\n\n xmin, ymin, xmax, ymax = bounds\n bpoly = Polygon([(xmin, ymax),\n (xmax, ymax),\n (xmax, ymin),\n (xmin, ymin)])\n project = partial(\n pyproj.transform,\n pyproj.Proj(proj='latlong', datum='WGS84'),\n proj)\n bpolyproj = transform(project, bpoly)\n totalarea = bpolyproj.area\n polyarea = 0\n for pshape in pshapes:\n polyarea += pshape.area\n\n return polyarea/totalarea", "def coverage(self):\n try:\n return self.found * 100 / self.needed\n except ZeroDivisionError:\n return 100.0", "def cal_expected_map(self, ranking_list, total_rel=0):\r\n s = 0.0\r\n pr = 0\r\n pn = 0\r\n for ele in reversed(ranking_list):\r\n rel_doc_cnt = ele[0]\r\n this_doc_cnt = ele[1]\r\n nonrel_doc_cnt = this_doc_cnt - rel_doc_cnt\r\n s += self.A(pr, pn, rel_doc_cnt, nonrel_doc_cnt)\r\n pr += rel_doc_cnt\r\n pn += nonrel_doc_cnt\r\n total_rel += rel_doc_cnt\r\n #print s/total_rel\r\n if total_rel == 0:\r\n return 0\r\n return s/total_rel", "def percentageChange(self):\n try:\n curPrice = self.dailyData[-1].currentPrice\n closePrice = self.historicData[-1].closePrice\n except IndexError: # Just return zero when no historic or dailyData is available yet\n return 0.0\n return (curPrice - closePrice)/closePrice * 100", "def __call__(self, outcome):\n if not hasattr(self, 'total'):\n self.total = sum(self.values())\n return self[outcome] / self.total", "def getTotalBusinessPercentForSalePercents(sc_sp:float, lc_sp:float):\n sc_tbp,lc_tbp,actualRainPercent = 0.0,0.0,0.0\n for i in range(int(Problem2.TOTAL_WEEKS)):\n isRainy = Problem2.eventOccurred(Problem2.RAIN_PERCENT)\n if isRainy:\n actualRainPercent += 1\n sc_hasSale = Problem2.eventOccurred(sc_sp)\n lc_hasSale = Problem2.eventOccurred(lc_sp)\n sc_ns_bp, lc_ns_bp, ss_bp = Problem2.setBusinessPercents(isRainy)\n sc_bp, lc_bp = Problem2.getBusinessPercentBySales(sc_hasSale, lc_hasSale, sc_ns_bp, lc_ns_bp, ss_bp)\n sc_tbp += sc_bp\n lc_tbp += lc_bp\n\n sc_tbp /= Problem2.TOTAL_WEEKS\n lc_tbp /= Problem2.TOTAL_WEEKS\n actualRainPercent /= Problem2.TOTAL_WEEKS\n return sc_tbp, lc_tbp, actualRainPercent", "def percentage(a, b):\n return (a * 100.0) / b", "def calculate_gpa(self):\n cur_node = self.head\n gpa = 0\n total_credits = 0\n while cur_node is not None:\n gpa += cur_node.data.grade() * cur_node.data.credit_hr()\n total_credits += cur_node.data.credit_hr()\n cur_node = cur_node.next\n if total_credits == 0:\n return 0\n return gpa / total_credits", "def return_on_total_assets():\r\n x = float(input(\"Please Enter Net Income Value: \"))\r\n y = float(input(\"Please Enter Interest Expense Value: \"))\r\n z = float(input(\"Please Enter Beginning Total Assets Value: \"))\r\n w = float(input(\"Please 
Enter Ending Total Assets Value: \"))\r\n d = ((float(x)+float(y)) / ((float(z)+float(w)) / float(2))) * float(100)\r\n print \">> Your Rate of Return on Total Assets is\",round(d,1),\"%\"", "def overall_reduction(self):\n return 84", "def get_price_subtotals(self):\n self.__subtotal_price = 0\n for current_item in self.__items_list:\n self.__subtotal_price += current_item.get_item_base_price()\n return self.__subtotal_price", "def as_percent(self):\n if self.dynamic:\n self._update_db_obj()\n return f\"{(self._db_obj.progress / self._db_obj.total) * 100}%\"", "def get_percent(self):\n return self.percent", "def calculate_amount_payable_rule_old(self, total):\n return self.amount_raised * Decimal(0.95)", "def percentages(self) -> pandas.Series:\n if self._percentages is None:\n scalar = 1 if self.use_fraction else 100\n self._percentages = scalar * self.counts/self.total\n return self._percentages", "def percent_processed(self):\n try:\n return (self.pos / self.data_encap.size) * 100.0\n except ZeroDivisionError:\n return 100.0", "def calculate(self) -> float:", "def _set_percentage(self):\n\n step = float(self.step)\n end = float(self.end)\n self.percentage = format((100 * step / end), '.1f')", "def normalize(self):\n total = self.total()\n for x in self.d:\n self.d[x] /= total\n return total", "def percent_left(self):\n return 100 - self.percent_complete", "def percent(self):\r\n return self._percent", "def calculate_total(self):\n if self.total_price == 0:\n for discount in self.discounts:\n for item in self.items:\n item.add_discount(discount)\n\n for item in self.items:\n self.total_price += item.final_price()\n\n return self.total_price", "def set_effective_field_goal_percentage(self):\n bx = self.get_standard_stats()\n tcInt = float(bx[\"t2p_int\"] + bx[\"t3p_int\"])\n tcConv = float(bx[\"t2p_conv\"] + bx[\"t3p_conv\"])\n result = 0.00\n if tcInt > 0:\n result = ((tcConv + (0.5 * float(bx[\"t3p_conv\"]))) / tcInt) * 100\n self.effective_field_goal_percentage = \"%.2f\" % round(result, 2)", "def recall(self) -> float:\n if self.ref_ignored:\n num_ref_ignored = len(self.ref_set) - len(self.ref_unignored_set)\n self.num_ignored += num_ref_ignored\n # True Positive = the number of unignored reference mappings that are Positive\n tp = len(self.ref_unignored_set.intersection(self.pre_set))\n # False Negative = the number of unignored reference mappings that are Negative\n fn = len(self.ref_set) - tp - num_ref_ignored\n return tp / (tp + fn)", "def get_duty_percentage(self):\n container_line_ids = self\n hbl_customs_obj = self.env['hbl.customs.duty']\n for line in container_line_ids:\n p_line = line.purchase_line\n #Get the supplier from product by using po supplier id.\n product_supplier_id = p_line.product_id.seller_ids.filtered(lambda rec:rec.name.id == p_line.partner_id.id and rec.hts_codes_ids)\n #Get HTS code of the supplier\n hts_codes_ids = product_supplier_id and product_supplier_id[0].hts_codes_ids or False\n if hts_codes_ids:\n percentage = sum(hts_codes_ids.mapped('percentage'))\n line_customs_id = hbl_customs_obj.create({'hbl_line_id' : line.id,\n 'hts_ids': [(6,_, hts_codes_ids.ids)],\n 'duty_percentage': percentage,\n 'quantity' : line.qty_to_load,\n 'unit_price' : p_line.price_unit\n })\n line.write({'line_customs_id' : line_customs_id.id})", "def calculate_vote_fractions():\n return _calculate_vote_fractions(models.get_candidate_to_vote_count())", "def get_average_percision_(qres, ibs=None, gt_aids=None):\n recall_range_, p_interp_curve = 
get_interpolated_precision_vs_recall_(qres, ibs=ibs, gt_aids=gt_aids)\n\n if recall_range_ is None:\n ave_p = np.nan\n else:\n ave_p = p_interp_curve.sum() / p_interp_curve.size\n\n return ave_p", "def compute_stats(self):\n if self.stats is not None:\n return\n self.stats = np.zeros(STEPS_MAX + 1)\n for m in self.missions:\n m.compute_stats()\n self.stats += 100 * m.stats\n self.stats /= len(self.missions)", "def profit_per_item_percentage(self, pk=None):\n total_profit_percentage = 0\n total_cost = self.item_cost + self.shipping_cost + self.listing_fee + self.final_value_fee\n total_paid = self.shipping_paid + self.item_paid\n total_profit_percentage = round(100*((total_paid - total_cost) / total_cost), 2)\n return total_profit_percentage", "def get_total(self):\n\n total = super().get_total()\n if self.qty < 10:\n total += 3.00\n return total", "def percent_of(part, whole):\n return part * 100 / whole", "def total_to_proportion(total_pronoun_dict):\n if total_pronoun_dict['total'] is 0:\n return total_pronoun_dict\n else:\n return{\n 'first_person_singular': total_pronoun_dict['first_person_singular']/total_pronoun_dict['total'],\n 'first_person_plural': total_pronoun_dict['first_person_plural']/total_pronoun_dict['total'],\n 'second_person': total_pronoun_dict['second_person']/total_pronoun_dict['total'],\n 'third_person_singular': total_pronoun_dict['third_person_singular']/total_pronoun_dict['total'],\n 'third_person_plural': total_pronoun_dict['third_person_plural']/total_pronoun_dict['total'],\n 'total': total_pronoun_dict['total']\n }", "def getPercent(self):\n if isinstance(self.score,numbers.Number) and self.getMaximum():\n return (1.0*self.score/self.getMaximum())\n return None", "def ram_percent(self):\n self.monitoring_object['ram_percent'] = \\\n psutil.virtual_memory().used", "def calculateCurrentPercentageChange(self, Prices):\n threeDayMovingAverage = self.calculateLatestThreeDayMA(Prices)\n fifteenDayMovingAverage = self.calculateLatestFifteenDayMA(Prices)\n percentageChange = self.calculatePercentChange(\n fifteenDayMovingAverage, threeDayMovingAverage)\n return percentageChange", "def percentCheck(currentTimeLabel, totalTimeLabel):\n # Updated 11/19/16\n try:\n progPercent = float(currentTimeLabel) / float(totalTimeLabel) * 100\n except (ValueError , ZeroDivisionError):\n progPercent = 0\n \n return progPercent", "def fitness(individual, divider, target_sum, target_multiply):\n\n sum_val = reduce(operator.add, individual[:divider], 0)\n multiply_val = reduce(operator.mul, individual[divider:], 1)\n \n sum_error = abs(target_sum - sum_val)\n sum_error = sum_error / target_sum\n\n multiply_error = abs(target_multiply - multiply_val)\n multiply_error = multiply_error / target_multiply\n\n #print(multiply_error, sum_error)\n #print(sum_error, multiply_error)\n return (multiply_error + sum_error)/2 * 100", "def cagr(B, A, n):\n if B < 0: B = 0\n return (math.pow(B / A, 1 / n) - 1) * 100" ]
[ "0.6523484", "0.6491968", "0.63697445", "0.6303792", "0.6300233", "0.62549525", "0.6121597", "0.6087575", "0.60844654", "0.6044818", "0.6027688", "0.6009361", "0.5989413", "0.5969596", "0.59609574", "0.59609574", "0.59606254", "0.5904455", "0.5899307", "0.58770585", "0.584258", "0.58401334", "0.58382976", "0.5824316", "0.5794439", "0.5781197", "0.5766814", "0.5712529", "0.57073605", "0.57034516", "0.56790906", "0.56403285", "0.56107545", "0.56088257", "0.55992335", "0.5596763", "0.5595719", "0.557121", "0.55671036", "0.55598134", "0.55472976", "0.5547236", "0.55348074", "0.55348074", "0.5533357", "0.5528287", "0.5525748", "0.5525052", "0.55243754", "0.55156636", "0.5514154", "0.5513591", "0.5511515", "0.551105", "0.5507345", "0.5504536", "0.55025876", "0.5499663", "0.54978704", "0.54940504", "0.54933256", "0.5486853", "0.5482061", "0.5481559", "0.547937", "0.5473942", "0.54581296", "0.5455525", "0.54546916", "0.5450097", "0.54447657", "0.5431445", "0.54272735", "0.5421677", "0.5416862", "0.5413267", "0.54113734", "0.5397872", "0.5394833", "0.53911483", "0.5379467", "0.53628045", "0.5361172", "0.53595567", "0.535791", "0.5354153", "0.5351761", "0.53517336", "0.53496337", "0.53494453", "0.5348908", "0.53352207", "0.53317803", "0.5331302", "0.5330255", "0.53278524", "0.53260463", "0.5315842", "0.5304827", "0.5303798" ]
0.6903449
0
Method which calculates Total Rebound Defensive Percentage
def set_total_reb_def_percentage(self): bx = self.get_standard_stats() team = self.get_team_stats() opp_team = self.get_opp_team_stats() result = 0.00 try: if bx["minutes"] > 0 and bx["minutes"] > 0: result = ((bx["reb_def"] * (team["minutes"]/5)) / (bx["minutes"] * (team["reb_def"] + opp_team["reb_of"])))*100 except ZeroDivisionError: print(BCOLORS.FAIL + "Error: División por cero" + BCOLORS.ENDC) except InvalidOperation: print(BCOLORS.FAIL + "Error: Invalid Operation" + BCOLORS.ENDC) self.total_reb_def_percentage = "%.2f" % round(result, 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_real_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_real_rating() / self.field.range)", "def set_total_reb_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n player_rebounds = bx[\"reb_def\"] + bx[\"reb_of\"]\n team_rebounds = team[\"reb_def\"] + team[\"reb_of\"]\n opp_team_rebounds = opp_team[\"reb_def\"] + opp_team[\"reb_of\"]\n result = 0.00\n try:\n if bx[\"minutes\"] > 0 and bx[\"minutes\"] > 0:\n result = ((player_rebounds * (team[\"minutes\"]/5)) / (bx[\"minutes\"] * (team_rebounds + opp_team_rebounds)))*100\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n\n self.total_reb_percentage = \"%.2f\" % round(result, 2)", "def pct_bust(data):\n return round((data[\"new_total\"] > 21).sum() / len(data), 3)", "def pct(self):\n\t\treturn self.bottle.pct()", "def set_total_reb_of_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n result = 0.00\n try:\n if bx[\"reb_of\"] > 0 and bx[\"minutes\"] > 0:\n result = ((bx[\"reb_of\"] * (team[\"minutes\"]/5)) / (bx[\"minutes\"] * (team[\"reb_of\"] + opp_team[\"reb_def\"])))*100\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n self.total_reb_of_percentage = \"%.2f\" % round(result, 2)", "def get_free_set_percentage(self, params):\n raise NotImplementedError()", "def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)", "def percent_raised(self):\n total_cost = self.total_cost()\n if total_cost:\n return round(self.total_raised() * 100 / total_cost, 2)\n else:\n return 0", "def __call__(self, relsSortedByScores, qrelDict):\n result = 0.\n postQty = len(qrelDict)\n\n pos = 0\n for i, rel in enumerate(relsSortedByScores):\n if rel > RELEVANCE_THRESHOLD:\n pos += 1.\n result += pos / (i + 1.)\n\n return result / postQty", "def compute_total_customs_duty(self):\n for rec in self:\n total = 0.0\n extra_duty = 0.0\n price_total = rec.quantity * rec.unit_price\n# total = (price_total * duty_percentage)/100\n rec.price_total = price_total\n# for hts in rec.hts_ids:\n# if hts.extra_duty_applicable:\n# extra_duty += ((rec.quantity/hts.quantity) * hts.extra_duty)\n# rec.total = total + extra_duty\n\n return True", "def calculScore(self):\n for cell in self.notComputeRouter:\n if(cell.isCovered==True):\n self.score += 1000\n self.score += self.budget", "def overall_reduction(self):\n return 84", "def adv_ratio(self): # XXX\r\n bw = StatsRouter.global_bw_mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/bw", "def calculate_profit(self):", "def set_effective_field_goal_percentage(self):\n bx = self.get_standard_stats()\n tcInt = float(bx[\"t2p_int\"] + bx[\"t3p_int\"])\n tcConv = float(bx[\"t2p_conv\"] + bx[\"t3p_conv\"])\n result = 0.00\n if tcInt > 0:\n result = ((tcConv + (0.5 * float(bx[\"t3p_conv\"]))) / tcInt) * 100\n self.effective_field_goal_percentage = \"%.2f\" % round(result, 2)", "def _prorata_rate(self, days_used, days_in_month):\n return (100 * days_used // days_in_month) / 100.0", "def coverage(self):\n try:\n return self.found * 100 / self.needed\n except ZeroDivisionError:\n return 100.0", "def max_occupancy_percent_for_deferred_work(self):\n return 
self._max_occupancy_percent_for_deferred_work", "def _calc_freeze_probability(self, num_iterations, final_fraction):\n return 1.0 - (final_fraction ** (1.0 / num_iterations))", "def mask_percentage(self):\n return 100 - self.tissue_percentage", "def penalty(self):\n return 0", "def percentage(self):\n temp = self.cpu_freq_time_spent.copy()\n for i in self.cpu_freq_time_spent:\n total = 0\n for j in self.cpu_freq_time_spent[i]:\n total += self.cpu_freq_time_spent[i][j]\n for j in self.cpu_freq_time_spent[i]:\n if total != 0:\n temp[i][j] = self.cpu_freq_time_spent[i][j] * 100 / total\n else:\n temp[i][j] = 0\n return temp", "def percent_usage(value, total):\n if total:\n return float(value) * 100.0 / (float(total) + float(value))\n else:\n return 100.0", "def get_load_factor(self):\n # Your code here\n return self.total_items / self.capacity", "def total_sdram_requirements(self):", "def recall(self) -> float:\n if self.ref_ignored:\n num_ref_ignored = len(self.ref_set) - len(self.ref_unignored_set)\n self.num_ignored += num_ref_ignored\n # True Positive = the number of unignored reference mappings that are Positive\n tp = len(self.ref_unignored_set.intersection(self.pre_set))\n # False Negative = the number of unignored reference mappings that are Negative\n fn = len(self.ref_set) - tp - num_ref_ignored\n return tp / (tp + fn)", "def pct_helper(self,k,d,total):\n if k in d:\n return 100.0*d[k]/total\n else:\n return -100.0", "def rF(count, total):\n\treturn float(count)/float(total)", "def calculate_percent(self, total_number, some_number):\n\t\treturn (some_number * 100) / total_number", "def update_percent(self):", "def compute_sufficient_stats(self):\n self.counts = (np.sum(self.resp, axis=0) + 10e-30)\n # print(self.counts)\n for k in range(self.k):\n self.means[k] = np.sum(self.resp[n, k] * self.x[n] for n in range(self.n)) / self.counts[k]\n self.covars[k] = np.sum(self.resp[n, k] * (self.x[n] - self.means[k]) @ (self.x[n] - self.means[k]).T\n for n in range(self.n)) / self.counts[k]\n self.covars[k] = np.nan_to_num(self.covars[k])\n self.means[k] = np.nan_to_num(self.means[k])", "def life_insurance_to_recive_total(self):\n pass", "def revenue_landfill(self) -> float:\n return self.income_statement.revenue.operating_revenue * (\n 1 - self.inputs.allocation_to_collection_unit\n )", "def cost(self) -> float:", "def get_expected_cost(self):", "def getPercent(*args):", "def getPercent(*args):", "def _calculate_result(found, total):\n return (found * 100) / total", "def set_defensive_ratio(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n if bx[\"minutes\"] > 0:\n opp_fga = opp_team[\"t2p_int\"] + opp_team[\"t3p_int\"]\n opp_fgm = opp_team[\"t2p_conv\"] + opp_team[\"t3p_conv\"]\n try:\n dor = Decimal(opp_team[\"reb_of\"] / (opp_team[\"reb_of\"] + team[\"reb_def\"]))\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n dor = 0\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n dor = 0\n\n try:\n dfg = Decimal(opp_fgm / opp_fga)\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n dfg = 0\n try:\n fmwt = Decimal((dfg * (1 - dor)) / (dfg * (1 - dor) + (1 - dfg) * dor))\n except:\n fmwt = 0\n stops1 = bx[\"steals\"] + bx[\"block_shots\"] * fmwt * (1 - Decimal('1.07') * dor) + bx[\"reb_def\"] * (1 - fmwt)\n\n try:\n stops2 = (Decimal((opp_fga - opp_fgm - team[\"block_shots\"]) / team[\"minutes\"]) 
* fmwt * (1 - Decimal('1.07') * dor) + Decimal((opp_team[\"turnovers\"] - team[\"steals\"]) / team[\"minutes\"])) * bx[\"minutes\"] + Decimal(bx[\"fouls_cm\"] / team[\"fouls_cm\"]) * Decimal('0.4') * opp_team[\"tl_int\"] * (1 - Decimal(opp_team[\"tl_conv\"] / opp_team[\"tl_int\"]))**2\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n stops2 = 0\n except InvalidOperation:\n print(BCOLORS.WARNING + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n stops2 = 0\n\n stops = stops1 + stops2\n poss = self.get_team_possessions()\n if bx[\"minutes\"] > 0:\n stop_percentage = (float(stops) * float(opp_team[\"minutes\"])) / (float(poss) * float(bx[\"minutes\"]))\n else:\n stop_percentage = 0.00\n opp_points = opp_team[\"t2p_conv\"] * 2 + opp_team[\"t3p_conv\"] * 3 + opp_team[\"tl_conv\"]\n team_defensive_rating = 100 * (float(opp_points) / poss)\n try:\n d_pts_per_scposs = float(opp_points) / (float(opp_fgm) + (1 - (1 - (float(opp_team[\"tl_conv\"]) / float(opp_team[\"tl_int\"])))**2) * float(opp_team[\"tl_int\"])*0.4)\n result = Decimal(team_defensive_rating) + Decimal('0.2') * (100 * Decimal(d_pts_per_scposs) * (1 - Decimal(stop_percentage)) - Decimal(team_defensive_rating))\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n d_pts_per_scposs = 0\n result = 0.00\n\n\n\n # print(\"dor: \" + str(dor))\n # print(\"dfg: \" + str(dfg))\n # print(\"fmwt: \" + str(fmwt))\n # print(\"stops1: \" + str(stops1))\n # print(\"stops2: \" + str(stops2))\n # print(\"stops: \" + str(stops))\n # print(\"poss: \" + str(poss))\n # print(\"stop_percentage: \" + str(stop_percentage))\n # print(\"opp_points: \" + str(opp_points))\n # print(\"team_defensive_rating: \" + str(team_defensive_rating))\n # print(\"d_pts_per_scposs: \" + str(d_pts_per_scposs))\n # print(\"drtg: \" + str(result) + \"\\n\")\n else:\n result = 0.00\n self.drtg = \"%.2f\" % round(result, 2)", "def computeFScores(self, targetLabels, actualLabels):\r\n if self.prMeasures is None:\r\n self.prMeasures = self.computePRMeasures(targetLabels, actualLabels)\r\n if self.prMeasures[0] == 0:\r\n return 0\r\n self.f1score = 2 * self.prMeasures[0] * self.prMeasures[1] / (0.0 + self.prMeasures[0] + self.prMeasures[1])\r\n return self.f1score", "def fractionPassing(self):\n return self.cut.entries / self.entries", "def percentage(count, total):\n return count / total * 100", "def usage_percent(used, total, _round=None):\r\n try:\r\n ret = (used / total) * 100\r\n except ZeroDivisionError:\r\n ret = 0\r\n if _round is not None:\r\n return round(ret, _round)\r\n else:\r\n return ret", "def dilutionneeded(self) -> float:\n return self.stock*1.0/self.final", "def percentage_update(self):\n\n self.event_update()\n return self.percentage", "def fitness(individual, divider, target_sum, target_multiply):\n\n sum_val = reduce(operator.add, individual[:divider], 0)\n multiply_val = reduce(operator.mul, individual[divider:], 1)\n \n sum_error = abs(target_sum - sum_val)\n sum_error = sum_error / target_sum\n\n multiply_error = abs(target_multiply - multiply_val)\n multiply_error = multiply_error / target_multiply\n\n #print(multiply_error, sum_error)\n #print(sum_error, multiply_error)\n return (multiply_error + sum_error)/2 * 100", "def get_percentage(self):\n return self.PotTax_percentage", "def calc_annual_electric_savings (self):\n price = self.diesel_prices\n #TODO add rural v non rural\n self.base_generation_cost = self.electric_diesel_reduction * price\n\n\n 
self.proposed_generation_cost = self.maintenance_cost\n\n self.annual_electric_savings = self.base_generation_cost - \\\n self.proposed_generation_cost\n #~ print 'self.annual_electric_savings',self.annual_electric_savings", "def percent_busy(self):\n return self._percent_busy", "def patrimony_total(self):\n pass", "def __calculate_statistics(self, candidates):\n pdf = {}\n for candidate in candidates:\n neighbors = list(self.G.neighbors(candidate))\n capacity = sum([self.G.get_edge_data(candidate, n)[\"satoshis\"] for n in neighbors])\n average = capacity / len(neighbors)\n pdf[candidate] = average\n cumsum = sum(pdf.values())\n pdf = {k:v/cumsum for k,v in pdf.items()}\n w = 0.7\n print(\"percentage smoothed percentage capacity numchannels alias\")\n print(\"----------------------------------------------------------------------\")\n res_pdf = {}\n for k,v in pdf.items():\n neighbors = list(self.G.neighbors(k))\n capacity = sum([self.G.get_edge_data(k, n)[\"satoshis\"] for n in neighbors])\n name = k\n if \"alias\" in self.G.node[k]:\n name = self.G.node[k][\"alias\"]\n print(\"{:12.2f} \".format(100*v), \"{:12.2f} \".format(100*(w * v + (1-w)/len(candidates))) ,\"{:10} {:10} \".format( capacity, len(neighbors)), name)\n res_pdf[k] = (w * v + (1-w)/len(candidates))\n return res_pdf", "def rate(self, neighbors, labels):\n num = 0\n den = 0\n for neighbor in neighbors:\n lable = self.labels[neighbor[1]]\n dest_to_neighbor = neighbor[0]\n num += lable / dest_to_neighbor\n den += 1 / dest_to_neighbor\n return num/den", "def percentage_complete(self) -> float:\n return self.__percentage_complete", "def represent_total_percent(self, length):\n numpkgs = self.totals['numpkgs']\n dlpkgs = self.totals['dlpkgs']\n return self.represent_percent(dlpkgs, numpkgs, length)", "def remaining_percent(self):\n return (self.remaining_words / self.total_words) * 100", "def overhead(readings):\n return 100.0 * (int(readings[0]) + int(readings[1])) / (int(readings[2]) + int(readings[3]))", "def get_estimated_percentage(self):\n now_id = now_as_id()\n message_id = self.last_message_id\n if message_id >= now_id:\n return 100.0\n \n channel_id = self.source_channel.id\n if channel_id >= message_id:\n return 0.0\n \n if self.is_polling_done():\n return 100.0\n \n return (1.0 - (now_id - message_id) / (now_id - channel_id)) * 100.0", "def calc_pct_to_save_as_doublets(self):\n x, y = load_expected_doublet_rates( # pylint: disable=invalid-name\n \"/Users/austinhartman/Desktop/doublet-caller/src/expected_doublet_rates.csv\"\n )\n r = calculate_expected_doublet_rate(x, y) # pylint: disable=invalid-name\n return self.num_cells * r[\"coefficient\"] + r[\"intercept\"]", "def penalty_calc(self):\n self.p_budget = (self.tx_oma_min - self.rx_unstressed_sensitivity - self.fiber_conn_loss)*self.l_1\n\n # fiber attenuation,\n self.p_atten = self.alpha*self.length # column B\n\n # calculate bandwidth for RIN test (exclude transmitter)\n rin_inverse_bw = np.sqrt(np.square(1.0/self.bw_cd) + np.square(1.0/self.bw_md) + (0.477/(self.rx_bw**2))*self.l_1)\n rin_bw = 1.0 / rin_inverse_bw\n\n # see FC-MSQS-2 equation B.47 in Annex B.4 for the following k_rin = math.sqrt(2.0/math.pi)*erfinv(0.8)\n k_rin = 0.7\n\n # v_rin,\n self.v_rin = (k_rin*1E6*(self.rin_test_isi**2)*rin_bw*\n math.pow(10.0,0.1*self.rin)) # column AK\n\n # Prin,\n print('v_rin: ', self.v_rin)\n print('Q: ',self.Q)\n print('isi_dj_refl_closed :', self.isi_dj_refl_closed)\n self.p_rin = -10.0*np.log10(np.sqrt(1.0-np.multiply(self.v_rin, 
np.square(self.Q/self.isi_dj_refl_closed)))) # column R\n print(\"P_rin : \", self.p_rin)\n self.beta = (3.14159E-6*self.speedup*self.br_nominal *self.delta_lambda*self.d1*self.length) # column O\n self.sigma_mpn = (self.k_mpn/math.sqrt(2.0)*(self.l_1 -np.exp(-np.square(self.beta)))) # column P\n self.p_mpn = (-10.0*np.log10(np.sqrt(self.l_1 - (self.Q**2)*np.square(self.sigma_mpn)))) # column Q\n self.p_blw = (-10.0*math.log10(math.sqrt(1.0- ((self.Q*self.sigma_blw)/ self.isi_tp4_rx)**2))*self.l_1) # cell T13\n self.p_reflection = -10.0*np.log10(self.isi_reflection) # column N\n self.v_mn = (((1.0-math.pow(10.0,-0.2*self.pmn))/ (self.Q)**2)*self.l_1) # cell AG7\n print(\"isi_center : \", self.isi_center)\n\n self.p_isi_center = -10.0*np.log10(self.isi_center) # column J\n\n self.p_isi_corners = (-10.0*np.log10(self.isi_corners) - self.p_isi_center) # column K\n self.p_isi_dj_center = (-10.0*np.log10(self.isi_dj_refl_closed) - self.p_isi_center) # column L\n self.p_isi_dj_corners = (-10.0*np.log10(self.isi_dj_corners) -self.p_isi_center -self.p_isi_corners) # column M\n\n\n # calculate the \"cross\" penalty contribution, column S\n arg1 = ((self.sigma_blw**2 + self.v_rin)/ np.square(self.isi_dj_refl_closed))\n arg2 = self.l_1 - (self.Q**2)*(arg1 + self.v_mn + np.square(self.sigma_mpn))\n arg3 = (-10.0*np.log10(np.multiply(self.isi_dj_refl_closed, np.sqrt(arg2))))\n self.p_cross_center = ( # column S\n arg3\n - self.p_blw # cell T13\n - self.p_isi_center # column J\n - self.p_isi_dj_center # column L\n - self.p_mpn # column Q\n - self.p_reflection # column N\n - self.p_rin # column R\n - self.pmn*self.l_1) # cell G13\n print('p_isi_center: ', self.p_isi_center)\n\n # calculate the total power budget evaluated at the center of the eye\n self.p_total_center = ( # column T\n self.p_isi_center # column J\n + self.p_isi_dj_center # column L\n + self.p_atten # column B\n + self.p_mpn # column Q\n + self.p_reflection # column N\n + self.p_rin # column R\n + self.p_cross_center # column S\n + self.pmn*self.l_1) # cell G13\n # calculate the total power budget evaluated at the corner of the eye\n self.p_total_corners = (\n self.p_isi_center # column J\n + self.p_isi_corners # column K\n + self.p_atten # column B\n + self.p_mpn # column Q\n + self.p_reflection # column N\n + self.p_rin # column R\n + self.p_cross_center # column S\n + self.pmn*self.l_1 # cell G13\n + self.p_isi_dj_corners)# column M\n\n # receiver stressed sensitivity\n self.margin = ( self.p_budget\n - self.p_total_center) # column W\n\n self.rx_stressed_sensitivity = (\n self.tx_oma_min*self.l_1\n - self.chil\n - self.p_mpn\n - self.p_reflection\n - self.p_rin\n - 0.5*self.p_cross_center\n - self.pmn*self.l_1\n - self.margin[self.lnum//2]*self.l_1)\n\n\n # end of GbE10.penalty_calc\n #======================================================================+", "def calculate_finalscore(self):\n\n if self.count!=0:\n print(self.count)\n print(self.badGuess)\n self.finalScore=(self.total/self.count)- ((self.total/self.count)*(10*self.badGuess)/100)\n\n\n else:\n self.finalScore=self.total", "def theoretical_effective(dataset):\n return float(sum(dataset))/len(dataset)", "def efficiency_cal(self):\n Temp = 0\n for i in self.supplyseries:\n for j in self.demandseries:\n if(self.shortestpathij(i, j) == None):\n continue\n Temp += 1/self.shortestpathij(i, j)\n \n self.efficiency = 1/(self.supplynum*self.demandnum)*Temp", "def percentageChange(self):\n try:\n curPrice = self.dailyData[-1].currentPrice\n closePrice = 
self.historicData[-1].closePrice\n except IndexError: # Just return zero when no historic or dailyData is available yet\n return 0.0\n return (curPrice - closePrice)/closePrice * 100", "def calculate_gpa(self):\n cur_node = self.head\n gpa = 0\n total_credits = 0\n while cur_node is not None:\n gpa += cur_node.data.grade() * cur_node.data.credit_hr()\n total_credits += cur_node.data.credit_hr()\n cur_node = cur_node.next\n if total_credits == 0:\n return 0\n return gpa / total_credits", "def max_total_unready_percentage(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"max_total_unready_percentage\")", "def ComputeNrb(self):\r\n pass", "def evaluate_percentage_of_class_for_each_flight(df_flights_info):\n df_new = df_flights_info[['FlightId', 'FareClass', 'Booking']]\n df_new = df_new.pivot(index='FlightId', columns='FareClass', values='Booking').astype(float)\n df_new.loc[:, 'Total'] = df_new.sum(axis=1).astype(float)\n for row, col in df_new.iterrows():\n for item in list(df_new):\n number_booking = df_new.loc[row, item]\n total_booking = df_new.loc[row]['Total']\n percentage = float(number_booking / total_booking)\n df_new.at[row, item] = percentage\n df_new = df_new.drop(columns=['Total'])\n df_new = df_new.stack()\n df_new = df_new.reset_index(level=[0, 1])\n df_flights_info = pandas.merge(df_flights_info, df_new, how='left', on=['FlightId', 'FareClass'])\n df_flights_info.rename(columns={0: 'Percentage'}, inplace=True)\n return df_flights_info", "def cf_mean(self):\n return self['capacity_factor'] / 100", "def cf_mean(self):\n return self['capacity_factor'] / 100", "def calculateDataRate(self):\n pass", "def _percent(self, lines_total, lines_covered):\n\n if lines_total == 0:\n return '0.0'\n return str(float(float(lines_covered) / float(lines_total)))", "def getPercent(self):\n if isinstance(self.score,numbers.Number) and self.getMaximum():\n return (1.0*self.score/self.getMaximum())\n return None", "def rate(self) -> float:\n return self.success_cnt / self.total_cnt if self.total_cnt > 0 else 1.0", "def percent_updated(self):\n return self.percent_complete - self.previous_percent_complete", "def cost_fun(self, specs_dict: Dict[str, float]) -> float:\n cost = 0\n for spec in self.spec_range.keys():\n penalty = self.compute_penalty(specs_dict[spec], spec)[0]\n cost += penalty\n\n return cost", "def _compute_register_bounds(cls, num_values, probability):\n bits = np.arange(1, num_values + 1)\n probs = scipy.stats.geom.cdf(bits, probability)\n return probs / probs[-1]", "def get_percentage(self):\n return self.percentage", "def get_percentage(self):\n return self.percentage", "def get_free_set_percentage(self, params):\n return params.scaling_options.free_set_percentage", "def ComputeRegenerativeBraking(self):\r\n pass", "def total_rewards(self) -> float:\n return self.__total_rewards", "def calculate(self) -> float:", "def grand_total(self):\n return sum(self.grid[pos][1] for pos in assignable_positions if self.grid[pos][0]) + self.grid[\"nb\"][1]", "def shield_percentage(self) -> Union[int, float]:\n if not self.proto.shield_max:\n return 0\n return self.proto.shield / self.proto.shield_max", "def shield_percentage(self) -> Union[int, float]:\n if not self.proto.shield_max:\n return 0\n return self.proto.shield / self.proto.shield_max", "def percent_passing(self) -> float:\n num_meas = Enumerable(self.mlc_meas).select_many(lambda m: m.passed).count()\n num_pass = (\n Enumerable(self.mlc_meas)\n .select_many(lambda m: m.passed)\n .count(lambda p: bool(p) is True)\n )\n 
return float(100 * num_pass / num_meas)", "def percent_b(self) -> float:\n return self._percent_b", "def compute_stats(self):\n if self.stats is not None:\n return\n self.stats = np.zeros(STEPS_MAX + 1)\n for m in self.missions:\n m.compute_stats()\n self.stats += 100 * m.stats\n self.stats /= len(self.missions)", "def _cost_caught_by_police(self):\n if self.fine_frequency != 0:\n if self.number_of_courses % self.fine_frequency == 0 and self.number_of_courses != 0:\n if self.number_of_courses % self.fine_frequency_paid_by_driver == 0 and self.number_of_courses != 0:\n self.fine_paid_number_of_courses += 1\n fine_value = np.random.choice([100, 200, 500], p=[0.25, 0.4, 0.35])\n self.total_penalty_points += self._add_penalty_points() # adding penalty points\n return fine_value\n else:\n return 0\n else:\n return 0\n else:\n return 0", "def get_duty_percentage(self):\n container_line_ids = self\n hbl_customs_obj = self.env['hbl.customs.duty']\n for line in container_line_ids:\n p_line = line.purchase_line\n #Get the supplier from product by using po supplier id.\n product_supplier_id = p_line.product_id.seller_ids.filtered(lambda rec:rec.name.id == p_line.partner_id.id and rec.hts_codes_ids)\n #Get HTS code of the supplier\n hts_codes_ids = product_supplier_id and product_supplier_id[0].hts_codes_ids or False\n if hts_codes_ids:\n percentage = sum(hts_codes_ids.mapped('percentage'))\n line_customs_id = hbl_customs_obj.create({'hbl_line_id' : line.id,\n 'hts_ids': [(6,_, hts_codes_ids.ids)],\n 'duty_percentage': percentage,\n 'quantity' : line.qty_to_load,\n 'unit_price' : p_line.price_unit\n })\n line.write({'line_customs_id' : line_customs_id.id})", "def percent_processed(self):\n try:\n return (self.pos / self.data_encap.size) * 100.0\n except ZeroDivisionError:\n return 100.0", "def private_pension_total(self):\n pass", "def calc_stats(hits, misses):\n try:\n result = (float(misses) / float(hits)) * 100.0\n except ZeroDivisionError:\n if misses == 0:\n result = 0.0\n else:\n result = 100.0\n return result", "def exceeded_ratio(self) -> float:\n return self.amount_spent / self.total_amount", "def calc_annual_electric_savings (self):\n costs = self.comp_specs['diesel generator o&m']\n\n for kW in costs.keys():\n try:\n if self.average_load < int(kW):\n maintenance = self.comp_specs['diesel generator o&m'][kW]\n break\n except ValueError:\n maintenance = self.comp_specs['diesel generator o&m'][kW]\n\n self.baseline_generation_cost = maintenance + \\\n (self.pre_intertie_generation_fuel_used * self.diesel_prices)\n\n maintenance = self.capital_costs * \\\n (self.comp_specs['percent o&m'] / 100.0)\n self.proposed_generation_cost = maintenance + \\\n self.intertie_offset_generation_fuel_used * \\\n self.intertie_diesel_prices\n self.annual_electric_savings = self.baseline_generation_cost -\\\n self.proposed_generation_cost\n #~ print len(self.annual_electric_savings)\n #~ print 'self.annual_electric_savings',self.annual_electric_savings", "def _compute_penalty(self):\n raise ValueError('Implement in a child class')", "def PercentMaxRate(self):\n\t\treturn self._get_attribute('percentMaxRate')", "def calc_excess_energy (self):\n #~ print sorted(self.cd.keys())\n self.excess_energy = \\\n (self.generation_wind_proposed - self.transmission_losses) * \\\n (self.cd['percent excess energy'] / 100.0)\n #~ print 'self.excess_energy',self.excess_energy", "def _calcTotalMax(self, item, masterIntf, slaveIntf):\n if self.classmax:\n total = 
int((self.params[masterIntf]['intf_max']*self.params[masterIntf][slaveIntf]))\n total = int(total/self.params[masterIntf]['total_allocated'])+item['reserved']\n totalAll = item['reqRate'] + item['reserved']\n return max(total, totalAll)\n return item['reqRate'] + item['reserved']", "def compute_utilization(self) -> float:\r\n return self._compute_utilization" ]
[ "0.6720814", "0.65615296", "0.64223516", "0.63799196", "0.63289535", "0.63207704", "0.63154423", "0.62732965", "0.6261528", "0.6167289", "0.6124796", "0.61081874", "0.6068133", "0.60473025", "0.5999613", "0.5988745", "0.59840715", "0.5983306", "0.5973883", "0.59696484", "0.59576386", "0.59197676", "0.59165156", "0.587996", "0.5865504", "0.5859559", "0.585895", "0.5858603", "0.5855112", "0.5829298", "0.58145696", "0.57965887", "0.5792907", "0.57922316", "0.57863903", "0.5776798", "0.5776798", "0.5770127", "0.5761446", "0.57581216", "0.5758022", "0.57552683", "0.5754498", "0.5751767", "0.57515454", "0.57418174", "0.5736591", "0.5729933", "0.5724472", "0.5712921", "0.56928974", "0.5692151", "0.56806296", "0.5677196", "0.56634945", "0.56594735", "0.5657584", "0.5648082", "0.5647366", "0.5637153", "0.5632266", "0.5629826", "0.5628365", "0.56143117", "0.5604036", "0.56016886", "0.55992", "0.5595613", "0.5595613", "0.5594552", "0.55928564", "0.55927795", "0.5592624", "0.55915564", "0.5590641", "0.55886316", "0.55875444", "0.55875444", "0.5581265", "0.5565706", "0.55654895", "0.5562704", "0.5560306", "0.55576754", "0.55576754", "0.55507755", "0.5550245", "0.55489564", "0.55254537", "0.55235225", "0.55218625", "0.55194646", "0.55153394", "0.55142355", "0.55073756", "0.55067825", "0.5504176", "0.5494338", "0.54923844", "0.54922533" ]
0.6653622
1
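The row above pairs the defensive-rebound query with its source implementation, which pulls its inputs from the class accessors get_standard_stats(), get_team_stats() and get_opp_team_stats(). As a minimal standalone sketch of the same calculation (the function and argument names below are hypothetical; only the formula is taken from the document above):

def defensive_rebound_percentage(reb_def, minutes, team_minutes,
                                 team_reb_def, opp_reb_of):
    # DRB% = 100 * (DRB * (team minutes / 5)) / (player minutes * (team DRB + opp ORB))
    denominator = minutes * (team_reb_def + opp_reb_of)
    if minutes <= 0 or denominator == 0:
        return 0.0
    return 100.0 * (reb_def * (team_minutes / 5)) / denominator

Called with plain box-score totals, e.g. defensive_rebound_percentage(6, 28, 200, 30, 12), it returns the player's estimated share of available defensive rebounds, the quantity the document stores as total_reb_def_percentage.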
Method which calculates Total Rebound Offensive Percentage
def set_total_reb_of_percentage(self): bx = self.get_standard_stats() team = self.get_team_stats() opp_team = self.get_opp_team_stats() result = 0.00 try: if bx["reb_of"] > 0 and bx["minutes"] > 0: result = ((bx["reb_of"] * (team["minutes"]/5)) / (bx["minutes"] * (team["reb_of"] + opp_team["reb_def"])))*100 except ZeroDivisionError: print(BCOLORS.FAIL + "Error: División por cero" + BCOLORS.ENDC) self.total_reb_of_percentage = "%.2f" % round(result, 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_total_reb_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n player_rebounds = bx[\"reb_def\"] + bx[\"reb_of\"]\n team_rebounds = team[\"reb_def\"] + team[\"reb_of\"]\n opp_team_rebounds = opp_team[\"reb_def\"] + opp_team[\"reb_of\"]\n result = 0.00\n try:\n if bx[\"minutes\"] > 0 and bx[\"minutes\"] > 0:\n result = ((player_rebounds * (team[\"minutes\"]/5)) / (bx[\"minutes\"] * (team_rebounds + opp_team_rebounds)))*100\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n\n self.total_reb_percentage = \"%.2f\" % round(result, 2)", "def get_real_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_real_rating() / self.field.range)", "def pct(self):\n\t\treturn self.bottle.pct()", "def set_total_reb_def_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n result = 0.00\n try:\n if bx[\"minutes\"] > 0 and bx[\"minutes\"] > 0:\n result = ((bx[\"reb_def\"] * (team[\"minutes\"]/5)) / (bx[\"minutes\"] * (team[\"reb_def\"] + opp_team[\"reb_of\"])))*100\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n\n self.total_reb_def_percentage = \"%.2f\" % round(result, 2)", "def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)", "def pct_bust(data):\n return round((data[\"new_total\"] > 21).sum() / len(data), 3)", "def percent_raised(self):\n total_cost = self.total_cost()\n if total_cost:\n return round(self.total_raised() * 100 / total_cost, 2)\n else:\n return 0", "def __call__(self, relsSortedByScores, qrelDict):\n result = 0.\n postQty = len(qrelDict)\n\n pos = 0\n for i, rel in enumerate(relsSortedByScores):\n if rel > RELEVANCE_THRESHOLD:\n pos += 1.\n result += pos / (i + 1.)\n\n return result / postQty", "def calculate_profit(self):", "def get_free_set_percentage(self, params):\n raise NotImplementedError()", "def calculScore(self):\n for cell in self.notComputeRouter:\n if(cell.isCovered==True):\n self.score += 1000\n self.score += self.budget", "def overall_reduction(self):\n return 84", "def _prorata_rate(self, days_used, days_in_month):\n return (100 * days_used // days_in_month) / 100.0", "def getClassBalance(pshapes, bounds, proj):\n\n xmin, ymin, xmax, ymax = bounds\n bpoly = Polygon([(xmin, ymax),\n (xmax, ymax),\n (xmax, ymin),\n (xmin, ymin)])\n project = partial(\n pyproj.transform,\n pyproj.Proj(proj='latlong', datum='WGS84'),\n proj)\n bpolyproj = transform(project, bpoly)\n totalarea = bpolyproj.area\n polyarea = 0\n for pshape in pshapes:\n polyarea += pshape.area\n\n return polyarea/totalarea", "def get_load_factor(self):\n # Your code here\n return self.total_items / self.capacity", "def CalculateBoundProbability(self, psi):\n\n\t\t_, _, _, boundTotal = self.CalculateBoundDistribution(psi)\n\n\t\treturn boundTotal", "def represent_total_percent(self, length):\n numpkgs = self.totals['numpkgs']\n dlpkgs = self.totals['dlpkgs']\n return self.represent_percent(dlpkgs, numpkgs, length)", "def adv_ratio(self): # XXX\r\n bw = StatsRouter.global_bw_mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/bw", "def calculate_percent(self, total_number, 
some_number):\n\t\treturn (some_number * 100) / total_number", "def coverage(self):\n try:\n return self.found * 100 / self.needed\n except ZeroDivisionError:\n return 100.0", "def ComputeNrb(self):\r\n pass", "def fractionPassing(self):\n return self.cut.entries / self.entries", "def percentage(count, total):\n return count / total * 100", "def _calculate_result(found, total):\n return (found * 100) / total", "def rF(count, total):\n\treturn float(count)/float(total)", "def exceeded_ratio(self) -> float:\n return self.amount_spent / self.total_amount", "def percent_usage(value, total):\n if total:\n return float(value) * 100.0 / (float(total) + float(value))\n else:\n return 100.0", "def grand_total(self):\n return sum(self.grid[pos][1] for pos in assignable_positions if self.grid[pos][0]) + self.grid[\"nb\"][1]", "def fitness(individual, divider, target_sum, target_multiply):\n\n sum_val = reduce(operator.add, individual[:divider], 0)\n multiply_val = reduce(operator.mul, individual[divider:], 1)\n \n sum_error = abs(target_sum - sum_val)\n sum_error = sum_error / target_sum\n\n multiply_error = abs(target_multiply - multiply_val)\n multiply_error = multiply_error / target_multiply\n\n #print(multiply_error, sum_error)\n #print(sum_error, multiply_error)\n return (multiply_error + sum_error)/2 * 100", "def compute_total_customs_duty(self):\n for rec in self:\n total = 0.0\n extra_duty = 0.0\n price_total = rec.quantity * rec.unit_price\n# total = (price_total * duty_percentage)/100\n rec.price_total = price_total\n# for hts in rec.hts_ids:\n# if hts.extra_duty_applicable:\n# extra_duty += ((rec.quantity/hts.quantity) * hts.extra_duty)\n# rec.total = total + extra_duty\n\n return True", "def calculate_gpa(self):\n cur_node = self.head\n gpa = 0\n total_credits = 0\n while cur_node is not None:\n gpa += cur_node.data.grade() * cur_node.data.credit_hr()\n total_credits += cur_node.data.credit_hr()\n cur_node = cur_node.next\n if total_credits == 0:\n return 0\n return gpa / total_credits", "def upper_bound(self) -> float:\n ...", "def getPercent(*args):", "def getPercent(*args):", "def penalty(self):\n return 0", "def patrimony_total(self):\n pass", "def life_insurance_to_recive_total(self):\n pass", "def total_rewards(self) -> float:\n return self.__total_rewards", "def mb_r(self) -> float:\n # Calculate metric\n n = self.predicted.size\n tot = 0.0\n for i in range(n):\n tot = tot + np.sum(np.abs(self.predicted - self.true[i]))\n mae_val = np.sum(np.abs(self.predicted - self.true)) / n\n mb = 1 - ((n ** 2) * mae_val / tot)\n\n return float(mb)", "def get_percentage(self):\n return self.PotTax_percentage", "def cal_hit_gbratio(self):\n full, top_k = self._subjects, self._top_k\n top_k = full[full['rank']<=top_k]\n #print({d['user'].iloc[0]:d['ratings'].to_list() for i,d in top_k.groupby('user')})\n score = 0.0\n # golden items hit in the top_K items\n score_1 = {d['user'].iloc[0]:len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==1.0)]) for i,d in top_k.groupby('user')}\n score_2 = {d['user'].iloc[0]:len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==0.0)]) for i,d in top_k.groupby('user')} \n score_ratio = [(score_1[d]-score_2[d]/self._test_ratings[d]) if self._test_ratings[d]!=0 else 0 for d in self._test_ratings.keys()]\n\n #print(np.mean(score_ratio))\n #print(score_1)\n #score = score_1 + score_2\n return np.mean(score_ratio)", "def update_percent(self):", "def percent_b(self) -> float:\n return self._percent_b", "def 
private_pension_total(self):\n pass", "def percentage_update(self):\n\n self.event_update()\n return self.percentage", "def cost(self) -> float:", "def calc_pct_to_save_as_doublets(self):\n x, y = load_expected_doublet_rates( # pylint: disable=invalid-name\n \"/Users/austinhartman/Desktop/doublet-caller/src/expected_doublet_rates.csv\"\n )\n r = calculate_expected_doublet_rate(x, y) # pylint: disable=invalid-name\n return self.num_cells * r[\"coefficient\"] + r[\"intercept\"]", "def overhead(readings):\n return 100.0 * (int(readings[0]) + int(readings[1])) / (int(readings[2]) + int(readings[3]))", "def usage_percent(used, total, _round=None):\r\n try:\r\n ret = (used / total) * 100\r\n except ZeroDivisionError:\r\n ret = 0\r\n if _round is not None:\r\n return round(ret, _round)\r\n else:\r\n return ret", "def budget_used(self):\n return int(self.total_spent() / self.budget() * 100.0)", "def percentage(self):\n temp = self.cpu_freq_time_spent.copy()\n for i in self.cpu_freq_time_spent:\n total = 0\n for j in self.cpu_freq_time_spent[i]:\n total += self.cpu_freq_time_spent[i][j]\n for j in self.cpu_freq_time_spent[i]:\n if total != 0:\n temp[i][j] = self.cpu_freq_time_spent[i][j] * 100 / total\n else:\n temp[i][j] = 0\n return temp", "def robbins(counts):\n return float(singles(counts))/counts.sum()", "def calculate(self) -> float:", "def calculate_risk_tol(*args):\n global total_score\n risk_tol_start = 0.0\n\n for risk_per_pg in risk_tol_per_qs.iterkeys():\n try:\n risk_tol_start = risk_tol_start + risk_tol_per_qs[risk_per_pg][-1] # this is the last item in the list of each information in the page\n except IndexError:\n pass\n total_score = risk_tol_start", "def get_expected_cost(self):", "def total(self) -> float:\n\n remained_to_be_taxed = self.income\n # taxed = list()\n self.tax_amounts = []\n start_tax_range = 0\n end_tax_range = self.bracket\n\n for i, b in enumerate(self.bracket):\n\n amount_to_tax = b.end - start_tax_range\n t = Taxed(min(amount_to_tax, remained_to_be_taxed), b.rate,\n min(amount_to_tax, remained_to_be_taxed) * b.rate)\n self.tax_amounts.append(t)\n # print(i, start_t ax_range, b.end, amount_to_tax, b.rate)\n\n remained_to_be_taxed -= amount_to_tax\n # print(remained_to_be_taxed)\n\n if b.end > self.income:\n break\n\n start_tax_range = b.end\n\n # print(taxed)\n return sum([t.tax for t in self.tax_amounts])", "def revenue_landfill(self) -> float:\n return self.income_statement.revenue.operating_revenue * (\n 1 - self.inputs.allocation_to_collection_unit\n )", "def return_on_total_assets():\r\n x = float(input(\"Please Enter Net Income Value: \"))\r\n y = float(input(\"Please Enter Interest Expense Value: \"))\r\n z = float(input(\"Please Enter Beginning Total Assets Value: \"))\r\n w = float(input(\"Please Enter Ending Total Assets Value: \"))\r\n d = ((float(x)+float(y)) / ((float(z)+float(w)) / float(2))) * float(100)\r\n print \">> Your Rate of Return on Total Assets is\",round(d,1),\"%\"", "def compute_bound(self, sstats, totals):\n w = self.vocab_len\n t = self.num_time_slices\n\n term_1 = 0\n term_2 = 0\n term_3 = 0\n\n val = 0\n ent = 0\n\n chain_variance = self.chain_variance\n # computing mean, fwd_mean\n self.mean, self.fwd_mean = \\\n (np.array(x) for x in zip(*(self.compute_post_mean(w, self.chain_variance) for w in range(w))))\n self.zeta = self.update_zeta()\n\n val = sum(self.variance[w][0] - self.variance[w][t] for w in range(w)) / 2 * chain_variance\n\n logger.info(\"Computing bound, all times\")\n\n for t in range(1, t + 1):\n term_1 = 0.0\n 
term_2 = 0.0\n ent = 0.0\n for w in range(w):\n\n m = self.mean[w][t]\n prev_m = self.mean[w][t - 1]\n\n v = self.variance[w][t]\n\n # w_phi_l is only used in Document Influence Model; the values are always zero in this case\n # w_phi_l = sslm.w_phi_l[w][t - 1]\n # exp_i = np.exp(-prev_m)\n # term_1 += (np.power(m - prev_m - (w_phi_l * exp_i), 2) / (2 * chain_variance)) -\n # (v / chain_variance) - np.log(chain_variance)\n\n term_1 += \\\n (np.power(m - prev_m, 2) / (2 * chain_variance)) - (v / chain_variance) - np.log(chain_variance)\n term_2 += sstats[w][t - 1] * m\n ent += np.log(v) / 2 # note the 2pi's cancel with term1 (see doc)\n\n term_3 = -totals[t - 1] * np.log(self.zeta[t - 1])\n val += term_2 + term_3 + ent - term_1\n\n return val", "def total_sdram_requirements(self):", "def rate(self, neighbors, labels):\n num = 0\n den = 0\n for neighbor in neighbors:\n lable = self.labels[neighbor[1]]\n dest_to_neighbor = neighbor[0]\n num += lable / dest_to_neighbor\n den += 1 / dest_to_neighbor\n return num/den", "def total_area(self):\n return numpy.prod([r[1] - r[0] for r in self.range_])", "def get_tot_occ_rate(self):\n return 10 ** (self.a_value - self.b_value * self.min_mag) - \\\n 10 ** (self.a_value - self.b_value * self.max_mag)", "def pct_helper(self,k,d,total):\n if k in d:\n return 100.0*d[k]/total\n else:\n return -100.0", "def proper_annulus_centres(self) -> Quantity:\n return self._proper_ann_centres", "def current_nbc_coverage():\n covered = 0\n total = 0\n for layer in layer_to_compute:\n covered = covered + np.count_nonzero(nbc_cov_dict[layer.name])\n total = total + np.size(nbc_cov_dict[layer.name])\n return covered / float(total)", "def _calcTotalMax(self, item, masterIntf, slaveIntf):\n if self.classmax:\n total = int((self.params[masterIntf]['intf_max']*self.params[masterIntf][slaveIntf]))\n total = int(total/self.params[masterIntf]['total_allocated'])+item['reserved']\n totalAll = item['reqRate'] + item['reserved']\n return max(total, totalAll)\n return item['reqRate'] + item['reserved']", "def ram_percent(self):\n self.monitoring_object['ram_percent'] = \\\n psutil.virtual_memory().used", "def percent_updated(self):\n return self.percent_complete - self.previous_percent_complete", "def findBoundResonances(resonance):\n\n from ccpnmr.analysis.core.AssignmentBasic import getBoundResonances\n \n return getBoundResonances(resonance)", "def cf_mean(self):\n return self['capacity_factor'] / 100", "def cf_mean(self):\n return self['capacity_factor'] / 100", "def per_hour(self):\n if self.is_salary():\n return 0.0\n return self.wage_cents / 100.0", "def calculate_finalscore(self):\n\n if self.count!=0:\n print(self.count)\n print(self.badGuess)\n self.finalScore=(self.total/self.count)- ((self.total/self.count)*(10*self.badGuess)/100)\n\n\n else:\n self.finalScore=self.total", "def get_total_risk_level(self) -> int:\n origin = Point(0, 0)\n bounded_points = self.grid.get_bounded_points(origin, self.target)\n return sum(self.get_region(point) for point, _ in bounded_points)", "def g_score(self):\n _, _, I_CK = self._entropies()\n return 2.0 * I_CK", "def get_percentage(self):\n return self.percentage", "def get_percentage(self):\n return self.percentage", "def compute_quotation_price(self):\n result = decimal.Decimal('0')\n if self.vehiculePrice:\n result = self.vehiculePrice * 2 / 100\n if self.covWind:\n result += get_coverage_price_by_name(\"WIND\")\n if self.covPass:\n result += get_coverage_price_by_name(\"PASS\")\n if self.covFlood:\n result += 
get_coverage_price_by_name(\"FLOOD\")\n return result", "def percent_busy(self):\n return self._percent_busy", "def compute_sufficient_stats(self):\n self.counts = (np.sum(self.resp, axis=0) + 10e-30)\n # print(self.counts)\n for k in range(self.k):\n self.means[k] = np.sum(self.resp[n, k] * self.x[n] for n in range(self.n)) / self.counts[k]\n self.covars[k] = np.sum(self.resp[n, k] * (self.x[n] - self.means[k]) @ (self.x[n] - self.means[k]).T\n for n in range(self.n)) / self.counts[k]\n self.covars[k] = np.nan_to_num(self.covars[k])\n self.means[k] = np.nan_to_num(self.means[k])", "def _getBaselineThresh(self):\n print('Calculating 10% baseline')\n self.baseline = obrienBaseline.obrienBaseline(\n self.d['dos1rate'], timeWidth=5.0, \n cadence=0.1)\n self.peak_std = ( (self.d['dos1rate'][self.peakInd]/10 - \n self.baseline[self.peakInd]/10)/ \n np.sqrt(self.d['dos1rate'][self.peakInd]/10))\n return", "def percentageChange(self):\n try:\n curPrice = self.dailyData[-1].currentPrice\n closePrice = self.historicData[-1].closePrice\n except IndexError: # Just return zero when no historic or dailyData is available yet\n return 0.0\n return (curPrice - closePrice)/closePrice * 100", "def totalValue(self):\n\n\t\tvalue = 0\n\t\tfor bottle in self.bottles:\n\t\t\tvalue += bottle.inflatedCost\n\n\t\treturn value", "def lower_bound(self) -> float:\n ...", "def tax_rate(self) -> float:\n return round((self.total / self.income) * 100, 2)", "def calc_low_energy_bulb_ratio(lighting_outlets_total, lighting_outlets_low_energy):\n return int(100 * float(lighting_outlets_low_energy) / lighting_outlets_total + 0.5) / 100.0", "def get_pc_per_range(model, class_name):\n class_total = model.class_counts[class_name]\n if model.num_runs is not None:\n class_total = model.num_runs * class_total * .33\n\n true_positives, totals = model.range_metrics_10[class_name]\n purities = [] # Accuracy per range (true positive/total)\n comps = []\n TP_count = 0\n total_count = 0\n\n for index in reversed(range(len(true_positives))):\n cur_p = 0 # Current purity\n cur_c = 0 # Current completeness\n TP_count += true_positives[index]\n total_count += totals[index]\n if total_count != 0:\n # positive class samples / totals # with prob in range\n cur_p = TP_count / total_count\n if class_total != 0:\n cur_c = TP_count / class_total\n\n purities.append(cur_p)\n comps.append(cur_c)\n purities.reverse()\n comps.reverse()\n return purities, comps", "def theoretical_effective(dataset):\n return float(sum(dataset))/len(dataset)", "def _compute_register_bounds(cls, num_values, probability):\n bits = np.arange(1, num_values + 1)\n probs = scipy.stats.geom.cdf(bits, probability)\n return probs / probs[-1]", "def ComputeRegenerativeBraking(self):\r\n pass", "def get_opinion_percent(self):\n return (self.get_percent()+100)/2", "def _calc_freeze_probability(self, num_iterations, final_fraction):\n return 1.0 - (final_fraction ** (1.0 / num_iterations))", "def rate(self) -> float:\n return self.success_cnt / self.total_cnt if self.total_cnt > 0 else 1.0", "def upper_bound(stock):\n counter=0\n for i in stock_price(stock):\n if i >= resistance(stock):\n counter+=1\n return counter", "def recall(self) -> float:\n if self.ref_ignored:\n num_ref_ignored = len(self.ref_set) - len(self.ref_unignored_set)\n self.num_ignored += num_ref_ignored\n # True Positive = the number of unignored reference mappings that are Positive\n tp = len(self.ref_unignored_set.intersection(self.pre_set))\n # False Negative = the number of unignored reference mappings that 
are Negative\n fn = len(self.ref_set) - tp - num_ref_ignored\n return tp / (tp + fn)", "def percentage_complete(self) -> float:\n return self.__percentage_complete", "def compute(self):\n rsa = self._session_graph.get_graph_property(self._FACTOR_KEY)\n rsa = rsa if rsa else 0.\n tr = self._session_graph.graph.num_edges()\n tr = tr if tr > 0 else 1\n rs = self._traffic_record['response_size']\n rsa = ((float(rsa) * (float(tr) - 1.)) + float(rs)) / float(tr)\n self.append_graph_factor('float', rsa)\n\n print \"Response Size Average : \", rsa\n pass", "def evaluate_percentage_of_class_for_each_flight(df_flights_info):\n df_new = df_flights_info[['FlightId', 'FareClass', 'Booking']]\n df_new = df_new.pivot(index='FlightId', columns='FareClass', values='Booking').astype(float)\n df_new.loc[:, 'Total'] = df_new.sum(axis=1).astype(float)\n for row, col in df_new.iterrows():\n for item in list(df_new):\n number_booking = df_new.loc[row, item]\n total_booking = df_new.loc[row]['Total']\n percentage = float(number_booking / total_booking)\n df_new.at[row, item] = percentage\n df_new = df_new.drop(columns=['Total'])\n df_new = df_new.stack()\n df_new = df_new.reset_index(level=[0, 1])\n df_flights_info = pandas.merge(df_flights_info, df_new, how='left', on=['FlightId', 'FareClass'])\n df_flights_info.rename(columns={0: 'Percentage'}, inplace=True)\n return df_flights_info", "def dilutionneeded(self) -> float:\n return self.stock*1.0/self.final" ]
[ "0.68673694", "0.66328514", "0.65427613", "0.64778984", "0.64688313", "0.64383364", "0.63684523", "0.6308236", "0.6169081", "0.6124151", "0.60928077", "0.60605025", "0.6027442", "0.60271585", "0.60221356", "0.60200536", "0.6008577", "0.6006814", "0.5980527", "0.5972647", "0.5957324", "0.5947713", "0.5925849", "0.5912426", "0.5882893", "0.5882494", "0.58785963", "0.58748186", "0.587026", "0.58699393", "0.58682823", "0.5865907", "0.5845565", "0.5845565", "0.58424973", "0.58351046", "0.58340216", "0.58212936", "0.58189946", "0.58134544", "0.58093274", "0.5808309", "0.57993007", "0.578817", "0.57864535", "0.5785923", "0.5773871", "0.57572144", "0.5757201", "0.5748954", "0.5748799", "0.5748273", "0.57449937", "0.5731056", "0.5724983", "0.5722081", "0.57163554", "0.5714958", "0.57147497", "0.5713325", "0.57118773", "0.5709847", "0.57016456", "0.56990707", "0.5677512", "0.56720984", "0.56695664", "0.5668416", "0.5663785", "0.5660913", "0.56543", "0.56543", "0.5651694", "0.56430686", "0.5639245", "0.56349605", "0.56294227", "0.56294227", "0.56277084", "0.56253445", "0.5624303", "0.5624263", "0.5620671", "0.5618413", "0.5618074", "0.5611774", "0.5605818", "0.55979836", "0.55946213", "0.559111", "0.5589614", "0.5589394", "0.55878824", "0.5586106", "0.55805075", "0.5574297", "0.5564137", "0.55619425", "0.5550346", "0.5547965" ]
0.65185946
3
Method which calculates the Steals Percentage of a player
def set_steals_percentage(self): bx = self.get_standard_stats() team = self.get_team_stats() opp_team = self.get_opp_team_stats() poss = self.get_team_possessions() result = 0.00 if bx["minutes"] > 0: result = ((bx["steals"] * (team["minutes"]/Decimal('5'))) / Decimal(float(bx["minutes"]) * poss)) * 100 self.steals_percentage = "%.2f" % round(result, 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_percent(self, n):\n controlled = 0.00\n for i in range(len(self.tile_contents)):\n if(self.tile_contents[i].player_number == n):\n controlled += 1.00\n \n return float(controlled / self.paint_blocks)", "def winning_percentage(self):\n return float(len(self.wins))/len((self.wins+self.losses))", "def get_win_percentage(self) -> float:\n if self.wins == 0:\n return 0.0\n else:\n return round((self.wins / (self.wins + self.losses)) * 100, 2)", "def pct(self):\n\t\treturn self.bottle.pct()", "def get_winpercent(self, game: 'Game' = None, player: 'Player' = None):\n if game and player:\n pass\n elif game:\n play_count = self.play_set.filter(game=game).count()\n win_count = self.play_set.filter(winner=self, game=game).count()\n return win_count / play_count * 100\n elif player:\n pass\n # play_count = self.play_set.filter(players__in=player).count()\n # win_count = self.play_set.filter(\n # winner=self, player=player).count()\n # return win_count / play_count * 100\n else:\n return self.winpercent", "def percentage_update(self):\n\n self.event_update()\n return self.percentage", "def set_ts_percentage(self):\n bx = self.get_standard_stats()\n ptos = float(bx[\"t2p_conv\"]*2 + bx[\"t3p_conv\"]*3 + bx[\"tl_conv\"])\n tcInt = float(bx[\"t2p_int\"] + bx[\"t3p_int\"])\n tsAttempts = float(tcInt + (0.44*float(bx[\"tl_int\"])))\n result = 0.00\n if tsAttempts > 0.00:\n result = (ptos/(2*tsAttempts))*100\n self.ts_percentage = \"%.2f\" % round(result, 2)", "def per_hour(self):\n if self.is_salary():\n return 0.0\n return self.wage_cents / 100.0", "def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)", "def getPercent(*args):", "def getPercent(*args):", "def percentage(count, total):\n return count / total * 100", "def set_assists_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n team_tc_conv = team[\"t2p_conv\"] + team[\"t3p_conv\"]\n player_tc_conv = bx[\"t2p_conv\"] + bx[\"t3p_conv\"]\n result = 0.00\n try:\n if bx[\"minutes\"] > 0:\n result = (bx[\"assists\"] / (((bx[\"minutes\"] / (team[\"minutes\"] / 5)) * team_tc_conv) - player_tc_conv))*100\n result = result if result <= 100 and result >= 0 else 0\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n except InvalidOperation:\n print(BCOLORS.WARNING + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n\n self.assists_percentage = \"%.2f\" % round(result, 2)", "def get_percentage(self):\n return self.percentage", "def get_percentage(self):\n return self.percentage", "def p(party, vote_count, s):\n return t(party, vote_count) / d(s)", "async def player_ratio(self, ctx):\r\n player = ctx.message.content.split(' ')[1]\r\n if os.environ.get(\"WoW_Token\") is None:\r\n return\r\n else:\r\n async with aiohttp.ClientSession().get('https://us.api.battle.net/wow/character/zul\\'jin/' + player + '?fields=pvp&locale=en_US&apikey=' + os.environ.get(\"WoW_Token\")) as res:\r\n if res.status == 200:\r\n data = await res.json()\r\n player_pvp_stats = data['pvp']['brackets']['ARENA_BRACKET_3v3']\r\n await ctx.message.channel.send(u\"Player: {:s}\").format(player)\r\n await ctx.message.channel.send(\"Rating: {:d}\".format(player_pvp_stats['rating']))\r\n await ctx.message.channel.send(\"Season Wins: {:d}\".format(player_pvp_stats['seasonWon']))\r\n await ctx.message.channel.send(\"Season Losses: {:d}\".format(player_pvp_stats['seasonLost']))\r\n\r\n if player_pvp_stats['seasonWon'] == 0 or 
player_pvp_stats['seasonLost'] == 0:\r\n await ctx.message.channel.send(\"Ratio: 0\")\r\n else:\r\n await ctx.message.channel.send(\"Ratio: {:.4f}\".format(\r\n float(player_pvp_stats['seasonWon'])/\r\n float(player_pvp_stats['seasonLost']))\r\n )", "def percentage(self):\n return sum(self.chunk_percentage) / self.total_steps", "def update_calculated_stats(self, player_data):\n # Updates calculated statistics\n fga = player_data['FGA']\n fgm = player_data['FGM']\n pa3 = player_data['3FGA']\n pm3 = player_data['3FGM']\n try:\n player_data['FG%'] = fgm/fga\n except:\n player_data['FG%'] = 0.0\n try:\n player_data['3FG%'] = pm3/pa3\n except:\n player_data['3FG%'] = 0.0\n return(player_data)", "def percentage_progress(self):\n\n if self.total_progress != 0:\n return float(self.progress) / self.total_progress\n return self.progress", "def percentage_progress(self):\n\n if self.total_progress != 0:\n return float(self.progress) / self.total_progress\n return self.progress", "def calculateWinRate():\n times = 10\n winRate = 0.0\n for i in range(times):\n game = Game('user', 6, 6)\n winRate += game.play(5, False, True, False, False)\n winRate = winRate/times\n print \"Winrate:\", winRate", "def value(self):\n #import pdb; pdb.set_trace()\n return ((self.team1.get_cur_hp() / self.team1.get_total_hp()) - \n (self.team2.get_cur_hp() / self.team2.get_total_hp()))", "def get_percent(self):\n return self.percent", "def progress(self) -> int:\n return int(round(100 * self.somme() / self.finances))", "def getPercent(self):\n if isinstance(self.score,numbers.Number) and self.getMaximum():\n return (1.0*self.score/self.getMaximum())\n return None", "def set_usg_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n tcInt = bx[\"t2p_int\"] + bx[\"t3p_int\"]\n a = tcInt + (Decimal('0.44')*bx[\"tl_int\"]) + bx[\"turnovers\"]\n b = team[\"minutes\"]/5\n c = (team[\"t2p_int\"] + team[\"t3p_int\"]) + (Decimal('0.44')*team[\"tl_int\"]) + team[\"turnovers\"]\n result = 0.00\n if bx[\"minutes\"] > 0:\n result = ((Decimal(a)*Decimal(b))/(bx[\"minutes\"]*c))*100\n self.usg_percentage = \"%.2f\" % round(result, 2)", "def percent_raised(self):\n total_cost = self.total_cost()\n if total_cost:\n return round(self.total_raised() * 100 / total_cost, 2)\n else:\n return 0", "def percent_passing(self) -> float:\n num_meas = Enumerable(self.mlc_meas).select_many(lambda m: m.passed).count()\n num_pass = (\n Enumerable(self.mlc_meas)\n .select_many(lambda m: m.passed)\n .count(lambda p: bool(p) is True)\n )\n return float(100 * num_pass / num_meas)", "def get_real_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_real_rating() / self.field.range)", "def update_percent(self):", "async def get_rob_percentage(level):\n chance = int(6 + (level // 10)) # first 10 levels is 6 for 30% chance\n if chance > 16:\n chance = 16\n return chance", "def get_percentage_sf_votes(self):\n\n votes_f = self.get_num_f_votes()\n votes_sf = self.get_num_sf_votes()\n\n # avoid dividing by zero\n if votes_f + votes_sf == 0:\n return 0\n else:\n ratio = float(votes_sf)/(votes_f + votes_sf)\n return round(ratio * 100, 1)", "def determineAmountToCall(self, player):\n\t\treturn sum(self.currentBet) - sum(player.betAmount)", "def compute_player_score():\n\n progress_bar = ProgressBar(label=\"Computing universes\")\n\n survivals_count = 0\n for i in range(PARALLEL_UNIVERSES_COUNT):\n if simulate_universe():\n survivals_count += 1\n progress_bar.set_progression((i + 1) / 
PARALLEL_UNIVERSES_COUNT)\n\n progress_bar.end(\"\\n\\n\")\n\n return survivals_count / PARALLEL_UNIVERSES_COUNT", "def give_half(self, player):\n self.transfer(\n self,\n player,\n int(ceil(self.account.balance/2.0))\n )\n return '%s spins \\'hey\\' and gets half.' % (player,)", "def custom_score_3(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player)\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player))\n if opp_moves == 0:\n return float(\"inf\")\n\n #Between 1-8\n return own_moves / opp_moves", "def get_opinion_percent(self):\n return (self.get_percent()+100)/2", "def calculatePercentChange(self, oldValue, newValue):\n return (((newValue - oldValue)/oldValue)*100)", "def as_percent(self):\n if self.dynamic:\n self._update_db_obj()\n return f\"{(self._db_obj.progress / self._db_obj.total) * 100}%\"", "def winRate(DF):\r\n df = DF[\"return\"]\r\n pos = df[df>1]\r\n neg = df[df<1]\r\n return (len(pos) / len(pos + neg)) * 100", "def get_score_percent(self, value):\n qs_related = RoundData.objects.prefetch_related(\n 'shotdata').select_related('shotdata')\n\n round_holes = int(self.round_type)\n\n if value == 'par':\n return round((qs_related.filter(shotdata__nr_strokes=F('shotdata__hole__par')).count()/round_holes), 2)\n if value == 'birdie_better':\n return round((qs_related.filter(shotdata__nr_strokes__lt=F('shotdata__hole__par')).count()/round_holes), 2)\n if value == 'tbogey_worse':\n return round((qs_related.filter(shotdata__nr_strokes__gte=F('shotdata__hole__par')+3).count()/round_holes), 2)\n if isinstance(value, int):\n return round((qs_related.filter(shotdata__nr_strokes=F('shotdata__hole__par') + value).count()/round_holes), 2)", "def shield_percentage(self) -> Union[int, float]:\n if not self.proto.shield_max:\n return 0\n return self.proto.shield / self.proto.shield_max", "def shield_percentage(self) -> Union[int, float]:\n if not self.proto.shield_max:\n return 0\n return self.proto.shield / self.proto.shield_max", "def percentage_used(self):\n return self.volume_used/self.total_volume * 100.0", "def percent(self):\r\n return self._percent", "def get_percent_wet():\n # Create an ADS1115 ADC (16-bit) instance.\n adc = Adafruit_ADS1x15.ADS1115()\n\n GAIN = 1\n DRY = 20280 # 100% Dry\n WET = 10140 # 100% Wet\n\n value = adc.read_adc(0, gain=GAIN)\n \n # print \"value: %d\" % value\n \n percent_dry = ((value - WET)*100)/(DRY-WET)\n percent_wet = 100 - percent_dry\n\n return percent_wet", "def percent(value, total):\n if total:\n return float(value) * 100.0 / float(total)\n else:\n return 100.0", "def __calc_s(self, df):\n df.loc[:, \"avg_num_drivers\"] = df.idle + df.incoming\n s = df.total / df.avg_num_drivers # df.total := amount of demand\n s[s > 1] = 1\n s[np.isnan(s)] = 0.0001\n s[np.isinf(s)] = 1\n\n df.loc[:, \"prob_of_s\"] = s\n df = df[[\"zone_id\", \"prob_of_s\"]]\n return df", "def pulse_width_percent(self) -> float:", "def _percent(self, lines_total, lines_covered):\n\n if lines_total == 0:\n return '0.0'\n return str(float(float(lines_covered) / float(lines_total)))", "def percentage(a, b):\n return (a * 100.0) / b", "def exceeded_ratio(self) -> float:\n return self.amount_spent / self.total_amount", "def calculate_percent(self, total_number, some_number):\n\t\treturn (some_number * 100) / total_number", "def stealability(self):\n stealability_score = float(self.price) / float(self.weight)\n print 
(stealability_score)\n\n if stealability_score < 0.5:\n return 'Not so stealable...'\n elif stealability_score >= 0.5 and stealability_score < 1.0:\n return 'Kinda stealable.'\n else:\n return 'Very stealable!'", "def percentage(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"percentage\")", "def pulsewidth2pct(pw): \n shifted = pw - 500.0\n scaled = shifted / 2000.0 * 100.0\n pct = scaled\n return pct", "def calculate_profit(self):", "def pe_ratio(self):\n try:\n return self.price / self.dividend_yield\n except ZeroDivisionError:\n return 0.0", "def abbott_steam():\n per_klb = 20 # dollars per klb of steam\n kwh_eq = to_kwh(1) # kwh equivalent of steam\n per_kwh = per_klb / kwh_eq\n return per_kwh", "def get_percentage(self):\n return self.PotTax_percentage", "def calculate_probability(self):\n return 0", "def calculate_utility(state, player):\n thisPlayer = player\n \n if state.winner() == (not thisPlayer):\n return -BigInitialValue\n if state.winner() == thisPlayer:\n return BigInitialValue\n return calculate_possible_fours(state, thisPlayer) - calculate_possible_fours(state, not thisPlayer)", "def percentageChange(self):\n try:\n curPrice = self.dailyData[-1].currentPrice\n closePrice = self.historicData[-1].closePrice\n except IndexError: # Just return zero when no historic or dailyData is available yet\n return 0.0\n return (curPrice - closePrice)/closePrice * 100", "def percentage(part, whole):\n return round((100 * float(part)/float(whole)),2)", "def utility(self, state, player):\n if state.isWin() or state.isLose():\n return state.getScore()\n\n # In case of cycle.\n if player == PACMAN:\n return INFINITY\n else:\n return -INFINITY", "def percent_usage(value, total):\n if total:\n return float(value) * 100.0 / (float(total) + float(value))\n else:\n return 100.0", "def calculateSaleReturn(S,R,F,T):\n if (T > S):\n return 0\n\n if F == 100:\n return R*T/S\n\n return float(R) * ( 1.0 - math.pow(float(S-T)/float(S) , (100.0/float(F))))", "def get_percentage_f_votes(self):\n\n votes_f = self.get_num_f_votes()\n votes_sf = self.get_num_sf_votes()\n\n # avoid dividing by zero\n if votes_f + votes_sf == 0:\n return 0\n else:\n ratio = float(votes_f)/(votes_f + votes_sf)\n return round(ratio * 100, 1)", "def do_damage(self) -> float:\n sum = 0\n for operator in self.__operators:\n if operator.is_alive:\n operator.experience += 1\n sum += operator.experience / 100\n return 0.1 + sum", "def percent(obj,object2):\n if object2:\n return int(float(int(obj))/object2*100)\n else:\n return 0", "def calculate_my_win_strength(self):\n self.winStrength = self.strategy(deepcopy(self.currentBoardState))", "def set_total_reb_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n player_rebounds = bx[\"reb_def\"] + bx[\"reb_of\"]\n team_rebounds = team[\"reb_def\"] + team[\"reb_of\"]\n opp_team_rebounds = opp_team[\"reb_def\"] + opp_team[\"reb_of\"]\n result = 0.00\n try:\n if bx[\"minutes\"] > 0 and bx[\"minutes\"] > 0:\n result = ((player_rebounds * (team[\"minutes\"]/5)) / (bx[\"minutes\"] * (team_rebounds + opp_team_rebounds)))*100\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n\n self.total_reb_percentage = \"%.2f\" % round(result, 2)", "def do_damage(self) -> float:\n res = 0.05 + self.experience / 100\n self.experience = self.experience + 1\n return res", "def 
percentage(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"percentage\")", "def value(self, board, current_player, opposite_player):\n my_fours = self.check_for_streak(board, current_player, 4)\n my_threes = self.check_for_streak(board, current_player, 3)\n my_twos = self.check_for_streak(board, current_player, 2)\n opp_fours = self.check_for_streak(board, opposite_player, 4)\n if opp_fours > 0:\n return -100000\n else:\n return my_fours*100000 + my_threes*100 + my_twos", "def per(a):\n return a * 100", "def get_song_percent_remaining(result):\n return int((1 - (get_song_elapsed_milliseconds(result) / get_song_length_milliseconds(result))) * 100)", "def percentage(self) -> str:\n return ranged_value_to_percentage(\n self._device.fan_speed_limits, self._device.fan_speed\n )", "def total_rewards(self) -> float:\n return self.__total_rewards", "def percentCheck(currentTimeLabel, totalTimeLabel):\n # Updated 11/19/16\n try:\n progPercent = float(currentTimeLabel) / float(totalTimeLabel) * 100\n except (ValueError , ZeroDivisionError):\n progPercent = 0\n \n return progPercent", "def set_tichu_percentage(threshold):\n tichu_threshold = threshold\n tichu_cnt = 0\n deck = Deck()\n players = [Player(id=0), Player(id=1), Player(id=2), Player(id=3)]\n for i in range(100):\n myhands = deck.shuffle_and_deal()\n for idx in range(4):\n players[idx].assign_hand(myhands[idx])\n score = players[idx].hand_rating\n if score > tichu_threshold:\n tichu_cnt += 1\n players[idx].hand.show()\n print('Player calls Tichu with a hand rating of {:.1f}.'.format(score))\n print('\\n')\n print('Tichu percentage: {:.2f}'.format(tichu_cnt/100))", "def calculate_progress_percentage(d):\n successcounter = 0\n for test in d:\n if d[test][\"status\"] != \"not yet run\":\n successcounter += 1\n totalcounter = 0\n for test in d:\n totalcounter += 1\n return int(successcounter / totalcounter * 100)", "def rate(self) -> float:\n return self.success_cnt / self.total_cnt if self.total_cnt > 0 else 1.0", "def percentWinMult(percentWin, mult):\n if mult == 2:\n #increase percentage by a bit\n return percentWin - 0.08\n elif mult == 4:\n return percentWin - 0.04\n else:\n return percentWin - (mult/1000)", "def adv_ratio(self): # XXX\r\n bw = StatsRouter.global_bw_mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/bw", "def hero_healing_per_min(self):\n return self._hero_healing_per_min", "def set_effective_field_goal_percentage(self):\n bx = self.get_standard_stats()\n tcInt = float(bx[\"t2p_int\"] + bx[\"t3p_int\"])\n tcConv = float(bx[\"t2p_conv\"] + bx[\"t3p_conv\"])\n result = 0.00\n if tcInt > 0:\n result = ((tcConv + (0.5 * float(bx[\"t3p_conv\"]))) / tcInt) * 100\n self.effective_field_goal_percentage = \"%.2f\" % round(result, 2)", "def host_result_value(winner: Winner) -> float:\n if winner == Winner.HOME:\n return 1\n if winner == Winner.AWAY:\n return 0\n return 0.5", "def calculateP(SD, numDiff):\n return numDiff/SD", "def calculateP(SD, numDiff):\n return numDiff/SD", "def _calculate_result(found, total):\n return (found * 100) / total", "def calculate_emission_prob(cls, w, t):\n #\n # p_w_t = cls._emission_counts[w, t]\n # p_t = cls._uni_transition_counts[t]\n\n return float(cls._emission_counts[w, t] / cls._uni_transition_counts[t])", "def effectivedb_size_percentage(self):\n if self._vsanobj.id is None:\n raise VsanNotPresent(\"Vsan \" + str(self._vsanobj._id) + \" is not present on the switch.\")\n out = self.__show_zone_status()\n retout = out.get(get_key(zonekeys.EFFDB_PER, self._SW_VER), 
None)\n if retout is not None:\n return str(retout) + \"%\"\n return None", "def calculate(self) -> float:", "def playerrawdmg(self):\n playerstr = globalvalues.p1.getstrength()\n # see combatvaluetable.xlsx to see some possible values of\n # playerrawdamage. Base formula is below:\n #\n rawdmg = int((playerstr - 4) * 102 * 0.32)\n\n # Things that will deviate the amount of damage done.\n level = globalvalues.p1.getlevel() - globalvalues.ai.getstatus()[0]\n modvalue = float(1 + level * 0.05)\n rngfactor = float(1 + float(random.randint(85, 105)) / 100)\n\n return int(rawdmg * modvalue * rngfactor)", "def percentage_change(old_value, new_value):\n\n result = float(100 * (new_value - old_value) / old_value)\n\n return result", "def custom_score(game, player):\n \n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n \n # return different between # of my agent's move and oppenent's\n return float(own_moves - opp_moves)", "def health_percentage(self) -> Union[int, float]:\n if not self.proto.health_max:\n return 0\n return self.proto.health / self.proto.health_max", "def health_percentage(self) -> Union[int, float]:\n if not self.proto.health_max:\n return 0\n return self.proto.health / self.proto.health_max" ]
[ "0.6973858", "0.68804467", "0.68694776", "0.6837882", "0.6696774", "0.65600723", "0.6460961", "0.6452967", "0.64353186", "0.6403429", "0.6403429", "0.6369753", "0.6368016", "0.63537014", "0.63537014", "0.63495165", "0.6336215", "0.632231", "0.62704694", "0.6268536", "0.6268536", "0.6254428", "0.6247955", "0.6245096", "0.62445295", "0.62402153", "0.62060624", "0.6184223", "0.61820304", "0.61702466", "0.6142152", "0.6132354", "0.6111506", "0.6107117", "0.6100469", "0.605308", "0.6049747", "0.6041493", "0.60408485", "0.6017065", "0.60161644", "0.6013737", "0.6000771", "0.6000771", "0.59895474", "0.5989106", "0.5988089", "0.59849375", "0.5980745", "0.597904", "0.59272516", "0.591619", "0.59120876", "0.59110737", "0.5907079", "0.5906812", "0.5903063", "0.59011483", "0.59008044", "0.5900661", "0.58983564", "0.5897182", "0.5894933", "0.5878522", "0.58627117", "0.58596164", "0.5855341", "0.5850596", "0.58428746", "0.58392054", "0.58179814", "0.5810939", "0.58104855", "0.58061993", "0.58019394", "0.5795628", "0.5781964", "0.57778084", "0.57708776", "0.57687443", "0.5763945", "0.576002", "0.5757731", "0.5753007", "0.5752791", "0.57383066", "0.57375306", "0.5713636", "0.5711137", "0.5709597", "0.5709597", "0.5704546", "0.5698478", "0.5696937", "0.56958026", "0.5695302", "0.56939775", "0.56873477", "0.5686056", "0.5686056" ]
0.77550447
0
Method which calculates the Effective Field Goal (eTC) of a player
def set_effective_field_goal_percentage(self): bx = self.get_standard_stats() tcInt = float(bx["t2p_int"] + bx["t3p_int"]) tcConv = float(bx["t2p_conv"] + bx["t3p_conv"]) result = 0.00 if tcInt > 0: result = ((tcConv + (0.5 * float(bx["t3p_conv"]))) / tcInt) * 100 self.effective_field_goal_percentage = "%.2f" % round(result, 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getETA():", "def getETA():", "def value(self):\n #import pdb; pdb.set_trace()\n return ((self.team1.get_cur_hp() / self.team1.get_total_hp()) - \n (self.team2.get_cur_hp() / self.team2.get_total_hp()))", "def calculate_utility(state, player):\n thisPlayer = player\n \n if state.winner() == (not thisPlayer):\n return -BigInitialValue\n if state.winner() == thisPlayer:\n return BigInitialValue\n return calculate_possible_fours(state, thisPlayer) - calculate_possible_fours(state, not thisPlayer)", "def other(player):\n return 1 - player", "def other(player):\n return 1 - player", "def other(player):\n return 1 - player", "def calculate_titer(self):\n reactor = self.reactor\n (reactor.specification or reactor._run)()\n effluent = self.effluent\n # F_mass_products = effluent.imass[self.products].sum()\n # if F_mass_products: \n # return F_mass_products / effluent.F_vol\n # else:\n # return 0.\n return reactor.effluent_titer", "def calc_eta_FC(Q_load_W, Q_design_W, phi_threshold, approach_call):\n phi = 0.0\n\n ## Approach A - NREL Approach\n if approach_call == \"A\":\n\n phi = float(Q_load_W) / float(Q_design_W)\n eta_max = 0.425 # from energy.gov\n\n if phi >= phi_threshold: # from NREL-Shape\n eta_el = eta_max - ((1 / 6.0 * eta_max) / (1.0 - phi_threshold)) * abs(phi - phi_threshold)\n\n if phi < phi_threshold:\n if phi <= 118 / 520.0 * phi_threshold:\n eta_el = eta_max * 2 / 3 * (phi / (phi_threshold * 118 / 520.0))\n\n if phi < 0.5 * phi_threshold and phi >= 118 / 520.0 * phi_threshold:\n eta_el = eta_max * 2 / 3.0 + \\\n eta_max * 0.25 * (phi - phi_threshold * 118 / 520.0) / (phi_threshold * (0.5 - 118 / 520.0))\n\n if phi > 0.5 * phi_threshold and phi < phi_threshold:\n eta_el = eta_max * (2 / 3.0 + 0.25) + \\\n 1 / 12.0 * eta_max * (phi - phi_threshold * 0.5) / (phi_threshold * (1 - 0.5))\n\n eta_therm_max = 0.45 # constant, after energy.gov\n\n if phi < phi_threshold:\n eta_therm = 0.5 * eta_therm_max * (phi / phi_threshold)\n\n else:\n eta_therm = 0.5 * eta_therm_max * (1 + eta_therm_max * ((phi - phi_threshold) / (1 - phi_threshold)))\n\n ## Approach B - Empiric Approach\n if approach_call == \"B\":\n\n if Q_design_W > 0:\n phi = float(Q_load_W) / float(Q_design_W)\n\n else:\n phi = 0\n\n eta_el_max = 0.39\n eta_therm_max = 0.58 # * 1.11 as this source gives eff. of HHV\n eta_el_score = -0.220 + 5.277 * phi - 9.127 * phi ** 2 + 7.172 * phi ** 3 - 2.103 * phi ** 4\n eta_therm_score = 0.9 - 0.07 * phi + 0.17 * phi ** 2\n\n eta_el = eta_el_max * eta_el_score\n eta_therm = eta_therm_max * eta_therm_score\n\n if phi < 0.2:\n eta_el = 0\n\n return eta_el, eta_therm", "def e(p, reported, recount):\n return t(p, reported) - t(p, recount)", "def heuristic_2_reflection(game, player) -> float:\n\n reflection_available_factor = get_reflection_available_factor(game, player)\n\n return float(reflection_available_factor)", "def PV_BenefitDeath(t):\n if t > last_t:\n return 0\n else:\n return (-prj_bnft_Death(t) + PV_BenefitDeath(t + 1)) / (1 + DiscRate(t))", "def getEta(self):\n self.__eta = 3./8.*(1. - self.__alpha0 - self.__alpha1 - 2.*self.__beta)\n if self.__eta<0.: self.__eta=0. 
# erreur d'arrondi\n return self.__eta", "def hit_stand_ev_diff(hand, shoe, dealer_hand, dealer_probabilities):\n dealer_end_probs = dealer_probabilities[dealer_hand]\n # maps a player's hand to his or her (hit_ev, stand_ev, max_ev)\n player_payoffs = get_player_payoff(dealer_end_probs)\n\n ev = 0 # contains weighted ev\n total = 0 # contains total weights, to normalized at the end\n val, hard = hand\n for card in shoe:\n weight = shoe[card] # number of a card in the shoe\n total += weight\n if hard and 11 <= val <= 21:\n new_hand = (val + card, hard)\n if new_hand[0] > 21:\n ev -= weight # default loss\n else:\n ev += weight * player_payoffs[new_hand][-1]\n elif not hard and 12 <= val <= 21:\n new_val = val + card\n new_hard = False\n if new_val > 21: # go back to hard value, take A = 1\n new_val -= 10\n new_hard = True\n ev += weight * player_payoffs[(new_val, new_hard)][-1]\n elif hard and 4 <= val <= 10:\n new_val = val + card\n new_hard = True\n if card == 1: # go to soft value, take A = 11\n new_val += 10\n new_hard = False\n ev += weight * player_payoffs[(new_val, new_hard)][-1]\n else:\n raise RuntimeError(\"Should not get here: \" + str(hand))\n return (1.0 * ev / total) - player_payoffs[hand][1] # hit ev - stand ev", "def U_Function(currentPlayer, oppositePlayer, N, maxEntity):\n EndPointPlayer1 = Posisi.Posisi(N - 1, N - 1)\n EndPointPlayer2 = Posisi.Posisi(0, 0)\n \n sumPionPlayer1 = 0\n sumPionPlayer2 = 0\n\n if (currentPlayer.noPlayer == 1):\n for Pion in currentPlayer.arrayPion:\n sumPionPlayer1 += Pion.currentPosition.euclidean(EndPointPlayer1)\n for Pion in oppositePlayer.arrayPion:\n sumPionPlayer2 += Pion.currentPosition.euclidean(EndPointPlayer2)\n \n if (currentPlayer.noPlayer == 2):\n for Pion in currentPlayer.arrayPion:\n sumPionPlayer2 += Pion.currentPosition.euclidean(EndPointPlayer2)\n for Pion in oppositePlayer.arrayPion:\n sumPionPlayer1 += Pion.currentPosition.euclidean(EndPointPlayer1)\n\n if (maxEntity == 1):\n return -sumPionPlayer1 + sumPionPlayer2\n else:\n return -sumPionPlayer2 + sumPionPlayer1", "def airfoilEffT(self):\n return float(Importer(Component='Evaluations',\n VariableName='Wing airfoil efficiency factor',\n Default=.95,\n Path=self.filePath).getValue)", "def material_advantage(state, player):\n ma = 0\n if player == cc.WHITE_ACTIVE:\n white_factor = 1\n black_factor = -1\n else:\n white_factor = -1\n black_factor = 1\n \n for rank in range(8):\n for column in range(8):\n piece = state.board[rank, column]\n if piece == cc.NO_PIECE:\n continue\n elif piece in cc.WHITE_PIECES:\n if piece == cc.W_PAWN:\n ma += white_factor * cc.MA_PAWN\n elif piece == cc.W_KNIGHT:\n ma += white_factor * cc.MA_KNIGHT\n elif piece == cc.W_BISHOP:\n ma += white_factor * cc.MA_BISHOP\n elif piece == cc.W_ROOK:\n ma += white_factor * cc.MA_ROOK\n elif piece == cc.W_QUEEN:\n ma += white_factor * cc.MA_QUEEN\n elif piece == cc.W_KING:\n ma += white_factor * cc.MA_KING\n elif piece in cc.BLACK_PIECES:\n if piece == cc.B_PAWN:\n ma += black_factor * cc.MA_PAWN\n elif piece == cc.B_KNIGHT:\n ma += black_factor * cc.MA_KNIGHT\n elif piece == cc.B_BISHOP:\n ma += black_factor * cc.MA_BISHOP\n elif piece == cc.B_ROOK:\n ma += black_factor * cc.MA_ROOK\n elif piece == cc.B_QUEEN:\n ma += black_factor * cc.MA_QUEEN\n elif piece == cc.B_KING:\n ma += black_factor * cc.MA_KING\n return ma", "def custom_score(game, player):\n \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n # First obtain the improved score\n 
player_legal_moves = game.get_legal_moves(player)\n opponent = game.get_opponent(player)\n opponent_legal_moves = game.get_legal_moves(opponent)\n improved = len(player_legal_moves) - len(opponent_legal_moves)\n if improved != 0:\n return float(improved)\n \n # Second get differences from center\n center_coordinates = center_y, center_x = get_center_coordinates(game)\n player_coordinates = game.get_player_location(player)\n opponent_coordinates = game.get_player_location(opponent)\n player_center_dist = get_distances_from_center(center_coordinates, player_coordinates)\n opponent_center_dist = get_distances_from_center(center_coordinates, opponent_coordinates)\n center_dist_diff = player_center_dist - opponent_center_dist\n \n # Third obtain next_moves\n player_next_moves = [get_next_moves(game, move, list(move)) for move in player_legal_moves]\n opponent_next_moves = [get_next_moves(game, move, list(move)) for move in opponent_legal_moves] \n improved_next = len(player_next_moves) - len(opponent_next_moves)\n \n # Put player and opponent feature differences in a tuple/vector surrogoate\n feature_diff_vector = (improved, center_dist_diff, improved_next)\n \n # Provide a weighting vector for the features of each player-participant\n weight_vector = (1.5,0.1,1.0)\n # Calculate the return value = weighted difference of players' features\n weighted_difference_dot_product = sum(p*q for p,q, \\\n in zip(feature_diff_vector, weight_vector))\n \n return float(weighted_difference_dot_product)", "def calc_ertelPV(n2, bx, rel_vorticity, g=9.8,f=-1e-4):\n\n # vertical component\n\n qvert = (f+rel_vorticity)*n2\n\n # baroclinic component\n qbc = -bx**2/f\n\n # Ertel PV\n\n ertelPV = qvert + qbc\n\n # If PV is unstable\n fq = ertelPV*f # fq > 0 stable\n\n return ertelPV, qvert, qbc, fq", "def do_damage(self) -> float:\n sum = 0\n for operator in self.__operators:\n if operator.is_alive:\n operator.experience += 1\n sum += operator.experience / 100\n return 0.1 + sum", "def KentFosterII_calc(TP, FP, FN, TN):\n try:\n part1 = ((TN + FP) * (TN + FN)) / (TN + FP + FN)\n return (TN - part1) / (TN - part1 + FP + FN)\n except Exception:\n return \"None\"", "def gtf(self):\n\t #if tank is empty, conductance is 0\n\t if self.tx <= 0:\n\t return 0.\n\t\t#returns 0.5, as a function of TAI\n\t else:\n\t return 0.5", "def evaluate(game, player):\n weights = [2, 200, 2000, 20000]\n reward = 0\n opponent = get_opponent(player)\n for length in range(2, 6):\n reward += weights[length - 2] * get_num_series(game, player, length)\n reward -= weights[length - 2] * get_num_series(game, opponent, length)\n return reward", "def compute_uct(self):\n if self.visits != 0:\n return - self.reward / self.visits + self.C * math.sqrt(math.log(self.parent.visits) / self.visits)\n else:\n return float('inf')", "def calculate_advantage(stage_0, stage_1):\n # Improvement in hp difference is good.\n hp_pct_0 = (float(stage_0.friendly_life)/MAX_FRIENDLY_LIFE) - (float(stage_0.enemy_life)/MAX_ENEMY_LIFE)\n hp_pct_1 = (float(stage_1.friendly_life)/MAX_FRIENDLY_LIFE) - (float(stage_1.enemy_life)/MAX_ENEMY_LIFE)\n return hp_pct_1 - hp_pct_0", "def fermi(E,mu,T):\n if (E-mu)/T > 600:\n f=0\n\t\t\t\t\n else:\n f=1/(math.exp((E-mu)/(kB*T) )+1)\n return(f)", "def Eg_fct_T(Eg0,alpha,beta,T) :\n return Eg0-((T*T*alpha*1e-3)/(beta+T))", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n iam = (\n 1 - self.iam_1.val * 
abs(self.aoi.val) -\n self.iam_2.val * self.aoi.val ** 2)\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val * self.doc.val ** 1.5 * iam -\n (T_m - self.Tamb.val_SI) * self.c_1.val -\n self.c_2.val * (T_m - self.Tamb.val_SI) ** 2))", "def calc_ev(p1_strat, p2_strat, cards, history, active_player):\n if KuhnPoker.is_terminal(history):\n return -KuhnPoker.get_payoff(history, cards)\n my_card = cards[active_player]\n next_player = (active_player + 1) % 2\n if active_player == 0:\n strat = p1_strat[my_card + history]\n else:\n strat = p2_strat[my_card + history]\n return -np.dot(strat, [calc_ev(p1_strat, p2_strat, cards, history + a, next_player) for a in KUHN_ACTIONS])", "def eval_func(self, game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n \n if game.is_winner(player):\n return float(\"inf\")\n \n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n if game.move_count < ((game.height * game.width)/2):\n return float(self.weights[0] * own_moves - \n self.weights[1] * opp_moves - \n self.weights[2] * __distance_from_center__(game, player))\n else:\n return float(self.weights[3] * own_moves - \n self.weights[4] * opp_moves - \n self.weights[5] * __distance_from_center__(game, player))", "def energy(e: float) -> float:\n\n return (1/np.sqrt(2))*(gamma(-e/2+1/2)/(gamma(-e/2+3/4)))", "def _fv(self):\n return self.beta * (self.x ** self.c)", "def compute_utility(self, board, move, player):\n r_alive = 0\n b_alive = 0\n rk_alive = 0\n bk_alive = 0\n for line in range(8):\n for col in range(8):\n if board[line][col] == \"R\":\n r_alive += 1\n elif board[line][col] == \"B\":\n b_alive += 1\n elif board[line][col] == \"RK\":\n rk_alive += 1\n elif board[line][col] == \"BK\":\n bk_alive += 1\n # if r_Alive > b_Alive:\n # if b_Alive == 0:\n # return 1\n # else: return 0\n # elif r_Alive == 0:\n # return -1\n powkings = 1.2\n result = 0\n if player == 'B':\n result = rk_alive*powkings + r_alive - bk_alive*powkings - b_alive\n else:\n result = bk_alive*powkings + b_alive - rk_alive*powkings - r_alive\n return result", "def heuristic_combined_2_3(game, player) -> float:\n\n reflection_available_factor = get_reflection_available_factor(game, player)\n partition_possible_factor = get_partition_possible_factor(game, player)\n\n return float(reflection_available_factor + partition_possible_factor)", "def do_damage(self) -> float:\n res = 0.05 + self.experience / 100\n self.experience = self.experience + 1\n return res", "def get_cost(self) -> float:\n return math.e / self.fitness", "def calculate(self) -> float:", "def calculate(self):\n\n tp = self.confusion_matrix.tp\n tn = self.confusion_matrix.tn\n fp = self.confusion_matrix.fp\n fn = self.confusion_matrix.fn\n\n agreement = tp + tn\n chance0 = (tn + fn) * (tn + fp)\n chance1 = (fp + tp) * (fn + tp)\n sum_ = tn + fn + fp + tp\n chance = (chance0 + chance1) / sum_\n\n return (agreement - chance) / (sum_ - chance)", "def compute_efftime(table,\n kterm=0.114, # KERM\n ebv_r_coeff=2.165,\n fiber_diameter_arcsec=1.52):\n\n exptime = table[\"EXPTIME\"]\n skymag = table[\"SKY_MAG_R_SPEC\"]\n sky = 10**(-0.4*(skymag-22.5)) # nMgy/arcsec**2\n ebv = table[\"EBV\"]\n transparency = table[\"TRANSPARENCY_GFA\"]\n airmass = table[\"AIRMASS\"]\n fiberfac_psf = table[\"FIBERFAC_GFA\"] # fiber_frac * transparency normalized to 1 for nominal conditions\n fiberfac_elg = table[\"FIBERFAC_ELG_GFA\"] # fiber_frac * transparency normalized to 1 for nominal 
conditions\n fiberfac_bgs = table[\"FIBERFAC_BGS_GFA\"] # fiber_frac * transparency normalized to 1 for nominal conditions\n\n fiber_fracflux_bgs = table[\"FIBER_FRACFLUX_BGS_GFA\"] # fraction of light down fiber\n fiber_fracflux_psf = table[\"FIBER_FRACFLUX_GFA\"]\n\n exptime_nom = 1000.0 # AR seconds\n sky_nom = 3.73 # AR nMgy/arcsec**2\n flux_bright_nom = 15.8 # nMgy (r=19.5 mag for de Vaucouleurs rhalf=1.5\" BGS)\n flux_backup_nom = 27.5 # nMgy (r=18.9 mag star)\n\n # AR airmass term\n airfac = 10.0 ** (kterm * (airmass - 1.0) / 2.5)\n # AR ebv term\n ebvfac = 10.0 ** (ebv_r_coeff * ebv / 2.5)\n # AR sky readnoise\n sky_rdn = 0.932 # AR nMgy/arcsec**2\n\n # AR \"limit\" fiber flux\n fiber_area_arcsec2 = np.pi*(fiber_diameter_arcsec/2)**2\n\n # flux in fiber artificially divided by fiber_area_arcsec2 because the sky flux is per arcsec2\n fflux_bright = flux_bright_nom * transparency * fiber_fracflux_bgs / airfac / ebvfac / fiber_area_arcsec2\n fflux_backup = flux_backup_nom * transparency * fiber_fracflux_psf / airfac / ebvfac / fiber_area_arcsec2\n\n # AR effective sky\n effsky_dark = (sky + sky_rdn * exptime_nom / exptime) / (1.0 + sky_rdn / sky_nom)\n effsky_bright = (sky + sky_rdn * exptime_nom / exptime + fflux_bright) / (\n 1.0 + sky_rdn / sky_nom + fflux_bright / sky_nom\n )\n effsky_backup = (sky + sky_rdn * exptime_nom / exptime + fflux_backup) / (\n 1.0 + sky_rdn / sky_nom + fflux_backup / sky_nom\n )\n # AR effective exposure time\n efftime_dark = (\n exptime\n * (fiberfac_elg / airfac) ** 2\n * (sky_nom / effsky_dark)\n / ebvfac ** 2\n )\n efftime_bright = (\n exptime\n * (fiberfac_bgs / airfac) ** 2\n * (sky_nom / effsky_bright)\n / ebvfac ** 2\n )\n efftime_backup = (\n exptime\n * (fiberfac_psf / airfac) ** 2\n * (sky_nom / effsky_backup)\n / ebvfac ** 2\n )\n\n # set to -1 values with incorrect inputs\n bad=table[\"AIRMASS\"]<0.99\n bad |=(table[\"FIBER_FRACFLUX_GFA\"]==0)\n bad |=(table[\"TRANSPARENCY_GFA\"]>2)\n efftime_dark[bad]=0.\n efftime_bright[bad]=0.\n efftime_backup[bad]=0.\n\n return efftime_dark , efftime_bright , efftime_backup", "def getReward (events_counters):\n global P_0, P_1, C_0, C_1, C_2 \n return (P_0 - C_0) * events_counters[0] - (C_0 + C_1) * events_counters[1] - (\n C_2 * events_counters[2] - P_1 * events_counters[3])", "def custom_score_3(game, player):\n \n \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n player_legal_moves = game.get_legal_moves(player)\n opponent = game.get_opponent(player)\n opponent_legal_moves = game.get_legal_moves(opponent)\n player_legal_move_count, opponent_legal_move_count = \\\n len(player_legal_moves), len(opponent_legal_moves)\n move_count_difference = player_legal_move_count - opponent_legal_move_count\n # Find coordinates of center box\n h, w = get_center_coordinates(game)\n # Retrieve player's coordinates\n y, x = game.get_player_location(player)\n # Obtain coordinate further, closest to origin\n furthest_coord, closest_coord = max(h - y, w -x), min(h - y, w - x)\n # Return weighted, vector-valued length from origin / sum of weights\n weighted_distance_from_center = \\\n math.sqrt((closest_coord**2 + 2*(furthest_coord**2)))/3\n feature_vector = (move_count_difference, weighted_distance_from_center)\n \n weight_vector = (1.0,0.1)\n \n weighted_difference_dot_product = sum(p*q for p,q, \\\n in zip(weight_vector, feature_vector)) \n \n return float(weighted_difference_dot_product)", "def determineAmountToCall(self, player):\n\t\treturn 
sum(self.currentBet) - sum(player.betAmount)", "def acceleration(v,u,t):\n return ((v-u)/t)", "def KentFosterI_calc(TP, FP, FN, TN):\n try:\n part1 = ((TP + FP) * (TP + FN)) / (TP + FP + FN)\n return (TP - part1) / (TP - part1 + FP + FN)\n except Exception:\n return \"None\"", "def vanilaScore(self,attended,state,W):", "def heuristic_combined_1_2(game, player) -> float:\n\n center_available_factor = get_center_available_factor(game, player)\n reflection_available_factor = get_reflection_available_factor(game, player)\n\n return float(center_available_factor + reflection_available_factor)", "def get_delta_v_tot(f, e, a, P):\n\n coeff = (2.0*np.pi/P) * a / np.sqrt(1.0 - e*e)\n delta_v_tot = coeff * (1.0 + 2.0*e*np.cos(f) + e*e) / 1.0e5\n\n return delta_v_tot", "def time_elapsed(session, player):\n #TODO (also needs to be added to bot logic)", "def self_energy(gf_imp0, gf_imp):\n return 1/gf_imp0 - 1/gf_imp", "def cal_eta(self):\n\n if not self.check_def(['E','px','py','pz']):\n sys.exit('Particle error: Quadri impulsion not define (error for eta routine)')\n \n theta=math.acos(self.pz/math.sqrt(self.px**2+self.py**2+self.pz**2))\n self.eta=-math.log(math.tan(theta/2.0))", "def advancedStats():", "def report_result(force_a_before, force_b_before, force_a_after, force_b_after):\n damage_a = 0.0\n damage_b = 0.0\n ################################# YOUR CODE HERE #################################\n damage_a = calculate_training_cost(force_a_before) - calculate_training_cost(force_a_after)\n damage_b = calculate_training_cost(force_b_before) - calculate_training_cost(force_b_after)\n ##################################################################################\n return damage_a, damage_b", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val -\n (T_m - self.Tamb.val_SI) * self.lkf_lin.val -\n self.lkf_quad.val * (T_m - self.Tamb.val_SI) ** 2))", "def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n # First obtain the base information to calculate player & opponent\n # feature values\n player_legal_moves = game.get_legal_moves(player)\n opponent = game.get_opponent(player)\n opponent_legal_moves = game.get_legal_moves(opponent)\n if len(player_legal_moves) != len(opponent_legal_moves):\n return float(len(player_legal_moves) - len(opponent_legal_moves))\n \n # Get_center_coordinates and opponent. 
Then set the list of participants\n center_coordinates = center_y, center_x = get_center_coordinates(game)\n participants = [player, opponent]\n \n # Then, for each participant obtain his/her feature values \n for participant in participants:\n if participant == player:\n p_legal_moves = player_legal_moves\n player_either = player\n participant_coordinates = p_y, p_x = \\\n game.get_player_location(participant)\n player_legal_move_count, player_start_center_distance, \\\n player_total_next_moves, player_max_path_length, \\\n player_path_count, player_min_center_diff \\\n = \\\n get_player_feature_values(game, center_coordinates, \\\n player_either,participant_coordinates, p_legal_moves)\n else:\n p_legal_moves = opponent_legal_moves\n player_either = opponent\n participant_coordinates = p_y, p_x \\\n = game.get_player_location(participant)\n opponent_legal_move_count, opponent_start_center_distance, \\\n opponent_total_next_moves, opponent_max_path_length, \\\n opponent_path_count, opponent_min_center_diff \\\n = \\\n get_player_feature_values(game, center_coordinates, \\\n player_either, participant_coordinates, p_legal_moves)\n \n # Place each participant's feature values in a tuple/vector surrogate \n pro_player_vector = \\\n (player_legal_move_count, player_start_center_distance, \\\n player_total_next_moves, player_max_path_length, player_path_count, \\\n opponent_min_center_diff)\n pro_opponent_vector = \\\n (opponent_legal_move_count, opponent_start_center_distance, \\\n opponent_total_next_moves, opponent_max_path_length, \\\n opponent_path_count, player_min_center_diff)\n \n # Provide a weighting vector for the features \n weight_vector = (1.5,0.1,1.0,0.001,0.001,0.001)\n \n # Calculate the return value = weighted difference of players' features\n weighted_difference_dot_product = sum(p*(q-r ) for p,q,r \\\n in zip(weight_vector, pro_player_vector, pro_opponent_vector))\n \n return float(weighted_difference_dot_product)", "def cost(self) -> float:", "def value(self, board, current_player, opposite_player):\n my_fours = self.check_for_streak(board, current_player, 4)\n my_threes = self.check_for_streak(board, current_player, 3)\n my_twos = self.check_for_streak(board, current_player, 2)\n opp_fours = self.check_for_streak(board, opposite_player, 4)\n if opp_fours > 0:\n return -100000\n else:\n return my_fours*100000 + my_threes*100 + my_twos", "def _evaluate_electric(snapshot, params):\n positions = snapshot.particles.position\n charges = snapshot.particles.charge\n E_field = params\n energies = -charges * np.dot(positions, E_field)\n forces = np.outer(charges, E_field)\n return forces, energies", "def act(self, image, player_info):\n action = {'acceleration': 1, 'brake': False, 'drift': False, 'nitro': False, 'rescue': False, 'steer': np.random.uniform(-1,1)}\n \"\"\"\n Your code here.\n \"\"\"\n\n return action", "def playerdefeated(self):\n globalvalues.gameover_combat()", "def eta_details(self):\n\t\t# Experimentation gives you 72pts to a random science every production\n\t\t# Stupid brute force implementation for now\n\t\trequired = self.required\n\t\trate = self.player.science\n\t\tdef combine(base, add, add_time, chance):\n\t\t\t# add given add into base with +add_time tick and modified by chance\n\t\t\tfor time, p in add.items():\n\t\t\t\ttime += add_time\n\t\t\t\tp *= chance\n\t\t\t\tbase[time] = base.get(time, 0) + p\n\t\tdef _eta_details(value, time_to_prod=self.galaxy.production_rate):\n\t\t\tnaive_eta = max(0, int(math.ceil((required - value)/rate)))\n\t\t\tif 
naive_eta <= time_to_prod: return {naive_eta: 1}\n\t\t\tbase = {}\n\t\t\twithout_extra = _eta_details(value + rate*time_to_prod)\n\t\t\twith_extra = _eta_details(value + rate*time_to_prod + 72)\n\t\t\tcombine(base, without_extra, time_to_prod, 6/7.)\n\t\t\tcombine(base, with_extra, time_to_prod, 1/7.)\n\t\t\treturn base\n\t\treturn _eta_details(self.current, self.galaxy.production_rate - self.galaxy.production_counter)", "def _compute_f(self, p, dh, dv):\n return dh / (self.beta * p * dv)", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n pacmanPos = currentGameState.getPacmanPosition()\n\n food = currentGameState.getFood()\n capsules = currentGameState.getCapsules()\n return currentGameState.getScore() - 10 * capsuleDistancePlan(pacmanPos, capsules) - foodDistPlan(pacmanPos, food)", "def calculate(self):\n\n tp = self.confusion_matrix.tp\n fp = self.confusion_matrix.fp\n fn = self.confusion_matrix.fn\n\n return 1 - abs(fn - fp) / (2 * tp + fn + fp)", "def delta_energy(atom,layer1,layer2):\n global r,c,h\n return float('%.2E' % Decimal(str(r*((atom**2/layer1**2)-(atom**2/layer2**2)))))", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n \n newPos = currentGameState.getPacmanPosition()\n newFood = currentGameState.getFood()\n newGhostStates = currentGameState.getGhostStates()\n GhostLocs = currentGameState.getGhostPositions()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n capsuleLocations = currentGameState.getCapsules()\n Hueristic = 0.0\n \n if currentGameState.isWin():\n return 10000\n if currentGameState.isLose():\n return -10000\n\n FoodDistances = []\n foodLocations = newFood.asList()\n for food in foodLocations:\n FoodDistances.append(manhattanDistance(newPos,food))\n closestFood = min(FoodDistances)\n closestFoodLocation = foodLocations[FoodDistances.index(closestFood)]\n\n GhostsToMe = []\n GhostsToFood = []\n for ghost in GhostLocs:\n GhostsToMe.append(manhattanDistance(newPos,ghost))\n GhostsToFood.append(manhattanDistance(closestFoodLocation,ghost))\n closestGhostToMe = min(GhostsToMe)\n closestGhostToClosestFood = min(GhostsToFood)\n closestGhostLocation = GhostLocs[GhostsToMe.index(closestGhostToMe)]\n\n if newPos in currentGameState.getCapsules():\n capsule = 100\n else: \n capsule = 0\n \n if closestGhostToClosestFood < closestFood:\n if closestGhostToMe > 4:\n Hueristic = (1.0/(closestFood+1.0))*20 - len(foodLocations)*50 - (1/closestGhostToMe)*5\n else:\n Hueristic = (-1/closestGhostToMe)*50\n else:\n Hueristic = (1.0/(closestFood+1.0))*20 - len(foodLocations)*50 - (1/closestGhostToMe)*5\n return Hueristic", "def get_f_score(self):\n return self.get_g_score() + self.get_h_score()", "def pot_energy_morse(r_12, D, alpha,r_e):\n\n potential=float(D)*(((1.0-m.exp(-float(alpha)*(float(r_12)-float(r_e))))**2.0)-1.0)\n return potential", "def E(self, t):\n\n\t\tE = self.E0\n\n\t\t# Gaussian pulse shape\n\t\tE *= np.exp(-2.*np.log(2.)*((t-self.t0)/self.pulse_duration)**2.)\n\n\t\t# Instantaneous phase\n\t\tif self.phase:\n\t\t\tE *= np.cos(self.omega*(t-self.t0))\n\n\t\t# Transmition\n\t\tif self.remove_reflected_part and self.domain.D == 0:\n\t\t\tmaterial = self.domain.materials[0]\n\t\t\tE *= ((1.-material.Reflectivity)/material._Drude_index.real)**0.5\n\n\t\treturn E", "def deathSubtractor(self, damage, target, caller):\n target_body = target.db.body\n target_bleed_points = target.db.bleed_points\n target_death_points = target.db.death_points\n\n if target_body and damage:\n 
body_damage = target_body - damage\n if body_damage < 0:\n damage = abs(body_damage)\n target.db.body = 0\n else:\n target.db.body = body_damage\n damage = 0\n\n if target_bleed_points and damage:\n bleed_damage = target_bleed_points - damage\n if bleed_damage < 0:\n damage = abs(bleed_damage)\n target.db.bleed_points = 0\n target.db.weakness = 1\n else:\n target.db.bleed_points = bleed_damage\n damage = 0\n target.db.weakness = 1\n\n target.msg(\"|430You are bleeding profusely from many wounds and can no longer use any active martial skills.\\n|n\")\n target.location.msg_contents(f\"{target.key} |025is bleeding profusely from many wounds and will soon lose consciousness.|n\")\n\n\n if target_death_points and damage:\n death_damage = target_death_points - damage\n if death_damage < 0:\n damage = abs(death_damage)\n target.db.death_points = 0\n else:\n target.db.death_points = death_damage\n damage = 0\n\n target.msg(\"|300You are unconscious and can no longer move of your own volition.|n\")\n target.location.msg_contents(f\"{target.key} |025does not seem to be moving.|n\")\n\n else:\n pass", "def compute(self) -> torch.Tensor:\n return _fbeta_compute(self.true_positives, self.predicted_positives,\n self.actual_positives, self.beta, self.average)", "def _decisionFunction(self):\n #THIS IS WHERE THE INTELLIGENT AGENT CODE MAKES DECISION\n #since this is the hand coded extension, I'm just going to hard code some stuff\n #q learning and Sarsa should hopefully do better\n \n #this is some other hand coded stuff that you read in stone's paper\n c1 = 64 #c1 = distance in pixels\n c2 = 2.5#c2 = something to multiply angle by\n c3 = 77 #c3 is the number of pixels you assume are in 5 meteres\n #state variable 7 is distance in pixels from K1 to T1\n if self.stateVariables[7] > c1:\n self._holdBall()\n else:\n passMax = float(\"-Inf\")\n passMaxArg = None\n for i in range(1,3):\n var = (c2 * arccos(self.stateVariables[10+i])) + (self.stateVariables[8+i] / c3)\n \"\"\"\n print(\"var = \", var)\n print(\"stateVariable[\", 10 + i, \"]=\", self.stateVariables[10+i] )\n print(\"arccos of stateVariable[\", 10 + i, \"]=\", arccos(self.stateVariables[10+i]) )\n print(\"stateVariable[\", 8 + i, \"]=\", self.stateVariables[8+i] )\n \"\"\"\n \n if var > passMax:\n passMax = var\n passMaxArg = i\n self._passBall(passMaxArg)", "def generate_transE_score(self, hs, ts, r):\n\n all_embeddings = self._get_ego_embeddings()\n h_e = all_embeddings[hs]\n t_e = all_embeddings[ts]\n r_e = self.relation_embedding.weight[r]\n r_trans_w = self.trans_w.weight[r].view(\n self.embedding_size, self.kg_embedding_size\n )\n\n h_e = torch.matmul(h_e, r_trans_w)\n t_e = torch.matmul(t_e, r_trans_w)\n\n kg_score = torch.mul(t_e, self.tanh(h_e + r_e)).sum(dim=1)\n\n return kg_score", "def fight(self):\r\n\t\tif self.death():\r\n\t\t\treturn 0\r\n\t\tif self.ctime < 1:\r\n\t\t\tself.ctime += 0.05\r\n\t\telse:\r\n\t\t\tself.ctime = 0\r\n\t\t\tself.hit()", "def test_positive_electrode_potential_profile(self):\n\n # TODO: add these when have averages", "def TPR(self):\n return _div(self.TP, self.TP + self.FN)", "def get_expected_cost(self):", "def p_funct(self, yi, p, t): \t\t\n\t\t\n\t\tglobal Cg \n\t\tCg = abs(yi[0])\n\t\tglobal Ch \n\t\tCh = abs(yi[1])\n\t\tglobal tin_g \n\t\ttin_g = abs(yi[2])\n\t\tglobal tin_h \n\t\ttin_h = abs(yi[3])\n\t\t\n\t\ty = state_at(t)\n\t\trisk = 1 - (y[-1][0] + y[-1][3]) / sum(y[-1])\n\t\t\n\t\treturn abs(risk - p)", "def compute_utility(board, color):\n player1_score = 0\n player2_score = 0\n\n score = 
get_score(board)\n if color == 1:\n return score[0] - score[1]\n else:\n return score[1] - score[0]", "def etrf_func(et_inst, etr):\n return et_inst / etr", "def rhs_fenics(y,t):\n #print \"time: \",t\n uprev.vector()[:]=y\n f.t = t #dolfin needs to know the current time for cos(t)\n uprime_solver.solve()\n return uprime_solution.vector().array()", "def evaluate(self):\n # if player has no move, then player lost, -inf or inf depend on who the player is\n # if player has moves, use heuristics.\n \n #checkColorMoves = self.getAvailableMoves(self.colorIndex)\n #otherColorMoves = self.getAvailableMoves(1-self.colorIndex)\n \n checkColorMoves = self.getAvailableMovesPreferLonger(self.colorIndex)\n otherColorMoves = self.getAvailableMovesPreferLonger(1-self.colorIndex)\n\n checkColorPieces = self.getPieceCount(self.colorIndex)\n otherColorPieces = self.getPieceCount(1-self.colorIndex)\n\n #checkColorEdgePieces = self.getEgdePieceCount(self.colorIndex)\n #otherColorEdgePieces = self.getEgdePieceCount(1-self.colorIndex)\n\n if self.player == 'computer':\n if checkColorMoves == 0: #computer doesn't have moves\n return float('-inf')\n elif otherColorMoves == 0: #user doesn't have moves\n return float('inf')\n else:\n #return checkColorPieces - otherColorPieces\n return checkColorMoves - otherColorMoves\n else:\n if checkColorMoves == 0: #user doesn't have moves\n return float('inf')\n elif otherColorMoves == 0: #computer doesn't have moves\n return float('-inf')\n else:\n #return otherColorPieces - checkColorPieces\n return otherColorMoves - checkColorMoves", "def calculate(self):\n\n tp = self.confusion_matrix.tp\n tn = self.confusion_matrix.tn\n fp = self.confusion_matrix.fp\n fn = self.confusion_matrix.fn\n\n n = tp + tn + fp + fn\n e1 = (fn * (fn + 2 * tp) / (tp + fn) + fp * (fp + 2 * tn) / (tn + fp)) / n\n e2 = (fp * (fp + 2 * tp) / (tp + fp) + fn * (fn + 2 * tn) / (tn + fn)) / n\n\n return min(e1, e2)", "def custom_score(game, player):\n \n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n \n # return different between # of my agent's move and oppenent's\n return float(own_moves - opp_moves)", "def reward(self, player, winning_state):\n if winning_state == \"Tie\":\n return 1\n elif winning_state == \"Resume\":\n return -1\n else:\n if player == \"agent\":\n return 10\n else:\n return -10", "def tdew_from_ea(ea):\n return (237.3 * np.log(ea / 0.6108)) / (17.27 - np.log(ea / 0.6108))", "def calculate_value(self, hand):\n global FACE_CARDS\n #could refactor the 2 hand possiblities into methods of a Dealer and Player Class\n if hand == \"player\":\n if self.player_hand[-1].value in FACE_CARDS:\n self.player_value += 10\n elif self.player_hand[-1].value == \"A\":\n self.player_value += 11\n self.player_ace_count += 1\n else:\n self.player_value += int(self.player_hand[-1].value)\n\n if self.player_value > 21:\n if self.player_ace_count > self.player_almost_bust:\n #To prevent a Bust, your Ace became a one\n self.player_value -= 10\n self.player_almost_bust += 1\n else:\n self.player_lose()\n elif self.player_value == 21:\n self.blackjack = True\n self.endgame()\n\n elif hand == \"dealer\":\n if len(self.dealer_hand) > 1:\n if self.dealer_hand[-1].value in FACE_CARDS:\n self.dealer_value += 10\n elif self.dealer_hand[-1].value == \"A\":\n self.dealer_value += 11\n self.dealer_ace_count += 1\n else:\n self.dealer_value += int(self.dealer_hand[-1].value)\n\n if self.dealer_value > 21:\n if 
self.dealer_ace_count > self.dealer_almost_bust:\n #To prevent a Bust, the Dealer's Ace became a one\n self.dealer_value -= 10\n self.dealer_almost_bust += 1\n else:\n self.player_win()\n elif self.dealer_value == 21:\n self.player_lose()", "def calculate(self):\n\n tp = self.confusion_matrix.tp\n fp = self.confusion_matrix.fp\n fn = self.confusion_matrix.fn\n\n return tp / (tp + fp + fn)", "def KendallTau_calc(TP, FP, FN, TN):\n try:\n n = TP + FP + FN + TN\n return (2 * (TP + TN - FP - FN)) / (n * (n - 1))\n except Exception:\n return \"None\"", "def evaluationFunction(self, currentGameState, action):\n # Useful information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n newFood = successorGameState.getFood()\n newGhostStates = successorGameState.getGhostStates()\n GhostLocs = successorGameState.getGhostPositions()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n capsuleLocations = successorGameState.getCapsules()\n\n \"*** YOUR CODE HERE ***\"\n \"\"\" factors: proximity to food, proximity to ghosts \n \"\"\" \n if successorGameState.isWin():\n return 10000\n if successorGameState.isLose():\n return -10000\n\n FoodDistances = []\n foodLocations = newFood.asList()\n for food in foodLocations:\n FoodDistances.append(manhattanDistance(newPos,food))\n closestFood = min(FoodDistances)\n closestFoodLocation = foodLocations[FoodDistances.index(closestFood)]\n\n\n GhostsToMe = []\n GhostsToFood = []\n for ghost in GhostLocs:\n GhostsToMe.append(manhattanDistance(newPos,ghost))\n GhostsToFood.append(manhattanDistance(closestFoodLocation,ghost))\n closestGhostToMe = min(GhostsToMe)\n closestGhostToClosestFood = min(GhostsToFood)\n closestGhostLocation = GhostLocs[GhostsToMe.index(closestGhostToMe)] \n Hueristic = 0.0\n if closestGhostToClosestFood < closestFood:\n if closestGhostToMe > 5:\n Hueristic = (1.0/(closestFood+1.0))*20 - len(foodLocations)*10 - (1/closestGhostToMe)*5\n else:\n Hueristic = (-1/closestGhostToMe)*10000\n #Ghost is closer to me than nearest food so avoid ghost\n else:\n Hueristic = (1.0/(closestFood+1.0))*20 - len(foodLocations)*10 - (1/closestGhostToMe)*5\n return Hueristic", "def _get_reward(self, player_score, opponent_score):\n return player_score - opponent_score", "def test_get_team_strength(self):\n pass", "def ti_func(self):\n return self.ti.val - self.calc_ti()", "def life_value(ship_class, t):\n\n # Read the external CSV file with the coefficient values\n life_coefficients = pd.read_csv(\"life_coefficients.csv\", index_col=0, header=0)\n # Character of construction coefficient\n a = life_coefficients.loc[ship_class, \"character_of_construction\"]\n # Above-water tonnage coefficient\n b = life_coefficients.loc[ship_class, \"above_water_tonnage\"]\n # Q is unity for 14-inch shells\n q = 1\n # Square root of the ratio of total area to area of vitals\n sqrt_r = life_coefficients.loc[ship_class, \"sqrt_area_to_vitals\"]\n # Probability factor (Construction of Fire Effect Tables 1922, p. 31)\n p = (b * t ** (2/3)) / ((b * t ** (2/3)) + 3)\n # Life formula (Construction of Fire Effect Tables 1922, p. 
31)\n life = a * (p * sqrt_r) * (b * t ** (1/3))\n return round(life, 2)", "def manage_ev(agent, total_power, time_scale):\n # Dismantle the tuple, it was there just for compactness\n total_active_power, total_reactive_power = total_power\n if agent.electrical_vehicle.is_active():\n # If EV is in supply mode, then it acts as an additional battery\n if agent.electrical_vehicle.power_supplier == 1:\n\n # Compute the desired active and reactive powers according to the power demand and\n # the contribution of the EV\n desired_active_power_from_electrical_vehicle = total_active_power * \\\n agent.electrical_vehicle.contribution_active\n desired_reactive_power_from_electrical_vehicle = total_reactive_power * \\\n agent.electrical_vehicle.contribution_reactive\n desired_power_from_electrical_vehicle = np.abs(\n np.complex(desired_active_power_from_electrical_vehicle,\n desired_reactive_power_from_electrical_vehicle))\n\n # Try to get the power out of the EV\n if agent.electrical_vehicle.erogate(power=desired_power_from_electrical_vehicle, time_scale=time_scale):\n # If you manage to get the power that you want, then update the demand\n total_active_power -= desired_active_power_from_electrical_vehicle\n total_reactive_power -= desired_reactive_power_from_electrical_vehicle\n\n # If EV is in charging mode, then it gets power from the grid\n else:\n total_active_power += agent.electrical_vehicle.charge_current * agent.voltage_rating # To be checked (\n # Riccardo)\n agent.electrical_vehicle.charge(current=agent.electrical_vehicle.charge_current, time_scale=time_scale)\n return total_active_power, total_reactive_power", "def __toonTrackExp(self, toonId, track):\n # CCC look at next attacks that are the same track, take the max\n # toon track exp and use that for this attack\n toon = self.battle.getToon(toonId)\n if (toon != None):\n toonExpLvl = toon.experience.getExpLevel(track)\n exp = self.AttackExpPerTrack[toonExpLvl]\n if track == HEAL:\n exp = exp * 0.5\n self.notify.debug(\"Toon track exp: \" + str(toonExpLvl) +\n \" and resulting acc bonus: \" + str(exp))\n return exp\n else:\n return 0", "def calculate(self):\n\n return 2 * self.confusion_matrix.tp / \\\n (2 * self.confusion_matrix.tp + self.confusion_matrix.fp + self.confusion_matrix.fn)", "def effectiveness(self):\n self._effectiveness = 0.20 * self.ANA + 0.20 * self.DAM + 0.20 * self.MOA + 0.20 * self.MFA + 0.20 * self.NOP\n return round(self._effectiveness, 5)", "def set_offensive_ratio(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n if bx[\"minutes\"] > 0 and (bx[\"t2p_int\"] + bx[\"t3p_int\"]) > 0:\n fgm = bx[\"t2p_conv\"] + bx[\"t3p_conv\"]\n fga = bx[\"t2p_int\"] + bx[\"t3p_int\"]\n team_fgm = team[\"t2p_conv\"] + team[\"t3p_conv\"]\n team_fga = team[\"t2p_int\"] + team[\"t3p_int\"]\n team_points = team[\"t2p_conv\"]*2 + team[\"t3p_conv\"]*3 + team[\"tl_conv\"]\n points = bx[\"t2p_conv\"]*2 + bx[\"t3p_conv\"]*3 + bx[\"tl_conv\"]\n\n try:\n qAST = (Decimal(bx[\"minutes\"] / (team[\"minutes\"] / 5)) * (Decimal('1.14') * Decimal((team[\"assists\"] - bx[\"assists\"]) / team_fgm))) + \\\n Decimal((((team[\"assists\"] / team[\"minutes\"]) * bx[\"minutes\"] * 5 - bx[\"assists\"]) / ((team_fgm / team[\"minutes\"]) * bx[\"minutes\"] * 5 - fgm)) * (1 - (bx[\"minutes\"] / (team[\"minutes\"] / 5))))\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n qAST = 1\n except InvalidOperation:\n print(BCOLORS.WARNING + \"Error: Invalid 
Operation\" + BCOLORS.ENDC)\n qAST = 1\n\n fg_part = fgm * (1 - Decimal('0.5') * Decimal((points - bx[\"tl_conv\"]) / (2 * fga)) * qAST)\n\n try:\n ast_part = Decimal('0.5') * Decimal(((team_points - team[\"tl_conv\"]) - (points - bx[\"tl_conv\"])) / (2*(team_fga - fga))) * bx[\"assists\"]\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n ast_part = 0\n\n if bx[\"tl_int\"] > 0:\n ft_part = Decimal(1 - (1 - (bx[\"tl_conv\"] / bx[\"tl_int\"]))**2) * Decimal('0.4') * bx[\"tl_int\"]\n else:\n ft_part = 0\n team_scoring_poss = Decimal(team_fgm + Decimal(1 - (1 - (team[\"tl_conv\"] / team[\"tl_int\"]))**2) * team[\"tl_int\"] * Decimal('0.4'))\n try:\n team_orb_percentage = Decimal(team[\"reb_of\"] / (team[\"reb_of\"] + ((opp_team[\"reb_def\"] + opp_team[\"reb_of\"]) - opp_team[\"reb_of\"])))\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n team_orb_percentage = 0\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n team_orb_percentage = 0\n\n team_play_percentage = Decimal(team_scoring_poss / (team_fga + team[\"tl_int\"] * Decimal('0.4') + team[\"turnovers\"]))\n try:\n team_orb_weight = ((1 - team_orb_percentage) * team_play_percentage) / ((1 - team_orb_percentage) * team_play_percentage + team_orb_percentage * (1 - team_play_percentage))\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n team_orb_weight = 0\n\n orb_part = bx[\"reb_of\"] * team_orb_weight * team_play_percentage\n\n fg_x_poss = (fga - fgm) * (1 - Decimal('1.07') * team_orb_percentage)\n if bx[\"tl_conv\"] > 0:\n ft_x_poss = Decimal((1 - (bx[\"tl_conv\"] / bx[\"tl_int\"]))**2) * Decimal('0.4') * bx[\"tl_int\"]\n else:\n ft_x_poss = Decimal(1 - (bx[\"tl_conv\"] / 1)**2) * Decimal('0.4') * bx[\"tl_int\"]\n try:\n sc_poss = (fg_part + ast_part + ft_part) * (1 - (team[\"reb_of\"] / team_scoring_poss) * team_orb_weight * team_play_percentage) + orb_part\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n sc_poss =0\n\n tot_poss = sc_poss + fg_x_poss + ft_x_poss + bx[\"turnovers\"]\n\n pprod_fg_part = 2 * (fgm + Decimal('0.5') * bx[\"t3p_conv\"]) * (1 - Decimal('0.5') * Decimal((points - bx[\"tl_conv\"]) / (2 * fga)) * qAST)\n\n try:\n pprod_ast_part = 2 * ((team_fgm - fgm + Decimal('0.5') * (team[\"t3p_conv\"] - bx[\"t3p_conv\"])) / (team_fgm - fgm)) * Decimal('0.5') * Decimal(((team_points - team[\"tl_conv\"]) - (points - bx[\"tl_conv\"])) / (2 * (team_fga - fga))) * bx[\"assists\"]\n except:\n pprod_ast_part = 0\n\n pprod_orb_part = bx[\"reb_of\"] * team_orb_weight * team_play_percentage * (team_points / (team_fgm + Decimal(1 - (team[\"tl_conv\"] / team[\"tl_int\"])**2) * Decimal('0.4') * team[\"tl_int\"]))\n try:\n pprod = (pprod_fg_part + pprod_ast_part + bx[\"tl_conv\"]) * (1 - (team[\"reb_of\"] / team_scoring_poss) * team_orb_weight * team_play_percentage) + pprod_orb_part\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n pprod = 0\n\n try:\n result = 100 * (pprod / tot_poss)\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n result = 0\n\n # print(\"fgm: \" + str(fgm))\n # print(\"fga: \" + str(fga))\n # print(\"team_fgm: \" + str(team_fgm))\n # print(\"team_fga: \" + str(team_fga))\n # print(\"team_points: \" + str(team_points))\n # print(\"points: \" + str(points))\n # print(\"qAST: \" + str(qAST))\n # 
print(\"fg_part: \" + str(fg_part))\n # print(\"ast_part: \" + str(ast_part))\n # print(\"ft_part: \" + str(ft_part))\n # print(\"team_scoring_poss: \" + str(team_scoring_poss))\n # print(\"team_orb_percentage: \" + str(team_orb_percentage))\n # print(\"team_play_percentage: \" + str(team_play_percentage))\n # print(\"team_orb_weight: \" + str(team_orb_weight))\n # print(\"orb_part: \" + str(orb_part))\n # print(\"fg_x_poss: \" + str(fg_x_poss))\n # print(\"ft_x_poss: \" + str(ft_x_poss))\n # print(\"sc_poss: \" + str(sc_poss))\n # print(\"tot_poss: \" + str(tot_poss))\n # print(\"pprod_fg_part: \" + str(pprod_fg_part))\n # print(\"pprod_ast_part: \" + str(pprod_ast_part))\n # print(\"pprod_orb_part: \" + str(pprod_orb_part))\n # print(\"pprod: \" + str(pprod))\n # print(\"result: \" + str(result) + \"\\n\")\n else:\n result = 0.00\n\n self.ortg = \"%.2f\" % round(result, 2)\n if Decimal(self.ortg) < 0 or Decimal(self.ortg) >= 1000:\n \"\"\"For one game, maybe we've got a negative result or one so big, so, for just only a game, we get the ORTG \n using team's formula\"\"\"\n print(BCOLORS.OKBLUE + \"ORTG negativo o superior a 1000 para jugadora => recalculamos a través de la fórmula de equipo\" + BCOLORS.ENDC)\n bx = self.get_standard_stats()\n result = round((bx[\"t2p_conv\"]*2 + bx[\"t3p_conv\"]*3 + bx[\"tl_conv\"])/self.get_team_possessions(), 2)\n self.ortg = \"%.2f\" % result", "def uncertainty_ee(self,e1,e2):\n # reco\n unc = (self._eleRecoWeight[(e1.pt(),e1.eta())][1]/self._eleRecoWeight[(e1.pt(),e1.eta())][0] + \\\n self._eleRecoWeight[(e2.pt(),e2.eta())][1]/self._eleRecoWeight[(e2.pt(),e2.eta())][0])**2\n # id-isolation\n unc += (self._eleIdIsoWeight[(e1.pt(),e1.eta())][1]/self._eleIdIsoWeight[(e1.pt(),e1.eta())][0] + \\\n self._eleIdIsoWeight[(e2.pt(),e2.eta())][1]/self._eleIdIsoWeight[(e2.pt(),e2.eta())][0])**2\n # trigger (approximate)\n unc += (abs(self._ele8TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][1]+ \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][1]*self._ele8TrgWeight[(e2.pt(),e2.eta())][0]- \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][1]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]- \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][1])/ \\\n (self._ele8TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]+ \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele8TrgWeight[(e2.pt(),e2.eta())][0]- \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]))**2\n unc += ((self._ele8TrgWeight[(e1.pt(),e1.eta())][1]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]+ \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele8TrgWeight[(e2.pt(),e2.eta())][1])/ \\\n (self._ele8TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]+ \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele8TrgWeight[(e2.pt(),e2.eta())][0]- \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]))**2\n #outcome\n return sqrt(unc)", "def score(guess, e, v, per_pixel=False):\n e_guess = surface2im(guess, v)\n cost = (e_guess - e)**2\n if per_pixel:\n cost = torch.average(cost)\n return torch.sum(cost)" ]
[ "0.5827492", "0.5827492", "0.5789208", "0.5724517", "0.5719123", "0.5719123", "0.5719123", "0.56517696", "0.5605782", "0.55988175", "0.5591225", "0.55509675", "0.55410445", "0.5514421", "0.5474888", "0.54631376", "0.54623526", "0.5455384", "0.54417086", "0.5441079", "0.54320216", "0.54271054", "0.5407109", "0.5406066", "0.53894025", "0.5385769", "0.5382831", "0.5372122", "0.5359537", "0.53455734", "0.5345241", "0.53443116", "0.53300303", "0.53276527", "0.5317901", "0.5314616", "0.5313489", "0.5310168", "0.53064", "0.5301012", "0.52887684", "0.52884823", "0.52883035", "0.5283646", "0.52775794", "0.52697885", "0.52486014", "0.5246515", "0.5237797", "0.523161", "0.5225441", "0.5224762", "0.5224647", "0.52220327", "0.52151376", "0.52143574", "0.52133083", "0.5193439", "0.5187924", "0.5183968", "0.5183794", "0.5169784", "0.51673716", "0.51666826", "0.5163875", "0.5163034", "0.51613337", "0.5161094", "0.51536965", "0.5151077", "0.5148891", "0.51425964", "0.513458", "0.5133674", "0.51332223", "0.5133011", "0.5132489", "0.51294065", "0.5127724", "0.51247627", "0.5122119", "0.5118707", "0.511702", "0.51155984", "0.51144", "0.51141536", "0.5113", "0.5108362", "0.51069224", "0.5106404", "0.5101454", "0.51004153", "0.5098319", "0.509586", "0.5093799", "0.5088446", "0.50766206", "0.50649506", "0.50562996", "0.5052945" ]
0.5157421
68
Method which calculates Assists Percentage of a player
def set_assists_percentage(self):
    bx = self.get_standard_stats()
    team = self.get_team_stats()
    team_tc_conv = team["t2p_conv"] + team["t3p_conv"]
    player_tc_conv = bx["t2p_conv"] + bx["t3p_conv"]
    result = 0.00
    try:
        if bx["minutes"] > 0:
            result = (bx["assists"] / (((bx["minutes"] / (team["minutes"] / 5)) * team_tc_conv) - player_tc_conv))*100
            result = result if result <= 100 and result >= 0 else 0
    except ZeroDivisionError:
        print(BCOLORS.WARNING + "Error: División por cero" + BCOLORS.ENDC)
    except InvalidOperation:
        print(BCOLORS.WARNING + "Error: Invalid Operation" + BCOLORS.ENDC)
    self.assists_percentage = "%.2f" % round(result, 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)", "def get_percent(self, n):\n controlled = 0.00\n for i in range(len(self.tile_contents)):\n if(self.tile_contents[i].player_number == n):\n controlled += 1.00\n \n return float(controlled / self.paint_blocks)", "def get_opinion_percent(self):\n return (self.get_percent()+100)/2", "def get_winpercent(self, game: 'Game' = None, player: 'Player' = None):\n if game and player:\n pass\n elif game:\n play_count = self.play_set.filter(game=game).count()\n win_count = self.play_set.filter(winner=self, game=game).count()\n return win_count / play_count * 100\n elif player:\n pass\n # play_count = self.play_set.filter(players__in=player).count()\n # win_count = self.play_set.filter(\n # winner=self, player=player).count()\n # return win_count / play_count * 100\n else:\n return self.winpercent", "def winning_percentage(self):\n return float(len(self.wins))/len((self.wins+self.losses))", "def percentage(count, total):\n return count / total * 100", "def getPercent(*args):", "def getPercent(*args):", "def set_assists_ratio(self):\n bx = self.get_standard_stats()\n tcInt = float(bx[\"t2p_int\"] + bx[\"t3p_int\"])\n denominador = tcInt + (0.44 * float(bx[\"tl_int\"])) + float(bx[\"assists\"]) +float(bx[\"turnovers\"])\n numerador = float(bx[\"assists\"])\n result = 0.00\n if denominador > 0:\n result = (numerador / denominador) * 100\n self.assists_ratio = \"%.2f\" % round(result, 2)", "def get_real_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_real_rating() / self.field.range)", "def pct(self):\n\t\treturn self.bottle.pct()", "def percentage(a, b):\n return (a * 100.0) / b", "def compute_player_score():\n\n progress_bar = ProgressBar(label=\"Computing universes\")\n\n survivals_count = 0\n for i in range(PARALLEL_UNIVERSES_COUNT):\n if simulate_universe():\n survivals_count += 1\n progress_bar.set_progression((i + 1) / PARALLEL_UNIVERSES_COUNT)\n\n progress_bar.end(\"\\n\\n\")\n\n return survivals_count / PARALLEL_UNIVERSES_COUNT", "def administer(self):\n\n score = 0.0\n for question in self.questions:\n if question.ask_and_evaluate() is True:\n score += 1\n return (score / len(self.questions)) * 100", "def get_percentage(self):\n return self.percentage", "def get_percentage(self):\n return self.percentage", "async def player_ratio(self, ctx):\r\n player = ctx.message.content.split(' ')[1]\r\n if os.environ.get(\"WoW_Token\") is None:\r\n return\r\n else:\r\n async with aiohttp.ClientSession().get('https://us.api.battle.net/wow/character/zul\\'jin/' + player + '?fields=pvp&locale=en_US&apikey=' + os.environ.get(\"WoW_Token\")) as res:\r\n if res.status == 200:\r\n data = await res.json()\r\n player_pvp_stats = data['pvp']['brackets']['ARENA_BRACKET_3v3']\r\n await ctx.message.channel.send(u\"Player: {:s}\").format(player)\r\n await ctx.message.channel.send(\"Rating: {:d}\".format(player_pvp_stats['rating']))\r\n await ctx.message.channel.send(\"Season Wins: {:d}\".format(player_pvp_stats['seasonWon']))\r\n await ctx.message.channel.send(\"Season Losses: {:d}\".format(player_pvp_stats['seasonLost']))\r\n\r\n if player_pvp_stats['seasonWon'] == 0 or player_pvp_stats['seasonLost'] == 0:\r\n await ctx.message.channel.send(\"Ratio: 0\")\r\n else:\r\n await ctx.message.channel.send(\"Ratio: {:.4f}\".format(\r\n float(player_pvp_stats['seasonWon'])/\r\n float(player_pvp_stats['seasonLost']))\r\n )", "def getPercent(self):\n if 
isinstance(self.score,numbers.Number) and self.getMaximum():\n return (1.0*self.score/self.getMaximum())\n return None", "def set_assists_per_turnover(self):\n bx = self.get_standard_stats()\n ratio = bx[\"assists\"]\n if bx[\"turnovers\"] > 0:\n ratio = bx[\"assists\"] / bx[\"turnovers\"]\n self.assists_per_turnover = \"%.2f\" % round(ratio, 2)", "def get_crawlera_incapsula_percent(crawlera_user):\n if crawlera_user:\n return 0\n else:\n return 100", "def percentage_update(self):\n\n self.event_update()\n return self.percentage", "def get_percent(self):\n return self.percent", "def as_percent(self):\n if self.dynamic:\n self._update_db_obj()\n return f\"{(self._db_obj.progress / self._db_obj.total) * 100}%\"", "def walkout_percentage_average(df,start_year, end_year,bat_met, player_name):\n base_fields = ['PA']\n emp_list = check_base_fields(df,base_fields)\n\n if not emp_list:\n bb_val = round((pd.to_numeric(df['BB.'].str.split('%').str[0])/100)*df['PA'],0).sum()\n pa_total = df['PA'].fillna(0).sum()\n return \"{:.2%}\".format(bb_val / pa_total)\n else:\n df = original_dataframe(start_year,end_year,bat_met+emp_list,player_name)\n return walkout_percentage_average(df,start_year, end_year,bat_met, player_name)", "def percentage(part, whole):\n return round((100 * float(part)/float(whole)),2)", "def get_win_percentage(self) -> float:\n if self.wins == 0:\n return 0.0\n else:\n return round((self.wins / (self.wins + self.losses)) * 100, 2)", "def _percent(self, lines_total, lines_covered):\n\n if lines_total == 0:\n return '0.0'\n return str(float(float(lines_covered) / float(lines_total)))", "def percentCheck(currentTimeLabel, totalTimeLabel):\n # Updated 11/19/16\n try:\n progPercent = float(currentTimeLabel) / float(totalTimeLabel) * 100\n except (ValueError , ZeroDivisionError):\n progPercent = 0\n \n return progPercent", "def percent_passing(self) -> float:\n num_meas = Enumerable(self.mlc_meas).select_many(lambda m: m.passed).count()\n num_pass = (\n Enumerable(self.mlc_meas)\n .select_many(lambda m: m.passed)\n .count(lambda p: bool(p) is True)\n )\n return float(100 * num_pass / num_meas)", "def update_percent(self):", "def calculate_percent(self, total_number, some_number):\n\t\treturn (some_number * 100) / total_number", "def percent_of(part, whole):\n return part * 100 / whole", "def update_calculated_stats(self, player_data):\n # Updates calculated statistics\n fga = player_data['FGA']\n fgm = player_data['FGM']\n pa3 = player_data['3FGA']\n pm3 = player_data['3FGM']\n try:\n player_data['FG%'] = fgm/fga\n except:\n player_data['FG%'] = 0.0\n try:\n player_data['3FG%'] = pm3/pa3\n except:\n player_data['3FG%'] = 0.0\n return(player_data)", "def calculate_scores(self):\n words = self.walk_board()\n player_scores = {}\n for word in words:\n player = word.get_owning_player()\n if player not in player_scores:\n player_scores[player] = 0\n player_scores[player] += word.get_score()\n return player_scores", "def percentage(self):\n return sum(self.chunk_percentage) / self.total_steps", "def percentage_progress(self):\n\n if self.total_progress != 0:\n return float(self.progress) / self.total_progress\n return self.progress", "def percentage_progress(self):\n\n if self.total_progress != 0:\n return float(self.progress) / self.total_progress\n return self.progress", "def _calculate_hours_percent(used_hours, estimated_hours):\n percent = (used_hours * 100) / estimated_hours\n return percent", "def percent(self):\r\n return self._percent", "def percent_processed(self):\n try:\n return 
(self.pos / self.data_encap.size) * 100.0\n except ZeroDivisionError:\n return 100.0", "def custom_score(game, player):\n \n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n \n # return different between # of my agent's move and oppenent's\n return float(own_moves - opp_moves)", "def percent_usage(value, total):\n if total:\n return float(value) * 100.0 / (float(total) + float(value))\n else:\n return 100.0", "def percent(value, total):\n if total:\n return float(value) * 100.0 / float(total)\n else:\n return 100.0", "def strikeout_percentage_average(df,start_year, end_year,bat_met, player_name):\n\n base_fields = ['PA']\n emp_list = check_base_fields(df,base_fields)\n\n if not emp_list:\n k_val = round((pd.to_numeric(df['K.'].str.split('%').str[0])/100)*df['PA'],0).sum()\n pa_total = df['PA'].fillna(0).sum()\n return \"{:.2%}\".format(k_val / pa_total)\n else:\n df = original_dataframe(start_year,end_year,bat_met+emp_list,player_name)\n return strikeout_percentage_average(df,start_year, end_year,bat_met, player_name)", "def custom_score_3(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player)\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player))\n if opp_moves == 0:\n return float(\"inf\")\n\n #Between 1-8\n return own_moves / opp_moves", "def _calculate_result(found, total):\n return (found * 100) / total", "def percent(obj,object2):\n if object2:\n return int(float(int(obj))/object2*100)\n else:\n return 0", "def get_song_percent_remaining(result):\n return int((1 - (get_song_elapsed_milliseconds(result) / get_song_length_milliseconds(result))) * 100)", "def get_hit_percent(self, area, value):\n qs_related = RoundData.objects.prefetch_related(\n 'shotdata').select_related('shotdata')\n\n round_holes = int(self.round_type)\n\n if area == 'fairway':\n total_fairways = (int(self.round_type) -\n qs_related.filter(shotdata__hole__par=3).count())\n\n return round(qs_related.filter(shotdata__fairway_hit=value).count()/total_fairways, 2)\n\n if area == 'approach':\n return round(qs_related.filter(shotdata__gir_flag=value).count()/round_holes, 2)\n\n if area == 'putts':\n return round(qs_related.filter(shotdata__putt_accuracy=value).count()/round_holes, 2)", "def pct_helper(self,k,d,total):\n if k in d:\n return 100.0*d[k]/total\n else:\n return -100.0", "def set_effective_field_goal_percentage(self):\n bx = self.get_standard_stats()\n tcInt = float(bx[\"t2p_int\"] + bx[\"t3p_int\"])\n tcConv = float(bx[\"t2p_conv\"] + bx[\"t3p_conv\"])\n result = 0.00\n if tcInt > 0:\n result = ((tcConv + (0.5 * float(bx[\"t3p_conv\"]))) / tcInt) * 100\n self.effective_field_goal_percentage = \"%.2f\" % round(result, 2)", "def calculate(self):\n\n s_sum = 0\n class_num = len(self.scores)\n \n for i in range(class_num):\n s_sum += self.scores[i]\n\n av = float(s_sum)/class_num\n if av >= 90:\n return 'O'\n elif av >= 80:\n return 'E'\n elif av >= 70:\n return 'A'\n elif av >= 55:\n return 'P'\n elif av >= 40:\n return 'D'\n else:\n return 'T'", "def percentage(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"percentage\")", "def GetResult(self, playerjm):\n return self.score / len(self.scores)", "def health_percentage(self) -> Union[int, float]:\n if not self.proto.health_max:\n return 0\n return self.proto.health / 
self.proto.health_max", "def health_percentage(self) -> Union[int, float]:\n if not self.proto.health_max:\n return 0\n return self.proto.health / self.proto.health_max", "def percentage(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"percentage\")", "def displayed_percent(self):\n return (self.displayed_words / self.total_words) * 100", "def custom_score(game, player):\n if game.is_loser(player):\n return -math.inf\n\n if game.is_winner(player):\n return math.inf\n\n opp_moves = game.get_legal_moves(game.get_opponent(player))\n own_moves = game.get_legal_moves(player)\n\n return len(own_moves) / max(len(opp_moves), 1e-6)", "def pct_status(self):\r\n # DEPRECATED: self.info.n_answers will be removed\r\n # DEPRECATED: use self.t.n_answers instead\r\n if (self.info.get('n_answers')):\r\n self.n_answers = int(self.info['n_answers'])\r\n if self.n_answers != 0 and self.n_answers != None:\r\n return float(len(self.task_runs)) / self.n_answers\r\n else: # pragma: no cover\r\n return float(0)", "def get_percent_completed(self):\n completed = self.object_list.filter(status__exact=True).count()\n total = len(self.object_list)\n return int(100 * completed / total) if total > 0 else 0", "def set_usg_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n tcInt = bx[\"t2p_int\"] + bx[\"t3p_int\"]\n a = tcInt + (Decimal('0.44')*bx[\"tl_int\"]) + bx[\"turnovers\"]\n b = team[\"minutes\"]/5\n c = (team[\"t2p_int\"] + team[\"t3p_int\"]) + (Decimal('0.44')*team[\"tl_int\"]) + team[\"turnovers\"]\n result = 0.00\n if bx[\"minutes\"] > 0:\n result = ((Decimal(a)*Decimal(b))/(bx[\"minutes\"]*c))*100\n self.usg_percentage = \"%.2f\" % round(result, 2)", "def percentage_used(self):\n return self.volume_used/self.total_volume * 100.0", "def set_ts_percentage(self):\n bx = self.get_standard_stats()\n ptos = float(bx[\"t2p_conv\"]*2 + bx[\"t3p_conv\"]*3 + bx[\"tl_conv\"])\n tcInt = float(bx[\"t2p_int\"] + bx[\"t3p_int\"])\n tsAttempts = float(tcInt + (0.44*float(bx[\"tl_int\"])))\n result = 0.00\n if tsAttempts > 0.00:\n result = (ptos/(2*tsAttempts))*100\n self.ts_percentage = \"%.2f\" % round(result, 2)", "def get_percentage_practices(measure_table):\n with open(OUTPUT_DIR / \"practice_count.json\") as f:\n num_practices = json.load(f)[\"num_practices\"]\n\n num_practices_in_study = get_number_practices(measure_table)\n\n return np.round((num_practices_in_study / num_practices) * 100, 2)", "def getGCpercentage(DNA):\n dnaLength = len(DNA) #counts the length of the DNA string\n findG = DNA.count(\"G\") #finds the letter G in DNA string\n findC = DNA.count(\"C\") #finds the letter C in DNA string\n print(findG)\n print(findC)\n print(dnaLength)\n GCpercent = ((findC + findG)/dnaLength) * 100 #calculates percentage of Gs and Cs\n print(\"Percentage of G and C:\",\" %6.2f\" % GCpercent)\n \n return getGCpercentage", "def get_score_percent(self, value):\n qs_related = RoundData.objects.prefetch_related(\n 'shotdata').select_related('shotdata')\n\n round_holes = int(self.round_type)\n\n if value == 'par':\n return round((qs_related.filter(shotdata__nr_strokes=F('shotdata__hole__par')).count()/round_holes), 2)\n if value == 'birdie_better':\n return round((qs_related.filter(shotdata__nr_strokes__lt=F('shotdata__hole__par')).count()/round_holes), 2)\n if value == 'tbogey_worse':\n return round((qs_related.filter(shotdata__nr_strokes__gte=F('shotdata__hole__par')+3).count()/round_holes), 2)\n if isinstance(value, int):\n return 
round((qs_related.filter(shotdata__nr_strokes=F('shotdata__hole__par') + value).count()/round_holes), 2)", "def points_percentage(plane, p, points, total):\n match = 0\n for point in points:\n if distance_to_plane(plane, point) <= p:\n match += 1\n\n return match / total", "def per(a):\n return a * 100", "def letter_percent(s):\r\n\r\n alpha = 'abcdefghijklmnopqrstuvwxyz'\r\n s_lower = s.lower()\r\n s_length = 0\r\n letter_count = {} # empty dictionary\r\n keys = letter_count.keys()\r\n\r\n for char in s_lower:\r\n if char in alpha:\r\n s_length = s_length + 1\r\n if char in letter_count:\r\n letter_count[char] = letter_count[char] + 1\r\n else:\r\n letter_count[char] = 1\r\n\r\n for char in sorted(keys):\r\n letter_count[char] = (letter_count[char] / s_length) * 100\r\n print(char, \"{:.1f}%\".format(letter_count[char]))", "def percentageCompletion(url, workflow, dataset):\n inputEvents = reqMgrClient.getInputEvents(url, workflow)\n outputEvents = reqMgrClient.getOutputEvents(url, workflow, dataset)\n if inputEvents == 0:\n return 0\n if not outputEvents:\n return 0\n percentage = outputEvents/float(inputEvents)\n return percentage", "def calculate_progress_percentage(d):\n successcounter = 0\n for test in d:\n if d[test][\"status\"] != \"not yet run\":\n successcounter += 1\n totalcounter = 0\n for test in d:\n totalcounter += 1\n return int(successcounter / totalcounter * 100)", "def p(party, vote_count, s):\n return t(party, vote_count) / d(s)", "def percentage(x, y):\n try:\n return 100 * (float(x) / y)\n except ZeroDivisionError:\n return \"undefined\"", "def percentage(my_list, item):\n return 100.0 * frequency(my_list, item)", "def update_stats(home_player: Player, away_player: Player,\n home_score: int, away_score: int) -> None:\n home_player.goals_for += home_score\n home_player.goals_against += away_score\n away_player.goals_for += away_score\n away_player.goals_against += home_score\n\n # home won\n if (home_score > away_score):\n home_player.wins += 1\n away_player.losses += 1\n # away won\n elif (home_score < away_score):\n home_player.losses += 1\n away_player.wins += 1\n # draw\n else:\n home_player.draws += 1\n away_player.draws += 1", "def occurance(row):\r\n # divide the row's highest counted cause by the row's total number of deaths\r\n percentage = row['max_count'] / row['all_count']\r\n percentage *= 100\r\n # round the percentage up so it's two digits\r\n return round(percentage)", "def as_percentages(self):\n if self.e_pct is None:\n self.calculate_percentages()\n\n score_str = 'E/I: ' + str(self.e_pct) + '%/' + str(self.i_pct) + '%; '\n score_str += 'N/S: ' + str(self.n_pct) + '%/' + str(self.s_pct) + '%; '\n score_str += 'F/T: ' + str(self.f_pct) + '%/' + str(self.t_pct) + '%; '\n score_str += 'J/P: ' + str(self.j_pct) + '%/' + str(self.p_pct) + '%'\n return score_str", "def calc_stats(hits, misses):\n try:\n result = (float(misses) / float(hits)) * 100.0\n except ZeroDivisionError:\n if misses == 0:\n result = 0.0\n else:\n result = 100.0\n return result", "def calculatePercentChange(self, oldValue, newValue):\n return (((newValue - oldValue)/oldValue)*100)", "def as_counts_and_pcts(self):\n if self.e_pct is None:\n self.calculate_percentages()\n\n score_str = 'E: ' + str(self.e_score) + '(' + str(self.e_pct) + '%)/'\n score_str += 'I: ' + str(self.i_score) + '(' + str(self.i_pct) + '%) - '\n score_str += 'N: ' + str(self.n_score) + '(' + str(self.n_pct) + '%)/'\n score_str += 'S: ' + str(self.s_score) + '(' + str(self.s_pct) + '%) - '\n score_str += 'F: ' + 
str(self.f_score) + '(' + str(self.f_pct) + '%)/'\n score_str += 'T: ' + str(self.t_score) + '(' + str(self.t_pct) + '%) - '\n score_str += 'J: ' + str(self.j_score) + '(' + str(self.j_pct) + '%)/'\n score_str += 'P: ' + str(self.p_score) + '(' + str(self.p_pct) + '%)'\n return score_str", "def get_attendance(self):\n\n if len(self.attendance_list):\n attendance_sum = 0\n for attendance in self.attendance_list:\n attendance_sum += attendance.attendance_state\n return attendance_sum/len(self.attendance_list) * 100\n\n else:\n return 100.0", "async def get_rob_percentage(level):\n chance = int(6 + (level // 10)) # first 10 levels is 6 for 30% chance\n if chance > 16:\n chance = 16\n return chance", "def custom_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player) / 8\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player)) / 8\n if opp_moves == 0:\n return float(\"inf\")\n\n move_ratio = (own_moves * 8) / (opp_moves * 8) / 8\n\n # Calculate centerness_score\n completeness = completeness_of_game(game)\n centerness_score = 0\n if completeness < 0.5:\n centerness_max = (game.width / 2.)**2 + (game.height / 2.)**2\n\n own_centerness = centerness(game, player) / centerness_max\n opp_centerness = centerness(game, game.get_opponent(player)) / centerness_max\n centerness_ratio = (own_centerness * centerness_max) / (centerness_max * opp_centerness + 0.1) / centerness_max\n\n centerness_score = -1 * own_centerness + opp_centerness - centerness_ratio\n\n return 2 * own_moves - 2 * opp_moves + 2 * move_ratio + centerness_score", "def percent_frequencies(self):\n word_count = 0\n local = self.frequencies()\n for key in local.keys():\n i = local[key]\n word_count += int(i)\n for key in local.keys():\n i = local[key]\n percentage = float(i) / float(word_count)\n local[key] = percentage\n return local", "def percent_rating(value):\n value = Decimal(value)\n value = round(value / 3, 2) * 100\n return value", "def get_proficiency_percentage(self):\n choice_values = [choice[0] for choice in self.PROFICIENCY_CHOICES]\n if '' in choice_values:\n choice_values.remove('') # Remove the empty proficiency choice\n choice_values.sort() # Ensure values are in the correct order\n\n value = choice_values.index(self.proficiency) + 1\n factor = 100 / len(choice_values)\n percentage = round(value * factor)\n\n return percentage", "def percent_raised(self):\n total_cost = self.total_cost()\n if total_cost:\n return round(self.total_raised() * 100 / total_cost, 2)\n else:\n return 0", "def value_to_percent(value):\n return ...", "def percent_community(self):\n total_cost = self.total_cost()\n if total_cost:\n return round(self.community_contribution * 100 / total_cost, 2)\n else:\n return 0", "def get_total_health(self,obs):\n total_health = 0\n for unit in obs.observation.raw_units:\n if(unit.alliance == PlayerRelative.SELF):\n total_health += unit[FeatureUnit.health]\n return total_health", "def percentage(context, num, total_num):\n\n p = float(num)/float(total_num) * 100\n percent = str(p) + \"%\"\n return percent", "def response_count_percentage(this_count):\n num_targets = db.session.query(ColourTargetColBG.id).count()\n return (this_count / num_targets) * 100.0", "def represent_total_percent(self, length):\n numpkgs = self.totals['numpkgs']\n dlpkgs = self.totals['dlpkgs']\n return self.represent_percent(dlpkgs, numpkgs, length)", "def 
percent_updated(self):\n return self.percent_complete - self.previous_percent_complete", "def percent(num):\n return round(num * 100, 1)", "def calculate_probability(self):\n return 0", "def percentage_complete(self) -> float:\n return self.__percentage_complete", "def availability(self, up_time=100, down_time=0, dec_point=3):\n self.up_time = up_time\n self.down_time = down_time\n self.dec_point = dec_point\n avail_percentage = round(float((up_time/(up_time+down_time))*100),dec_point) \n return avail_percentage", "def find_percentage(urls):\n # n is the number of pages that lead to philosophy\n n = 0\n for url in urls:\n if find_philosophy(url, [], 0) != -1:\n n += 1\n percentage = n * 100 / len(urls)\n return percentage" ]
[ "0.7001746", "0.68421483", "0.6813", "0.67631215", "0.67387015", "0.6656908", "0.6627645", "0.6627645", "0.6618226", "0.6604493", "0.66013306", "0.6542234", "0.6510403", "0.64416814", "0.64405453", "0.64405453", "0.64318055", "0.64273596", "0.63814425", "0.63652843", "0.6354084", "0.6325873", "0.6309475", "0.6297875", "0.6286082", "0.6275193", "0.62622076", "0.62381136", "0.62343794", "0.62315017", "0.62289983", "0.62277", "0.62006307", "0.61964226", "0.61886334", "0.6177423", "0.6177423", "0.6175648", "0.61690426", "0.61596674", "0.61416525", "0.6138142", "0.6120936", "0.60969883", "0.60961986", "0.6091701", "0.6087901", "0.60828185", "0.60672575", "0.60376185", "0.6017559", "0.60148805", "0.60108364", "0.6005199", "0.6003304", "0.6003304", "0.59943", "0.5990661", "0.5978797", "0.59784144", "0.59780824", "0.5977875", "0.596515", "0.5952", "0.5937085", "0.59344935", "0.59332335", "0.593103", "0.59262705", "0.5916972", "0.5906936", "0.5906773", "0.5893445", "0.58924633", "0.58739084", "0.5867139", "0.58595276", "0.58591443", "0.5854312", "0.58542645", "0.5851259", "0.58385366", "0.5837661", "0.58244866", "0.5823833", "0.5818401", "0.5812386", "0.5807664", "0.5786824", "0.5781525", "0.5780029", "0.57776797", "0.57698756", "0.57646286", "0.57612747", "0.5758602", "0.5756928", "0.5751051", "0.5739429", "0.5738761" ]
0.767859
0
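For reference, a minimal standalone sketch of the assists-percentage formula used by the set_assists_percentage document above; the helper name assists_percentage and the box-score numbers are hypothetical illustrations, not part of the dataset rows.

def assists_percentage(assists, minutes, team_minutes, team_fgm, player_fgm):
    # AST% = 100 * AST / (((MP / (Team MP / 5)) * Team FGM) - player FGM), clamped to [0, 100]
    denominator = ((minutes / (team_minutes / 5)) * team_fgm) - player_fgm
    if denominator <= 0:
        return 0.0
    result = 100 * assists / denominator
    return result if 0 <= result <= 100 else 0.0

# Hypothetical numbers: 5 assists in 30 of the team's 200 minutes; the team made 30
# field goals, 6 of them by this player.
print(round(assists_percentage(5, 30, 200, 30, 6), 2))  # prints 30.3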
Method which calculates Ratio Assists Per Turnover of a player
def set_assists_per_turnover(self):
    bx = self.get_standard_stats()
    ratio = bx["assists"]
    if bx["turnovers"] > 0:
        ratio = bx["assists"] / bx["turnovers"]
    self.assists_per_turnover = "%.2f" % round(ratio, 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def player_ratio(self, ctx):\r\n player = ctx.message.content.split(' ')[1]\r\n if os.environ.get(\"WoW_Token\") is None:\r\n return\r\n else:\r\n async with aiohttp.ClientSession().get('https://us.api.battle.net/wow/character/zul\\'jin/' + player + '?fields=pvp&locale=en_US&apikey=' + os.environ.get(\"WoW_Token\")) as res:\r\n if res.status == 200:\r\n data = await res.json()\r\n player_pvp_stats = data['pvp']['brackets']['ARENA_BRACKET_3v3']\r\n await ctx.message.channel.send(u\"Player: {:s}\").format(player)\r\n await ctx.message.channel.send(\"Rating: {:d}\".format(player_pvp_stats['rating']))\r\n await ctx.message.channel.send(\"Season Wins: {:d}\".format(player_pvp_stats['seasonWon']))\r\n await ctx.message.channel.send(\"Season Losses: {:d}\".format(player_pvp_stats['seasonLost']))\r\n\r\n if player_pvp_stats['seasonWon'] == 0 or player_pvp_stats['seasonLost'] == 0:\r\n await ctx.message.channel.send(\"Ratio: 0\")\r\n else:\r\n await ctx.message.channel.send(\"Ratio: {:.4f}\".format(\r\n float(player_pvp_stats['seasonWon'])/\r\n float(player_pvp_stats['seasonLost']))\r\n )", "def winning_percentage(self):\n return float(len(self.wins))/len((self.wins+self.losses))", "def set_assists_ratio(self):\n bx = self.get_standard_stats()\n tcInt = float(bx[\"t2p_int\"] + bx[\"t3p_int\"])\n denominador = tcInt + (0.44 * float(bx[\"tl_int\"])) + float(bx[\"assists\"]) +float(bx[\"turnovers\"])\n numerador = float(bx[\"assists\"])\n result = 0.00\n if denominador > 0:\n result = (numerador / denominador) * 100\n self.assists_ratio = \"%.2f\" % round(result, 2)", "def winrate(matches):\n if not matches:\n print('no matches')\n return None\n\n win_loss = [match['result'] for match in matches]\n return sum(win_loss)/len(win_loss)", "def get_winpercent(self, game: 'Game' = None, player: 'Player' = None):\n if game and player:\n pass\n elif game:\n play_count = self.play_set.filter(game=game).count()\n win_count = self.play_set.filter(winner=self, game=game).count()\n return win_count / play_count * 100\n elif player:\n pass\n # play_count = self.play_set.filter(players__in=player).count()\n # win_count = self.play_set.filter(\n # winner=self, player=player).count()\n # return win_count / play_count * 100\n else:\n return self.winpercent", "def custom_score(game, player):\n \n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n \n # return different between # of my agent's move and oppenent's\n return float(own_moves - opp_moves)", "def score(self,player, board):\r\n numPlayer = 0\r\n numOpp = 0\r\n for i in self.squares():\r\n if board[i] == player:\r\n numPlayer+= SQUARE_WEIGHTS[i]\r\n else:\r\n numOpp+=SQUARE_WEIGHTS[i]\r\n return numPlayer-numOpp", "def mc_update_scores(scores, board, player):\n dim = board.get_dim()\n winner = board.check_win()\n other_player = provided.switch_player(player)\n \n if winner == provided.DRAW:\n ratio = {player: 0, other_player: 0, 1: 0}\n elif winner == player:\n ratio = {player: 0 + SCORE_CURRENT, other_player: 0 - SCORE_OTHER, provided.EMPTY: 0}\n elif winner == other_player:\n ratio = {player: 0 - SCORE_CURRENT, other_player: 0 + SCORE_OTHER, provided.EMPTY: 0}\t\n \n for valx in range(dim):\n for valy in range(dim): \n scores[valx][valy] += ratio[board.square(valx, valy)] \n return scores", "def custom_score_2(game, player):\n \n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = 
len(game.get_legal_moves(game.get_opponent(player)))\n \n # shortcut to definite state:\n # 1. my agent win -> return very high score\n if opp_moves == 0:\n return float(\"inf\")\n # 2. opponenent's agent win -> return very low score\n elif own_moves == 0:\n return float(\"-inf\")\n\n # score: avaliable moves ratio\n return float(own_moves/opp_moves)", "def calculateWinRate():\n times = 10\n winRate = 0.0\n for i in range(times):\n game = Game('user', 6, 6)\n winRate += game.play(5, False, True, False, False)\n winRate = winRate/times\n print \"Winrate:\", winRate", "def calculate_scores(self):\n words = self.walk_board()\n player_scores = {}\n for word in words:\n player = word.get_owning_player()\n if player not in player_scores:\n player_scores[player] = 0\n player_scores[player] += word.get_score()\n return player_scores", "def get_win_percentage(self) -> float:\n if self.wins == 0:\n return 0.0\n else:\n return round((self.wins / (self.wins + self.losses)) * 100, 2)", "def set_assists_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n team_tc_conv = team[\"t2p_conv\"] + team[\"t3p_conv\"]\n player_tc_conv = bx[\"t2p_conv\"] + bx[\"t3p_conv\"]\n result = 0.00\n try:\n if bx[\"minutes\"] > 0:\n result = (bx[\"assists\"] / (((bx[\"minutes\"] / (team[\"minutes\"] / 5)) * team_tc_conv) - player_tc_conv))*100\n result = result if result <= 100 and result >= 0 else 0\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n except InvalidOperation:\n print(BCOLORS.WARNING + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n\n self.assists_percentage = \"%.2f\" % round(result, 2)", "def calc_match_points(self, match):\n if match.winner == match.TIE:\n match.home.tournament_score += 1\n match.away.tournament_score += 1\n else:\n match.winner.tournament_score += 3\n match.loser.tournament_score += 0", "def custom_score_3(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player)\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player))\n if opp_moves == 0:\n return float(\"inf\")\n\n #Between 1-8\n return own_moves / opp_moves", "def compute_player_score():\n\n progress_bar = ProgressBar(label=\"Computing universes\")\n\n survivals_count = 0\n for i in range(PARALLEL_UNIVERSES_COUNT):\n if simulate_universe():\n survivals_count += 1\n progress_bar.set_progression((i + 1) / PARALLEL_UNIVERSES_COUNT)\n\n progress_bar.end(\"\\n\\n\")\n\n return survivals_count / PARALLEL_UNIVERSES_COUNT", "def custom_score(game, player):\n if game.is_loser(player):\n return -math.inf\n\n if game.is_winner(player):\n return math.inf\n\n opp_moves = game.get_legal_moves(game.get_opponent(player))\n own_moves = game.get_legal_moves(player)\n\n return len(own_moves) / max(len(opp_moves), 1e-6)", "def cal_hit_ratio(self):\n full, top_k = self._subjects, self._top_k\n top_k = full[full['rank']<=top_k]\n score = 0.0\n # golden items hit in the top_K items\n score_1 = sum([(len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==1.0)])) for i,d in top_k.groupby('user')])\n score_2 = sum([(len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==0.0)])) for i,d in top_k.groupby('user')])\n score = score_1 - score_2\n return score/full['user'].nunique()", "def custom_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if 
game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player) / 8\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player)) / 8\n if opp_moves == 0:\n return float(\"inf\")\n\n move_ratio = (own_moves * 8) / (opp_moves * 8) / 8\n\n # Calculate centerness_score\n completeness = completeness_of_game(game)\n centerness_score = 0\n if completeness < 0.5:\n centerness_max = (game.width / 2.)**2 + (game.height / 2.)**2\n\n own_centerness = centerness(game, player) / centerness_max\n opp_centerness = centerness(game, game.get_opponent(player)) / centerness_max\n centerness_ratio = (own_centerness * centerness_max) / (centerness_max * opp_centerness + 0.1) / centerness_max\n\n centerness_score = -1 * own_centerness + opp_centerness - centerness_ratio\n\n return 2 * own_moves - 2 * opp_moves + 2 * move_ratio + centerness_score", "def win_ratio_avg(self):\n win_ratio = 0\n # Adds all the win ratios of team in this conference which will be\n # used to compute the win ratio average.\n for team_obj in self._conf_teams:\n ### INVARIANT: team_obj is a Team class object and\n ### self._conf_teams is a list of Team class objects.\n win_ratio += team_obj._win_ratio\n return win_ratio/len(self._conf_teams)", "def winRate(DF):\r\n df = DF[\"return\"]\r\n pos = df[df>1]\r\n neg = df[df<1]\r\n return (len(pos) / len(pos + neg)) * 100", "def custom_score_6(game, player):\n \"\"\"custom_score_6 heuristic function aims towards weighted chances heuristics\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) #Calculate length of available moves for myPlayer\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves for the oppositePlayer\n return float(length_my_player_moves*length_my_player_moves - 1.5*length_opp_payer_moves*length_opp_payer_moves)", "def average_win_rate(strategy, baseline=always_roll(4)):\n win_rate_as_player_0 = 1 - make_averaged(winner)(strategy, baseline)\n win_rate_as_player_1 = make_averaged(winner)(baseline, strategy)\n\n return (win_rate_as_player_0 + win_rate_as_player_1) / 2", "def average_win_rate(strategy, baseline=always_roll(4)):\n win_rate_as_player_0 = 1 - make_averaged(winner)(strategy, baseline)\n win_rate_as_player_1 = make_averaged(winner)(baseline, strategy)\n\n return (win_rate_as_player_0 + win_rate_as_player_1) / 2", "def average_win_rate(strategy, baseline=always_roll(4)):\n win_rate_as_player_0 = 1 - make_averaged(winner)(strategy, baseline)\n win_rate_as_player_1 = make_averaged(winner)(baseline, strategy)\n\n return (win_rate_as_player_0 + win_rate_as_player_1) / 2", "def rate_board(board, player):\n approx_player_moves = sum(\n len(_get_empty_neighbors((i, j), board))\n for i in range(5)\n for j in range(5)\n if board[i][j] == player\n )\n approx_opponent_moves = sum(\n len(_get_empty_neighbors((i, j), board))\n for i in range(5)\n for j in range(5)\n if board[i][j] == -player\n )\n return approx_player_moves - approx_opponent_moves", "def weighted_score(player, board):\n opp = Othello.opponent(player)\n total = 0\n for sq in Othello.squares():\n if board[sq] == player:\n total += SQUARE_WEIGHTS[sq]\n elif board[sq] == opp:\n total -= SQUARE_WEIGHTS[sq]\n return total", "def eval(self):\n\n ratio_player_win = self.player_wins / self.num_test\n ratio_opponent_win = self.opponent_wins / self.num_test\n ratio_tie 
= 1.0 - ratio_player_win - ratio_opponent_win\n\n print(\"\\nPlayer Test Results:\")\n print(\"\\tWins {0:.2f}%\".format(100.0 * ratio_player_win))\n print(\"\\tLosses {0:.2f}%\".format(100.0 * ratio_opponent_win))\n print(\"\\tTie {0:.2f}%\".format(100.0 * ratio_tie))\n\n ratio_optimal_win = self.optimal_wins / self.num_test\n ratio_optimal_loose = self.optimal_losses / self.num_test\n ratio_optimal_tie = 1.0 - ratio_optimal_win - ratio_optimal_loose\n\n print(\"\\nOptimal Results:\")\n print(\"\\tPlayer {0:.2f}%\".format(100.0 * ratio_optimal_win))\n print(\"\\tOpponent {0:.2f}%\".format(100.0 * ratio_optimal_loose))\n print(\"\\tTie {0:.2f}%\".format(100.0 * ratio_optimal_tie))\n\n # Ratio of win, loss diff between player and optimal\n # positive if the player beats opponent\n relative_result = ((ratio_player_win - ratio_opponent_win) /\n (ratio_optimal_win - ratio_optimal_loose))\n\n print(\"\\nResults Player Relative Optimal:\")\n print(\"\\tWins {0:.2f}%\".format(100.0 * ratio_player_win / ratio_optimal_win))\n print(\"\\tLosses {0:.2f}%\".format(100.0 * ratio_opponent_win / ratio_optimal_loose))\n print(\"\\tScore {0:.2f}%\".format(100.0 * relative_result))\n\n if self.last_test is not None:\n print(\"Diff from last test score is {0:.2f}%\".format(100.0 * (relative_result - self.last_test)))\n self.last_test = relative_result", "def compute_score(window, computer_piece):\n score = 0\n if window.count(computer_piece) == 4:\n score += 100\n elif window.count(computer_piece) == 3 and window.count(0) == 1:\n score += 5\n elif window.count(computer_piece) == 2 and window.count(0) == 2:\n score += 2\n if window.count(PLAYER_PIECE) == 2 and window.count(0) == 2:\n score -= 1\n if window.count(PLAYER_PIECE) == 3 and window.count(0) == 1:\n score -= 100\n return score", "def custom_score_3(game, player):\n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n \n # shortcut to definite state:\n # 1. my agent win -> return very high score\n if opp_moves == 0:\n return float(\"inf\")\n # 2. 
opponenent's agent win -> return very low score\n elif own_moves == 0:\n return float(\"-inf\")\n\n # score: log of avaliable moves ratio\n return float(log(own_moves/opp_moves))", "def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player)\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player))\n if opp_moves == 0:\n return float(\"inf\")\n\n move_ratio = own_moves / opp_moves\n\n completeness = completeness_of_game(game)\n centerness_score = 0\n\n if completeness < 0.5:\n own_centerness = centerness(game, player)\n opp_centerness = centerness(game, game.get_opponent(player))\n centerness_ratio = own_centerness / opp_centerness + 0.1\n\n center_score = -1 * own_centerness + opp_centerness - centerness_ratio\n\n return 2 * own_moves - 2 * opp_moves + 2 * move_ratio + centerness_score", "def calculate_my_win_strength(self):\n self.winStrength = self.strategy(deepcopy(self.currentBoardState))", "def set_offensive_ratio(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n if bx[\"minutes\"] > 0 and (bx[\"t2p_int\"] + bx[\"t3p_int\"]) > 0:\n fgm = bx[\"t2p_conv\"] + bx[\"t3p_conv\"]\n fga = bx[\"t2p_int\"] + bx[\"t3p_int\"]\n team_fgm = team[\"t2p_conv\"] + team[\"t3p_conv\"]\n team_fga = team[\"t2p_int\"] + team[\"t3p_int\"]\n team_points = team[\"t2p_conv\"]*2 + team[\"t3p_conv\"]*3 + team[\"tl_conv\"]\n points = bx[\"t2p_conv\"]*2 + bx[\"t3p_conv\"]*3 + bx[\"tl_conv\"]\n\n try:\n qAST = (Decimal(bx[\"minutes\"] / (team[\"minutes\"] / 5)) * (Decimal('1.14') * Decimal((team[\"assists\"] - bx[\"assists\"]) / team_fgm))) + \\\n Decimal((((team[\"assists\"] / team[\"minutes\"]) * bx[\"minutes\"] * 5 - bx[\"assists\"]) / ((team_fgm / team[\"minutes\"]) * bx[\"minutes\"] * 5 - fgm)) * (1 - (bx[\"minutes\"] / (team[\"minutes\"] / 5))))\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n qAST = 1\n except InvalidOperation:\n print(BCOLORS.WARNING + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n qAST = 1\n\n fg_part = fgm * (1 - Decimal('0.5') * Decimal((points - bx[\"tl_conv\"]) / (2 * fga)) * qAST)\n\n try:\n ast_part = Decimal('0.5') * Decimal(((team_points - team[\"tl_conv\"]) - (points - bx[\"tl_conv\"])) / (2*(team_fga - fga))) * bx[\"assists\"]\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n ast_part = 0\n\n if bx[\"tl_int\"] > 0:\n ft_part = Decimal(1 - (1 - (bx[\"tl_conv\"] / bx[\"tl_int\"]))**2) * Decimal('0.4') * bx[\"tl_int\"]\n else:\n ft_part = 0\n team_scoring_poss = Decimal(team_fgm + Decimal(1 - (1 - (team[\"tl_conv\"] / team[\"tl_int\"]))**2) * team[\"tl_int\"] * Decimal('0.4'))\n try:\n team_orb_percentage = Decimal(team[\"reb_of\"] / (team[\"reb_of\"] + ((opp_team[\"reb_def\"] + opp_team[\"reb_of\"]) - opp_team[\"reb_of\"])))\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n team_orb_percentage = 0\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n team_orb_percentage = 0\n\n team_play_percentage = Decimal(team_scoring_poss / (team_fga + team[\"tl_int\"] * Decimal('0.4') + team[\"turnovers\"]))\n try:\n team_orb_weight = ((1 - team_orb_percentage) * team_play_percentage) / ((1 - team_orb_percentage) * team_play_percentage + team_orb_percentage * (1 - 
team_play_percentage))\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n team_orb_weight = 0\n\n orb_part = bx[\"reb_of\"] * team_orb_weight * team_play_percentage\n\n fg_x_poss = (fga - fgm) * (1 - Decimal('1.07') * team_orb_percentage)\n if bx[\"tl_conv\"] > 0:\n ft_x_poss = Decimal((1 - (bx[\"tl_conv\"] / bx[\"tl_int\"]))**2) * Decimal('0.4') * bx[\"tl_int\"]\n else:\n ft_x_poss = Decimal(1 - (bx[\"tl_conv\"] / 1)**2) * Decimal('0.4') * bx[\"tl_int\"]\n try:\n sc_poss = (fg_part + ast_part + ft_part) * (1 - (team[\"reb_of\"] / team_scoring_poss) * team_orb_weight * team_play_percentage) + orb_part\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n sc_poss =0\n\n tot_poss = sc_poss + fg_x_poss + ft_x_poss + bx[\"turnovers\"]\n\n pprod_fg_part = 2 * (fgm + Decimal('0.5') * bx[\"t3p_conv\"]) * (1 - Decimal('0.5') * Decimal((points - bx[\"tl_conv\"]) / (2 * fga)) * qAST)\n\n try:\n pprod_ast_part = 2 * ((team_fgm - fgm + Decimal('0.5') * (team[\"t3p_conv\"] - bx[\"t3p_conv\"])) / (team_fgm - fgm)) * Decimal('0.5') * Decimal(((team_points - team[\"tl_conv\"]) - (points - bx[\"tl_conv\"])) / (2 * (team_fga - fga))) * bx[\"assists\"]\n except:\n pprod_ast_part = 0\n\n pprod_orb_part = bx[\"reb_of\"] * team_orb_weight * team_play_percentage * (team_points / (team_fgm + Decimal(1 - (team[\"tl_conv\"] / team[\"tl_int\"])**2) * Decimal('0.4') * team[\"tl_int\"]))\n try:\n pprod = (pprod_fg_part + pprod_ast_part + bx[\"tl_conv\"]) * (1 - (team[\"reb_of\"] / team_scoring_poss) * team_orb_weight * team_play_percentage) + pprod_orb_part\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n pprod = 0\n\n try:\n result = 100 * (pprod / tot_poss)\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n result = 0\n\n # print(\"fgm: \" + str(fgm))\n # print(\"fga: \" + str(fga))\n # print(\"team_fgm: \" + str(team_fgm))\n # print(\"team_fga: \" + str(team_fga))\n # print(\"team_points: \" + str(team_points))\n # print(\"points: \" + str(points))\n # print(\"qAST: \" + str(qAST))\n # print(\"fg_part: \" + str(fg_part))\n # print(\"ast_part: \" + str(ast_part))\n # print(\"ft_part: \" + str(ft_part))\n # print(\"team_scoring_poss: \" + str(team_scoring_poss))\n # print(\"team_orb_percentage: \" + str(team_orb_percentage))\n # print(\"team_play_percentage: \" + str(team_play_percentage))\n # print(\"team_orb_weight: \" + str(team_orb_weight))\n # print(\"orb_part: \" + str(orb_part))\n # print(\"fg_x_poss: \" + str(fg_x_poss))\n # print(\"ft_x_poss: \" + str(ft_x_poss))\n # print(\"sc_poss: \" + str(sc_poss))\n # print(\"tot_poss: \" + str(tot_poss))\n # print(\"pprod_fg_part: \" + str(pprod_fg_part))\n # print(\"pprod_ast_part: \" + str(pprod_ast_part))\n # print(\"pprod_orb_part: \" + str(pprod_orb_part))\n # print(\"pprod: \" + str(pprod))\n # print(\"result: \" + str(result) + \"\\n\")\n else:\n result = 0.00\n\n self.ortg = \"%.2f\" % round(result, 2)\n if Decimal(self.ortg) < 0 or Decimal(self.ortg) >= 1000:\n \"\"\"For one game, maybe we've got a negative result or one so big, so, for just only a game, we get the ORTG \n using team's formula\"\"\"\n print(BCOLORS.OKBLUE + \"ORTG negativo o superior a 1000 para jugadora => recalculamos a través de la fórmula de equipo\" + BCOLORS.ENDC)\n bx = self.get_standard_stats()\n result = round((bx[\"t2p_conv\"]*2 + bx[\"t3p_conv\"]*3 + bx[\"tl_conv\"])/self.get_team_possessions(), 2)\n 
self.ortg = \"%.2f\" % result", "def custom_score_5(game, player):\n \"\"\"custom_score_5 heuristic function defines chances heuristics\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) #Calculate length of available moves for myPlayer\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves for the oppositePlayer\n return float(length_my_player_moves*length_my_player_moves - length_opp_payer_moves*length_opp_payer_moves)", "def custom_score_3(game, player):\n if game.is_loser(player):\n return -math.inf\n\n if game.is_winner(player):\n return math.inf\n\n opponent = game.get_opponent(player)\n\n opp_moves = game.get_legal_moves(opponent)\n own_moves = game.get_legal_moves(player)\n\n # Calculate the normalized distance if both players are on the board.\n player_loc = game.get_player_location(player)\n opp_loc = game.get_player_location(opponent)\n norm_dis = 0\n if opp_loc and player_loc:\n norm_dis = distance(player_loc, opp_loc) / 8.46 # 8.46 is distance((0, 0), (6, 6))\n\n return len(own_moves) / max(len(opp_moves), 1e-6) - norm_dis", "def evaluate(self, state):\n\t\ttranspose = state.board.transpose()\t\t# columns in state.board = rows in transpose\n\t\tcount = []\n\t\topponentcount = []\n\t\tfor row, column in zip(state.board, transpose):\n\t\t\trowcounter = collections.Counter(row)\n\t\t\tcolumncounter = collections.Counter(column)\n\t\t\tcount.append(rowcounter.get(state.current_player, 0))\n\t\t\tcount.append(columncounter.get(state.current_player, 0))\n\t\t\topponentcount.append(rowcounter.get(state.current_player * - 1, 0))\n\t\t\topponentcount.append(columncounter.get(state.current_player * -1 , 0))\n\n\t\tY = state.board[:, ::-1]\n\t\tdiagonals = [np.diagonal(state.board), np.diagonal(Y)]\n\t\tmain_diagonal_count = collections.Counter(diagonals[0])\n\t\tsecond_diagonal_count = collections.Counter(diagonals[1])\n\t\tcount.append(main_diagonal_count.get(state.current_player, 0))\n\t\tcount.append(second_diagonal_count.get(state.current_player, 0))\n\t\topponentcount.append(main_diagonal_count.get(state.current_player * - 1, 0))\n\t\topponentcount.append(second_diagonal_count.get(state.current_player * -1, 0))\n\n\t\t# max(count): maximum number of player's tiles in a row, column, or a diagonal (the highest value is 5)\n\t\t# max(opponentcount): maximum number of opponent's tiles in a row, column, or a diagonal (the highest value is 5)\n\t\tscoremax = 5 ** max(count)\n\t\tscoremin = 5 ** max(opponentcount)\n\n\t\treturn scoremax - scoremin", "def _estimate_strength_from_results(\n number_of_games: int, number_of_wins: int, opponent_rating: float\n) -> Tuple[float, Tuple[float, float]]:\n n, p = number_of_games, number_of_wins / number_of_games\n q = 1 - p\n\n if n * p * q < 9: # Cannot apply normal approximation of binomial distribution\n raise ValueError(\n \"The results obtained in evaluate_player are too extreme to obtain an \"\n \"accurate player evaluation. You can try to solve this issue by increasing\"\n \" the total number of battles. 
Obtained results: %d victories out of %d\"\n \" games.\" % (p * n, n)\n )\n\n estimate = opponent_rating * p / q\n error = (\n math.sqrt(n * p * q) / n * 1.96\n ) # 95% confidence interval for normal distribution\n\n lower_bound = max(0, p - error)\n lower_bound = opponent_rating * lower_bound / (1 - lower_bound)\n\n higher_bound = min(1, p + error)\n\n if higher_bound == 1:\n higher_bound = math.inf\n else:\n higher_bound = opponent_rating * higher_bound / (1 - higher_bound)\n\n return estimate, (lower_bound, higher_bound)", "def custom_score_3(game, player):\n \"\"\"custom_score_3 heuristic function aims at maximizing win chances of my agent\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = 1.0 * len(game.get_legal_moves(player))#Calculate length of available moves for myPlayer\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves with oppositePlayer\n\n if length_my_player_moves == 0:\n return float(\"-inf\")\n\n if length_opp_payer_moves == 0:\n return float(\"inf\")\n\n return float(length_my_player_moves/length_opp_payer_moves)", "def custom_score_4(game, player):\n \"\"\"custom_score_4 heuristic function aims at minimizing loosing chances of myPlayer\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) #Calculate length of available moves for myPlayer\n length_opp_payer_moves = 1.0 * len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves for the oppositePlayer\n\n if length_my_player_moves == 0:\n return float(\"-inf\")\n\n if length_opp_payer_moves == 0:\n return float(\"inf\")\n\n return float(-length_opp_payer_moves/length_my_player_moves)", "def gameOver(self, myScore, oppScore):\r\n self.gamePlayed += 1\r\n self.averageNumTurns = (self.averageNumTurns * self.gamePlayed + self.thisNumTurns) / self.gamePlayed\r\n self.thisNumTurns = 0.0", "def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n if game.is_winner(player):\n return float(\"inf\")\n\n # Aim to maximise your own available moves vs the opponent (Factor 2)\n\n opponent = game.get_opponent(player)\n return float(len(game.get_legal_moves(player)))-2.0*float(len(game.get_legal_moves(opponent)))", "def calc_winner(self):\n pass", "def score2(self,player, board):\r\n numPlayer = 0\r\n numOpp = 0\r\n for i in self.squares():\r\n if board[i] == player:\r\n numPlayer+= 1\r\n else:\r\n numOpp+=1\r\n return numPlayer-numOpp", "def opponents_score(self):\n if self.opponent_wickets == 10:\n var1 = \"All Out\"\n return str('{0} {1}').format(self.opponent_runs, var1)\n else:\n var1 = self.opponent_wickets\n return str('{0}-{1}').format(self.opponent_runs, var1)", "def get_percent(self, n):\n controlled = 0.00\n for i in range(len(self.tile_contents)):\n if(self.tile_contents[i].player_number == n):\n controlled += 1.00\n \n return float(controlled / self.paint_blocks)", "def get_win_rate_regular_season_for_each_coach(self):\n self.games_won_for_coaches = (\n self.raw_data_regularseason\n [['Season','DayNum','WTeamID']]\n # merge for winning team\n .merge(self.num_days_coach_for_season[['Season','TeamID','FirstDayNum','LastDayNum','CoachName']],\n how='left',left_on=['Season','WTeamID'],right_on=['Season','TeamID'])\n 
.rename(columns={\"FirstDayNum\":\"FirstDayNum_win\",\"LastDayNum\":\"LastDayNum_win\",\"CoachName\":\"CoachName_win\",\"TeamID\":\"TeamID_win\"})\n .pipe(lambda x:x.assign(which_coach_for_win = np.where((x.FirstDayNum_win <= x.DayNum) & (x.LastDayNum_win >= x.DayNum),1,0)))\n .query(\"which_coach_for_win != 0\")\n .groupby(['Season','CoachName_win','WTeamID'])\n .agg({\"which_coach_for_win\":\"sum\"})\n .reset_index()\n )\n\n self.games_lose_for_coaches = (\n self.raw_data_regularseason\n [['Season','DayNum','LTeamID']]\n # merge for losing team\n .merge(self.num_days_coach_for_season[['Season','TeamID','FirstDayNum','LastDayNum','CoachName']],\n how='left',left_on=['Season','LTeamID'],right_on=['Season','TeamID'])\n .rename(columns={\"FirstDayNum\":\"FirstDayNum_lose\",\"LastDayNum\":\"LastDayNum_lose\",\"CoachName\":\"CoachName_lose\",\"TeamID\":\"TeamID_lose\"})\n .pipe(lambda x:x.assign(which_coach_for_lose = np.where((x.FirstDayNum_lose <= x.DayNum) & (x.LastDayNum_lose >= x.DayNum),1,0)))\n .query(\"which_coach_for_lose != 0\")\n .groupby(['Season','CoachName_lose','LTeamID'])\n .agg({\"which_coach_for_lose\":\"sum\"})\n .reset_index()\n )\n\n # combine games won and lost df\n self.combine_regular_games_won_lose = (\n self.games_lose_for_coaches\n .merge(self.games_won_for_coaches,how='left',left_on=['Season','LTeamID','CoachName_lose'],right_on=['Season','WTeamID','CoachName_win'])\n .pipe(lambda x:x.assign(win_rate_regular = x.which_coach_for_win/(x.which_coach_for_win + x.which_coach_for_lose)))\n .drop(['CoachName_win','WTeamID'],1)\n .rename(columns={\"CoachName_lose\":\"CoachName\",\"LTeamID\":\"TeamID\",\"which_coach_for_lose\":\"games_lost\",\"which_coach_for_win\":\"games_won\"})\n )", "def calculate_score(self):\n\n correct_award = 150\n turns_total = self.turns.count()\n turns_correct = self.turns.filter(is_match=True).count()\n seconds_left = (60.0 - (self.turns.last().created - self.turns.first().created).total_seconds()) or 0\n maxpoints = turns_correct * correct_award\n deduction_for_errors = correct_award * 0.11123\n\n maxpoints -= ((turns_total - turns_correct) * 2 * deduction_for_errors)\n maxpoints += seconds_left * 5.123214\n\n return Decimal(maxpoints)", "def get_game_score(self):\n if self.game_is_tied():\n return 0\n elif self.is_game_won():\n my_available_steps = self.steps_available(self.loc)\n opp_available_steps = self.steps_available(self.opponent_loc)\n my_score = self.my_score - self.penalty_score if my_available_steps == 0 else self.my_score\n opp_score = self.opponent_score - self.penalty_score if opp_available_steps == 0 else self.opponent_score\n return (my_score - opp_score) / (abs(my_score) + abs(opp_score))\n else:\n if abs(self.my_score) + abs(self.opponent_score) == 0:\n return 0\n return (self.my_score - self.opponent_score) / (abs(self.my_score) + abs(self.opponent_score))", "def calculate_score_pairs(hand_value,*args):\n # ratios=[1,10,100,1000,10000]\n ratios = CONST.RATIOS[:]\n return sum(map(lambda a,b:a/b, args, ratios))+hand_value", "def improved_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves - opp_moves)", "def GetResult(self, playerjm):\n return self.score / len(self.scores)", "def update_stats(home_player: Player, away_player: Player,\n home_score: int, away_score: int) -> None:\n home_player.goals_for += 
home_score\n home_player.goals_against += away_score\n away_player.goals_for += away_score\n away_player.goals_against += home_score\n\n # home won\n if (home_score > away_score):\n home_player.wins += 1\n away_player.losses += 1\n # away won\n elif (home_score < away_score):\n home_player.losses += 1\n away_player.wins += 1\n # draw\n else:\n home_player.draws += 1\n away_player.draws += 1", "def evaluate(self, state):\n total_rewards = np.array([0.0] * state.num_players)\n for _ in range(self.width): # for each of width simulations\n total_rewards += self.run_rollout(state)\n\n return total_rewards / self.width", "def score(player, board):\n mine, theirs = 0, 0\n opp = Othello.opponent(player)\n for sq in Othello.squares():\n piece = board[sq]\n if piece == player: mine += 1\n elif piece == opp: theirs += 1\n return mine - theirs", "def custom_score_7(game, player):\n \"\"\"custom_score_7 heuristic function also aims towards weighted chances heuristics\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) #Calculate length of available moves for myPlayer\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves for the oppositePlayer\n return float(1.5*length_my_player_moves*length_my_player_moves - length_opp_payer_moves*length_opp_payer_moves)", "def expected_value(held_dice, num_die_sides, num_free_dice):\n result = 0\n outcomes = range(1, num_die_sides + 1)\n possible = sorted(gen_all_sequences(outcomes, num_free_dice))\n for hand in possible:\n result += score(held_dice + hand)\n return float(result)/len(possible)", "def get_fool_ratio(self, test_acc, attack_accs):\n return [round(100*((test_acc - attack_acc) / test_acc), 2) for attack_acc in attack_accs]", "def get_new_ratings(players, teams):\n nb_players_team0 = len(teams[0])\n nb_players_team1 = len(teams[1])\n winner = players[teams[0][0]]\n loser = players[teams[1][0]]\n if nb_players_team0 == 1 and nb_players_team1 == 1:\n new_r1, new_r3 = rate_1vs1(winner,loser)\n elif nb_players_team0 == 1 and nb_players_team1 > 1:\n team_loser = [loser, players[teams[1][1]]]\n (new_r1), (new_r3, new_r4) = rate([winner, team_loser], ranks=[0, 1]) \n elif nb_players_team0 > 1 and nb_players_team1 == 1:\n team_winner = [winner, players[teams[0][1]]]\n (new_r1, new_r2), (new_r3) = rate([team_winner, loser], ranks=[0, 1]) \n else:\n team_loser = [loser, players[teams[1][1]]]\n team_winner = [winner, players[teams[0][1]]]\n (new_r1, new_r2), (new_r3, new_r4) = rate([team_winner, team_loser], ranks=[0, 1]) \n player1 = {'name': teams[0][0], 'mu': new_r1.mu, 'sigma': new_r1.sigma}\n player3 = {'name': teams[1][0], 'mu': new_r3.mu, 'sigma': new_r3.sigma}\n if nb_players_team0 > 1:\n player2 = {'name': teams[0][1], 'mu': new_r2.mu, 'sigma': new_r2.sigma}\n if nb_players_team1 > 1:\n player4 = {'name': teams[1][1], 'mu': new_r4.mu, 'sigma': new_r4.sigma}\n if nb_players_team0 > 1:\n return [player1, player2, player3, player4]\n return [player1, player2, player4]\n return [player1, player3]", "def expected_value(held_dice, num_die_sides, num_free_dice):\n outcomes = get_outcomes(num_die_sides)\n print \"outcomes:\", outcomes\n\n # generate all possible sequences of rolls\n all_rolls = list(gen_all_sequences(outcomes, num_free_dice))\n results = [max_repeats(roll) for roll in all_rolls]\n value = 0.0 \n\n\n for result in all_rolls:\n curr_hand = tuple(list(held_dice) + 
list(result))\n value += score(curr_hand)\n\n return value / len(all_rolls)", "def _calculate_score(self):\n mul = self._check_board()\n if mul > 0:\n inc = 100 * mul + ((mul - 1) * 25)\n self.score += inc", "def calculate_scores(players):\n scores = {}\n for player in players.tuple_:\n scores[player.id_] = player.score()\n return scores", "def get_score(self, player: int) -> int:\n score = 0\n i = 0\n while i < len(self.leylines):\n score += 1 if self.leylines[i].player == player else 0\n score += 1 if self.rights[i].player == player else 0\n score += 1 if self.lefts[i].player == player else 0\n i += 1\n return score", "def horizontal_ratio(self):\n if self.pupils_located:\n pupil_left = self.eye_left.pupil.x / (self.eye_left.center[0] * 2 - 10)\n pupil_right = self.eye_right.pupil.x / (self.eye_right.center[0] * 2 - 10)\n return (pupil_left + pupil_right) / 2", "def get_average_distance_to_opponents(obs, player_x, player_y):\n distances_sum = 0\n distances_amount = 0\n for i in range(1, len(obs[\"right_team\"])):\n # if opponent is ahead of player\n if obs[\"right_team\"][i][0] > (player_x - 0.02):\n distance_to_opponent = get_distance(player_x, player_y, obs[\"right_team\"][i][0], obs[\"right_team\"][i][1])\n if distance_to_opponent < 0.03:\n distances_sum += distance_to_opponent\n distances_amount += 1\n # if there is no opponents close around\n if distances_amount == 0:\n return 2, distances_amount\n return distances_sum / distances_amount, distances_amount", "def host_result_value(winner: Winner) -> float:\n if winner == Winner.HOME:\n return 1\n if winner == Winner.AWAY:\n return 0\n return 0.5", "def calculate_score(player_cards):\n score = sum(player_cards)\n return score", "def rough_outcome(self) -> float:\n # HUYNH YOU PRICK WHY THE FUCK DO YOU MAKE US WRITE THIS SHIT EVEN IT'S NOT USED ANYWHERE\n # pick move based on this may not be optimal but better than random\n # return 1 if win immediately\n # return -1 if all states reachable will result the other player win\n # return 0 if otherwise ??? what the fuck does this mean\n # look two states forward\n pass", "def calculateRatio(levelDims):\n highestReso = np.asarray(levelDims[0])\n lowestReso = np.asarray(levelDims[-1])\n Xratio, Yratio = highestReso/lowestReso\n return (Xratio, Yratio)", "def calc_win_lose_ratio(self):\n total = len(self.train_y)\n survived = 0\n for i in self.train_y:\n if i > 0:\n survived += 1\n\n self.survival_sum = [survived, total-survived]", "def getWinProbability(team1, team2, r, year=2019):\n\tR1_PROBS = R1_PROBS_2019 if year == 2019 else R1_PROBS_2020\n\tALPHA_VALS = ALPHA_VALS_2019 if year == 2019 else ALPHA_VALS_2020\n\n\t# Currently using Power Model\n\ts1 = team1['seed']\n\ts2 = team2['seed']\n\n\t# Use R1_PROBS for round 1\n\tif r == 1:\n\t\tif not (s1 + s2 == 17):\n\t\t\texit('Invalid round 1 matchup: seeds {0} vs. 
{1}.'.format(s1, s2))\n\t\treturn R1_PROBS[s1] if s1 < s2 else R1_PROBS[s2]\n\t\n\t# Use ALPHA_VALS for other rounds (unless seeds are same)\n\tif s1 == s2:\n\t\treturn 0.5\n\n\talpha = ALPHA_VALS[r]\n\ts1a = (s1 * 1.0) ** alpha\n\ts2a = (s2 * 1.0) ** alpha\n\treturn s2a / (s1a + s2a)", "def getRatio(probe_num, position_vector, shot_range, dir, day ='050119r'):\n ratio_x = 0\n ratio_y = 0\n ratio_z = 0\n # helm_B = [0,0,0]\n divideby = 0\n for shot in range(shot_range[0], shot_range[1]+1):\n print( 'On shot ', day+str(shot), ' for probe ',probe_num)\n x,y,z, currmax,helmB_new = probe_calib(day+str(shot), probe_num, position_vector,dir)\n ratio_x = ratio_x + x\n ratio_y = ratio_y + y\n ratio_z = ratio_z + z\n # helm_B = [helm_B[i] + helmB_new[i] for i in len(helmB)]\n divideby = divideby + 1 #averaging over the number of shots\n ratio_Bx = ratio_x/divideby\n ratio_By = ratio_y/divideby\n ratio_Bz = ratio_z/divideby\n # helmB = [helm_B]/divideby\n # print ratio_Bx, ratio_By, ratio_Bz, helmB\n # print(\"ratio_Bx %f, ratio_By %f, ratio_Bz %f, helmB%s\"%(ratio_Bx, ratio_By, ratio_Bz, helmB))\n Bx_sqr =ratio_x**2\n By_sqr =ratio_y**2\n Bz_sqr =ratio_z**2\n B = Bx_sqr + By_sqr+ Bz_sqr\n norm_factor = np.sqrt(B)\n ratio_Bx, ratio_By, ratio_Bz = [ratio_Bx, ratio_By, ratio_Bz]/norm_factor\n\n return (ratio_Bx, ratio_By, ratio_Bz, norm_factor)", "def _compute_winrates(synergy, counter, heroes_released):\n for i in range(heroes_released):\n for j in range(heroes_released):\n if i != j and i != 23 and j != 23:\n if synergy['games'][i, j] != 0:\n synergy['winrate'][i, j] = synergy['wins'][i, j] / \\\n float(synergy['games'][i, j])\n\n if counter['games'][i, j] != 0:\n counter['winrate'][i, j] = counter['wins'][i, j] / \\\n float(counter['games'][i, j])", "def custom_score_3(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n if game.is_winner(player):\n return float(\"inf\")\n game_phase = len(game.get_blank_spaces()) # high if early, low if late\n\n # Heuristic tries to take advantage of the center and shadowing if possible, otherwise stick to the centre and maximise number of moves \n\n # (*0) Calculate the (theoretical) centre\n center = (game.width / 2., game.height / 2.)\n opponent = game.get_opponent(player)\n loc_player = game.get_player_location(player)\n loc_opponent = game.get_player_location(opponent)\n if game.width % 2 != 0 and game.height % 2 != 0:\n trueCentre = True\n loc_mirror = tuple(abs(x-(game.width-1)) for x in loc_player) # the mirrored location of the player across the axes\n else:\n trueCentre = False\n # (1) Always take the centre!\n if loc_player == center:\n return float(\"inf\")\n # (2) If opponent has the centre, avoid a position within knight's movement at all costs to avoid shadowing\n if loc_opponent == center:\n r, c = center\n directions = [(-2, -1), (-2, 1), (-1, -2), (-1, 2),(1, -2), (1, 2), (2, -1), (2, 1)]\n avoidable_positions = [(r + dr, c + dc) for dr, dc in directions]\n if loc_player in avoidable_positions:\n return float(\"-inf\")\n # (3) If we can shadow the opponent, we should!\n if trueCentre:\n if center not in game.get_blank_spaces() and loc_opponent == loc_mirror and len(game.get_legal_moves(player)) == len(game.get_legal_moves(opponent)):\n return float(\"inf\")\n # (4) Finally, we simply return number of moves active player can make minus number of moves opponent can make minus the distance from the centre, weighted by the game phase\n w, h = center\n y, x = loc_player\n dist = float((h - y)**2 + (w - x)**2)\n return 
(float(len(game.get_legal_moves(player)))-2.0*float(len(game.get_legal_moves(opponent)))-dist)*game_phase", "def custom_score(game, player):\n \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n # First obtain the improved score\n player_legal_moves = game.get_legal_moves(player)\n opponent = game.get_opponent(player)\n opponent_legal_moves = game.get_legal_moves(opponent)\n improved = len(player_legal_moves) - len(opponent_legal_moves)\n if improved != 0:\n return float(improved)\n \n # Second get differences from center\n center_coordinates = center_y, center_x = get_center_coordinates(game)\n player_coordinates = game.get_player_location(player)\n opponent_coordinates = game.get_player_location(opponent)\n player_center_dist = get_distances_from_center(center_coordinates, player_coordinates)\n opponent_center_dist = get_distances_from_center(center_coordinates, opponent_coordinates)\n center_dist_diff = player_center_dist - opponent_center_dist\n \n # Third obtain next_moves\n player_next_moves = [get_next_moves(game, move, list(move)) for move in player_legal_moves]\n opponent_next_moves = [get_next_moves(game, move, list(move)) for move in opponent_legal_moves] \n improved_next = len(player_next_moves) - len(opponent_next_moves)\n \n # Put player and opponent feature differences in a tuple/vector surrogoate\n feature_diff_vector = (improved, center_dist_diff, improved_next)\n \n # Provide a weighting vector for the features of each player-participant\n weight_vector = (1.5,0.1,1.0)\n # Calculate the return value = weighted difference of players' features\n weighted_difference_dot_product = sum(p*q for p,q, \\\n in zip(feature_diff_vector, weight_vector))\n \n return float(weighted_difference_dot_product)", "def p(party, vote_count, s):\n return t(party, vote_count) / d(s)", "def custom_score(game, player):\n \"\"\" custom_score heuristic function idea is to implement aggressive heuristic function \n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) # Calculate length of myPlayer moves\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player))) # Calculate length of opposite player moves same as custom score 2\n return float(length_my_player_moves - 1.5*length_opp_payer_moves)", "def custom_score(game, player):\n \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n if game.move_count < 15:\n return center_modified_score(game, player)\n\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves - opp_moves)", "def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = len(game.get_legal_moves(player))\n\n if game.move_count < 15:\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves - opp_moves)\n\n delta = 0\n\n moves = game.get_legal_moves()\n initial_moves_count = len(moves)\n indexes = np.random.permutation(initial_moves_count)\n\n for i in range(0, min(4, initial_moves_count)):\n first_level = True\n simulation = game.copy()\n\n while True:\n moves = simulation.get_legal_moves()\n moves_count = len(moves)\n if moves_count == 0:\n if simulation.is_winner(player):\n delta = delta + 1\n else:\n delta = 
delta - 1\n break\n if first_level:\n selected_move = indexes[i]\n first_level = False\n else:\n selected_move = random.randint(0, moves_count - 1)\n\n simulation.apply_move(moves[selected_move])\n\n return float(own_moves + delta) #float(own_moves - opp_moves + 5 * delta)\n\n #return float(own_moves - opp_moves + free_area_score(game, player) - free_area_score(game, game.get_opponent(player)))", "def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n # First obtain the base information to calculate player & opponent\n # feature values\n player_legal_moves = game.get_legal_moves(player)\n opponent = game.get_opponent(player)\n opponent_legal_moves = game.get_legal_moves(opponent)\n if len(player_legal_moves) != len(opponent_legal_moves):\n return float(len(player_legal_moves) - len(opponent_legal_moves))\n \n # Get_center_coordinates and opponent. Then set the list of participants\n center_coordinates = center_y, center_x = get_center_coordinates(game)\n participants = [player, opponent]\n \n # Then, for each participant obtain his/her feature values \n for participant in participants:\n if participant == player:\n p_legal_moves = player_legal_moves\n player_either = player\n participant_coordinates = p_y, p_x = \\\n game.get_player_location(participant)\n player_legal_move_count, player_start_center_distance, \\\n player_total_next_moves, player_max_path_length, \\\n player_path_count, player_min_center_diff \\\n = \\\n get_player_feature_values(game, center_coordinates, \\\n player_either,participant_coordinates, p_legal_moves)\n else:\n p_legal_moves = opponent_legal_moves\n player_either = opponent\n participant_coordinates = p_y, p_x \\\n = game.get_player_location(participant)\n opponent_legal_move_count, opponent_start_center_distance, \\\n opponent_total_next_moves, opponent_max_path_length, \\\n opponent_path_count, opponent_min_center_diff \\\n = \\\n get_player_feature_values(game, center_coordinates, \\\n player_either, participant_coordinates, p_legal_moves)\n \n # Place each participant's feature values in a tuple/vector surrogate \n pro_player_vector = \\\n (player_legal_move_count, player_start_center_distance, \\\n player_total_next_moves, player_max_path_length, player_path_count, \\\n opponent_min_center_diff)\n pro_opponent_vector = \\\n (opponent_legal_move_count, opponent_start_center_distance, \\\n opponent_total_next_moves, opponent_max_path_length, \\\n opponent_path_count, player_min_center_diff)\n \n # Provide a weighting vector for the features \n weight_vector = (1.5,0.1,1.0,0.001,0.001,0.001)\n \n # Calculate the return value = weighted difference of players' features\n weighted_difference_dot_product = sum(p*(q-r ) for p,q,r \\\n in zip(weight_vector, pro_player_vector, pro_opponent_vector))\n \n return float(weighted_difference_dot_product)", "def calculate_overall_rating(player_dict):\r\n if player_dict[\"position\"].upper() == \"QB\":\r\n throw_power = int(max(min(int(player_dict[\"throw_power\"]), 99), 70))\r\n throw_accuracy = int(max(min(math.ceil(\r\n ((2 * (\r\n int(player_dict[\"throw_accuracy_short\"]) + \r\n int(player_dict[\"throw_accuracy_mid\"]) + \r\n int(player_dict[\"throw_accuracy_deep\"]) + \r\n int(player_dict[\"throw_on_the_run\"]) + \r\n int(player_dict[\"playaction\"])\r\n )) - (2 * min(\r\n int(player_dict[\"throw_accuracy_short\"]), \r\n int(player_dict[\"throw_accuracy_mid\"]), \r\n int(player_dict[\"throw_accuracy_deep\"]), \r\n 
int(player_dict[\"throw_on_the_run\"]), \r\n int(player_dict[\"playaction\"])\r\n ))\r\n ) / 8\r\n ), 99), 60))\r\n break_tackles = int(max(min(\r\n math.ceil(((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 7), \r\n 90), 20))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 98), 45))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 55))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((throw_power - 50.0) / 10.0) * 4.9\r\n overall_rating += ((throw_accuracy - 50.0) / 10.0) * 5.8\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 0.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 0.8\r\n overall_rating += ((awareness - 50.0) / 10.0) * 4.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 2.0\r\n overall_rating = int(max(min((round(overall_rating) + 28), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"HB\":\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 70), 25))\r\n break_tackles = int(max(min(\r\n (math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 5), \r\n 99), 50))\r\n carrying = int(max(min(int(player_dict[\"carrying\"]), 99), 60))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 90), 45))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 50))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 0.33\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 0.8\r\n overall_rating += ((carrying - 50.0) / 10.0) * 2.0\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 2.8\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.0\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.6\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.3\r\n overall_rating += ((catching - 50.0) / 10.0) * 1.4\r\n overall_rating = int(max(min((round(overall_rating) + 27), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"FB\":\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 75), 40))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n ), 85), 45))\r\n break_tackles = int(max(min(\r\n (math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 5), \r\n 99), 55))\r\n carrying = int(max(min(int(player_dict[\"carrying\"]), 99), 60))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 65))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 55))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 95), 60))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 60))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 45))\r\n \r\n 
overall_rating = 0.0\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 1.0\r\n overall_rating += ((run_block - 50.0) / 10.0) * 7.2\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 1.8\r\n overall_rating += ((carrying - 50.0) / 10.0) * 1.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.0\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.8\r\n overall_rating += ((strength - 50.0) / 10.0) * 1.8\r\n overall_rating += ((speed - 50.0) / 10.0) * 1.8\r\n overall_rating += ((catching - 50.0) / 10.0) * 5.2\r\n overall_rating = int(max(min((round(overall_rating) + 39), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"WR\":\r\n break_tackles = int(max(min(\r\n math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2), \r\n 80), 35))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 75))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 75))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 85), 35))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 99), 65))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 0.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.3\r\n overall_rating += ((agility - 50.0) / 10.0) * 2.3\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.3\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.8\r\n overall_rating += ((speed - 50.0) / 10.0) * 2.3\r\n overall_rating += ((catching - 50.0) / 10.0) * 4.75\r\n overall_rating += ((jumping - 50.0) / 10.0) * 1.4\r\n overall_rating = int(max(min((round(overall_rating) + 26), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"TE\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 55))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 90), 55))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 55))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 60))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 99), 45))\r\n break_tackles = int(max(min(\r\n (math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 5), \r\n 95), 20))\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 80), 35))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n ), 85), 35))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 2.65\r\n overall_rating += ((strength - 50.0) / 10.0) * 2.65\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.65\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.25\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.25\r\n overall_rating += ((catching - 50.0) / 10.0) * 5.4\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 1.2\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 1.2\r\n overall_rating += ((run_block - 50.0) / 10.0) * 5.4\r\n overall_rating = int(max(min((round(overall_rating) + 35), 
99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"LT\" or player_dict[\"position\"].upper() == \"RT\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 85), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 85), 40))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 90), 60))\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 99), 60))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n ), 99), 60))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 0.8\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.3\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.3\r\n overall_rating += ((agility - 50.0) / 10.0) * 0.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 0.8\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 4.75\r\n overall_rating += ((run_block - 50.0) / 10.0) * 3.75\r\n overall_rating = int(max(min((round(overall_rating) + 26), 99), 40))\r\n return overall_rating\r\n \r\n if (player_dict[\"position\"].upper() == \"LG\" or player_dict[\"position\"].upper() == \"RG\" or \r\n player_dict[\"position\"].upper() == \"C\"):\r\n speed = int(max(min(int(player_dict[\"speed\"]), 85), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 85), 40))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 90), 60))\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 99), 65))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n ), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 1.7\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.25\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.25\r\n overall_rating += ((agility - 50.0) / 10.0) * 0.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.7\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 3.25\r\n overall_rating += ((run_block - 50.0) / 10.0) * 4.8\r\n overall_rating = int(max(min((round(overall_rating) + 28), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"LE\" or player_dict[\"position\"].upper() == \"RE\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 90), 55))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 60))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 90), 45))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 60))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.75\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.75\r\n overall_rating += ((awareness - 50.0) / 10.0) * 
1.75\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.75\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 3.8\r\n overall_rating += ((tackle - 50.0) / 10.0) * 5.5\r\n overall_rating = int(max(min((round(overall_rating) + 30), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"DT\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 90), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 90), 40))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 1.8\r\n overall_rating += ((strength - 50.0) / 10.0) * 5.5\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 1\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.8\r\n overall_rating += ((tackle - 50.0) / 10.0) * 4.55\r\n overall_rating = int(max(min((round(overall_rating) + 29), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"LOLB\" or player_dict[\"position\"].upper() == \"ROLB\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 70))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 60))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 65))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 75))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 90), 20))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 60))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.75\r\n overall_rating += ((strength - 50.0) / 10.0) * 2.4\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.6\r\n overall_rating += ((agility - 50.0) / 10.0) * 2.4\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.3\r\n overall_rating += ((catching - 50.0) / 10.0) * 1.3\r\n overall_rating += ((tackle - 50.0) / 10.0) * 4.8\r\n overall_rating = int(max(min((round(overall_rating) + 29), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"MLB\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 65))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 60))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 65))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 75))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 0.75\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.4\r\n overall_rating += ((awareness - 50.0) / 10.0) * 5.2\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.65\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.75\r\n overall_rating += ((tackle - 50.0) / 10.0) * 5.2\r\n overall_rating = int(max(min((round(overall_rating) + 27), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"CB\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 80))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 85), 40))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 
75))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 40))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 85), 30))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.85\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.9\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.85\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.55\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.35\r\n overall_rating += ((catching - 50.0) / 10.0) * 3\r\n overall_rating += ((jumping - 50.0) / 10.0) * 1.55\r\n overall_rating += ((tackle - 50.0) / 10.0) * 1.55\r\n overall_rating = int(max(min((round(overall_rating) + 28), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"FS\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 75))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 85), 45))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 70))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 35))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 90), 45))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.0\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.9\r\n overall_rating += ((awareness - 50.0) / 10.0) * 4.85\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.5\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.5\r\n overall_rating += ((catching - 50.0) / 10.0) * 3.0\r\n overall_rating += ((jumping - 50.0) / 10.0) * 1.5\r\n overall_rating += ((tackle - 50.0) / 10.0) * 2.5\r\n overall_rating = int(max(min((round(overall_rating) + 30), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"SS\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 75))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 90), 45))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 70))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 35))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 90), 45))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.2\r\n overall_rating += ((strength - 50.0) / 10.0) * 1.7\r\n overall_rating += ((awareness - 50.0) / 10.0) * 4.75\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.7\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.7\r\n overall_rating += ((catching - 50.0) / 10.0) * 3.2\r\n overall_rating += ((jumping - 50.0) / 10.0) * 0.9\r\n overall_rating += ((tackle - 50.0) / 10.0) * 3.2\r\n overall_rating = int(max(min((round(overall_rating) + 30), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"K\":\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 85), 35))\r\n kick_power = int(max(min(int(player_dict[\"kick_power\"]), 99), 80))\r\n kick_accuracy = int(max(min(int(player_dict[\"kick_accuracy\"]), 99), 70))\r\n \r\n overall_rating = (-177 + (0.218 * awareness) + (1.28 * kick_power) 
+ (1.47 * kick_accuracy))\r\n overall_rating = int(max(min(round(overall_rating), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"P\":\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 85), 40))\r\n kick_power = int(max(min(int(player_dict[\"kick_power\"]), 99), 80))\r\n kick_accuracy = int(max(min(int(player_dict[\"kick_accuracy\"]), 99), 70))\r\n \r\n overall_rating = (-183 + (0.218 * awareness) + (1.5 * kick_power) + (1.33 * kick_accuracy))\r\n overall_rating = int(max(min(round(overall_rating), 99), 40))\r\n return overall_rating", "def get_improved_score_factor(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves - opp_moves)", "def custom_score_3(game, player):\n \n \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n player_legal_moves = game.get_legal_moves(player)\n opponent = game.get_opponent(player)\n opponent_legal_moves = game.get_legal_moves(opponent)\n player_legal_move_count, opponent_legal_move_count = \\\n len(player_legal_moves), len(opponent_legal_moves)\n move_count_difference = player_legal_move_count - opponent_legal_move_count\n # Find coordinates of center box\n h, w = get_center_coordinates(game)\n # Retrieve player's coordinates\n y, x = game.get_player_location(player)\n # Obtain coordinate further, closest to origin\n furthest_coord, closest_coord = max(h - y, w -x), min(h - y, w - x)\n # Return weighted, vector-valued length from origin / sum of weights\n weighted_distance_from_center = \\\n math.sqrt((closest_coord**2 + 2*(furthest_coord**2)))/3\n feature_vector = (move_count_difference, weighted_distance_from_center)\n \n weight_vector = (1.0,0.1)\n \n weighted_difference_dot_product = sum(p*q for p,q, \\\n in zip(weight_vector, feature_vector)) \n \n return float(weighted_difference_dot_product)", "def evaluate_board(self, board):\n \n win_score = 100\n win_or_loss_score = 50\n lose_score = 0\n \n if board.win_for(self.opponent()):\n return lose_score\n if board.win_for(self.side):\n return win_score\n if not board.win_for(self.side) or not board.win_for(self.opponent()):\n return win_or_loss_score", "def compute_drift_score(ref_col_prob, col_prob):\n\n return sum(abs(np.asarray(ref_col_prob) - np.array(col_prob)) * 100)", "def quick_ratio(self):\n return (\n self.current_assets - self.inventory_net) / self.current_liabilities", "def evaluate(game, player):\n weights = [2, 200, 2000, 20000]\n reward = 0\n opponent = get_opponent(player)\n for length in range(2, 6):\n reward += weights[length - 2] * get_num_series(game, player, length)\n reward -= weights[length - 2] * get_num_series(game, opponent, length)\n return reward", "def ucbScore(self,totalPlayedTimes):\n winRate = self.winRate()\n #print totalPlayedTimes\n #print self.playedTimes\n confidenceInterval = math.sqrt(2 * math.log(totalPlayedTimes,math.e) / self.playedTimes)\n \n return winRate + confidenceInterval", "def get_opinion_percent(self):\n return (self.get_percent()+100)/2", "def evaluate(self):\n # if player has no move, then player lost, -inf or inf depend on who the player is\n # if player has moves, use heuristics.\n \n #checkColorMoves = self.getAvailableMoves(self.colorIndex)\n #otherColorMoves = self.getAvailableMoves(1-self.colorIndex)\n \n checkColorMoves = 
self.getAvailableMovesPreferLonger(self.colorIndex)\n otherColorMoves = self.getAvailableMovesPreferLonger(1-self.colorIndex)\n\n checkColorPieces = self.getPieceCount(self.colorIndex)\n otherColorPieces = self.getPieceCount(1-self.colorIndex)\n\n #checkColorEdgePieces = self.getEgdePieceCount(self.colorIndex)\n #otherColorEdgePieces = self.getEgdePieceCount(1-self.colorIndex)\n\n if self.player == 'computer':\n if checkColorMoves == 0: #computer doesn't have moves\n return float('-inf')\n elif otherColorMoves == 0: #user doesn't have moves\n return float('inf')\n else:\n #return checkColorPieces - otherColorPieces\n return checkColorMoves - otherColorMoves\n else:\n if checkColorMoves == 0: #user doesn't have moves\n return float('inf')\n elif otherColorMoves == 0: #computer doesn't have moves\n return float('-inf')\n else:\n #return otherColorPieces - checkColorPieces\n return otherColorMoves - checkColorMoves", "def compute_utility(self, board, move, player):\n r_alive = 0\n b_alive = 0\n rk_alive = 0\n bk_alive = 0\n for line in range(8):\n for col in range(8):\n if board[line][col] == \"R\":\n r_alive += 1\n elif board[line][col] == \"B\":\n b_alive += 1\n elif board[line][col] == \"RK\":\n rk_alive += 1\n elif board[line][col] == \"BK\":\n bk_alive += 1\n # if r_Alive > b_Alive:\n # if b_Alive == 0:\n # return 1\n # else: return 0\n # elif r_Alive == 0:\n # return -1\n powkings = 1.2\n result = 0\n if player == 'B':\n result = rk_alive*powkings + r_alive - bk_alive*powkings - b_alive\n else:\n result = bk_alive*powkings + b_alive - rk_alive*powkings - r_alive\n return result", "def rate_club(user, club):\n if not user.is_authenticated():\n return None\n if not club.posel_set.exists():\n return None\n return sum(x[1] for x in rank_in_club(user, club)) / club.posel_set.count()", "def custom_score_general(game, player, constants=[]):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n v = []\n\n if constants[0] != 0 or constants[2] != 0:\n own_moves = number_moves(game, player) / 8\n\n if own_moves == 0:\n return float(\"-inf\")\n\n v.append(own_moves)\n\n if constants[1] != 0 or constants[2] != 0:\n opp_moves = number_moves(game, game.get_opponent(player)) / 8\n\n if opp_moves == 0:\n return float(\"inf\")\n\n v.append(opp_moves)\n\n if constants[2] != 0:\n move_ratio = (own_moves * 8) / (opp_moves * 8) / 8\n v.append(move_ratio)\n\n if constants[3] != 0 or constants[5] != 0:\n own_openness = nearby_openness(game, player) / 80\n v.append(own_openness)\n\n if constants[4] != 0 or constants[5] != 0:\n opp_openness = nearby_openness(game, game.get_opponent(player)) / 80\n v.append(opp_openness)\n\n if constants[5] != 0:\n openness_ratio = (own_openness * 80) / (opp_openness + 0.0001 * 80) /80\n v.append(openness_ratio)\n\n centerness_max = (game.width / 2.)**2 + (game.height / 2.)**2\n\n if constants[6] != 0 or constants[8] != 0:\n own_centerness = centerness(game, player) / centerness_max\n v.append(own_centerness)\n\n if constants[7] != 0 or constants[8] != 0:\n opp_centerness = centerness(game, game.get_opponent(player)) / centerness_max\n v.append(opp_centerness)\n\n if constants[8] != 0:\n centerness_ratio = (own_centerness * centerness_max) / (centerness_max * opp_centerness + 0.1) / centerness_max\n\n return sum([x * y for x, y in zip(constants, v)])", "def expected_value(held_dice, num_die_sides, num_free_dice):\r\n \r\n scores = []\r\n \r\n die_sides = [(die + 1) for die in range(num_die_sides)]\r\n \r\n pos_outcomes = 
gen_all_sequences(die_sides, num_free_dice)\r\n\r\n for outcome in pos_outcomes:\r\n scores.append(score(held_dice + outcome))\r\n \r\n expected_result = float(sum(scores))/len(scores)\r\n \r\n return expected_result", "def value(self):\n #import pdb; pdb.set_trace()\n return ((self.team1.get_cur_hp() / self.team1.get_total_hp()) - \n (self.team2.get_cur_hp() / self.team2.get_total_hp()))", "def custom_score_3(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n #own_moves = len(game.get_legal_moves(player))\n\n #if game.move_count < 23:\n # opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n # return float(own_moves - opp_moves)\n\n return float(free_area_score(game, player) - free_area_score(game, game.get_opponent(player)))", "def get_win_rate_post_season_for_each_coach(self):\n # get winning games for coaches\n self.post_games_won_for_coaches = (\n self.raw_data_postseason\n [['Season','DayNum','WTeamID']]\n # merge for winning team\n .merge(self.num_days_coach_for_season[['Season','TeamID','FirstDayNum','LastDayNum','CoachName']],\n how='left',left_on=['Season','WTeamID'],right_on=['Season','TeamID'])\n .rename(columns={\"FirstDayNum\":\"FirstDayNum_win\",\"LastDayNum\":\"LastDayNum_win\",\"CoachName\":\"CoachName_win\",\"TeamID\":\"TeamID_win\"})\n .pipe(lambda x:x.assign(which_coach_for_win = np.where((x.FirstDayNum_win <= x.DayNum) & (x.LastDayNum_win >= x.DayNum),1,0)))\n .query(\"which_coach_for_win != 0\")\n .groupby(['Season','CoachName_win','WTeamID'])\n .agg({\"which_coach_for_win\":\"sum\"})\n .reset_index()\n )\n\n # get losing games for coaches\n self.post_games_lose_for_coaches = (\n self.raw_data_postseason\n [['Season','DayNum','LTeamID']]\n # merge for losing team\n .merge(self.num_days_coach_for_season[['Season','TeamID','FirstDayNum','LastDayNum','CoachName']],\n how='left',left_on=['Season','LTeamID'],right_on=['Season','TeamID'])\n .rename(columns={\"FirstDayNum\":\"FirstDayNum_lose\",\"LastDayNum\":\"LastDayNum_lose\",\"CoachName\":\"CoachName_lose\",\"TeamID\":\"TeamID_lose\"})\n .pipe(lambda x:x.assign(which_coach_for_lose = np.where((x.FirstDayNum_lose <= x.DayNum) & (x.LastDayNum_lose >= x.DayNum),1,0)))\n .query(\"which_coach_for_lose != 0\")\n .groupby(['Season','CoachName_lose','LTeamID'])\n .agg({\"which_coach_for_lose\":\"sum\"})\n .reset_index()\n )\n\n # combine games won and lost df for post season\n self.combine_post_games_won_lose = (\n self.post_games_lose_for_coaches\n .merge(self.post_games_won_for_coaches,how='left',left_on=['Season','LTeamID','CoachName_lose'],right_on=['Season','WTeamID','CoachName_win'])\n .pipe(lambda x:x.assign(win_rate_post = x.which_coach_for_win/(x.which_coach_for_win + x.which_coach_for_lose)))\n .drop(['CoachName_win','WTeamID'],1)\n .rename(columns={\"CoachName_lose\":\"CoachName\",\"LTeamID\":\"TeamID\",\"which_coach_for_lose\":\"post_games_lost\",\"which_coach_for_win\":\"post_games_won\"})\n .fillna(0)\n )", "def Winner(self, whichPlayer, tResult, score, gameCount):\n\n if whichPlayer == 0:\n tResult[0] = tResult[0] + score[0]\n else:\n tResult[1] = tResult[1] + score[1]\n print(data['tResult'],\"player1 \", tResult[0],\"player2 \",tResult[1])\n if gameCount == 3:\n if tResult[0] > tResult[1]:\n print(data['mplayer1'],tResult[0] - tResult[1])\n else:\n print(data['mplayer2'],tResult[1] - tResult[0])\n return whichPlayer, score, gameCount, tResult", "def state_score_naive(self, game_state, player, weights):\n # walls score\n 
other_players = [p for p in game_state.players if p != player]\n my_walls = player.num_walls\n their_walls = max([p.num_walls for p in other_players])\n walls_diff = (my_walls - their_walls)\n # path length score\n my_path = len(game_state.get_shortest_path_player(player))\n their_path = min([len(game_state.get_shortest_path_player(p)) for p in other_players])\n paths_diff = their_path - my_path\n \n return weights[0]*walls_diff + weights[1]*paths_diff", "def _evaluate_num_pieces(self, player):\n evaluation = 0\n if player is Player.black:\n evaluation += self.num_black_pieces * 10\n evaluation -= self.num_white_pieces * 10\n evaluation += self.num_black_kings * 10\n evaluation -= self.num_white_kings * 10\n elif player is Player.white:\n evaluation -= self.num_black_pieces * 10\n evaluation += self.num_white_pieces * 10\n evaluation -= self.num_black_kings * 10\n evaluation += self.num_white_kings * 10\n\n return evaluation", "def update_calculated_stats(self, player_data):\n # Updates calculated statistics\n fga = player_data['FGA']\n fgm = player_data['FGM']\n pa3 = player_data['3FGA']\n pm3 = player_data['3FGM']\n try:\n player_data['FG%'] = fgm/fga\n except:\n player_data['FG%'] = 0.0\n try:\n player_data['3FG%'] = pm3/pa3\n except:\n player_data['3FG%'] = 0.0\n return(player_data)" ]
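A number of the heuristic snippets in the list above share one core idea: score a position by the difference between the player's legal moves and the opponent's, returning infinite values once the game is decided. The sketch below only distills that recurring pattern for readability; it is illustrative, and the game API it calls (is_loser, is_winner, get_legal_moves, get_opponent) is the one used by those snippets, not something defined in this record.

# Illustrative distillation of the move-difference heuristic that recurs in the snippets above.
def move_difference_score(game, player):
    if game.is_loser(player):
        return float("-inf")
    if game.is_winner(player):
        return float("inf")
    own_moves = len(game.get_legal_moves(player))
    opp_moves = len(game.get_legal_moves(game.get_opponent(player)))
    # Positive when the player is more mobile than the opponent, negative otherwise.
    return float(own_moves - opp_moves)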
[ "0.69899476", "0.66916925", "0.66868806", "0.661133", "0.64811707", "0.63243896", "0.62888366", "0.62563837", "0.623092", "0.6226826", "0.6220208", "0.62145406", "0.6178597", "0.6162016", "0.6135676", "0.6127374", "0.6116254", "0.60962725", "0.60838556", "0.6064083", "0.6043542", "0.6043173", "0.6042079", "0.6042079", "0.6042079", "0.6026387", "0.6002399", "0.5992365", "0.5990092", "0.59802413", "0.5967639", "0.59604764", "0.59492433", "0.59385693", "0.5927311", "0.59258217", "0.5918204", "0.5917469", "0.59118843", "0.59021616", "0.5895061", "0.586048", "0.5857438", "0.5847476", "0.5843569", "0.5818513", "0.581446", "0.58100194", "0.5809828", "0.5805471", "0.5791007", "0.57758796", "0.57748663", "0.57715964", "0.57664126", "0.5762934", "0.5756343", "0.57510525", "0.5750859", "0.5748141", "0.57460785", "0.573172", "0.57231414", "0.5720724", "0.5710389", "0.5708359", "0.5706943", "0.5700107", "0.56961644", "0.5692747", "0.5692365", "0.56806344", "0.5679775", "0.5676199", "0.5667093", "0.56641805", "0.5642257", "0.56381065", "0.56303304", "0.5626978", "0.5623134", "0.56222534", "0.5609644", "0.5605706", "0.5601818", "0.5601246", "0.5594787", "0.55937177", "0.55928826", "0.5592473", "0.55915415", "0.55890733", "0.55888695", "0.55878115", "0.5586946", "0.5583674", "0.558046", "0.5577753", "0.55760235", "0.5575941" ]
0.7008181
0
Method which calculates the Assists Ratio of a player
def set_assists_ratio(self):
    bx = self.get_standard_stats()
    tcInt = float(bx["t2p_int"] + bx["t3p_int"])
    denominador = tcInt + (0.44 * float(bx["tl_int"])) + float(bx["assists"]) + float(bx["turnovers"])
    numerador = float(bx["assists"])
    result = 0.00
    if denominador > 0:
        result = (numerador / denominador) * 100
    self.assists_ratio = "%.2f" % round(result, 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def player_ratio(self, ctx):\r\n player = ctx.message.content.split(' ')[1]\r\n if os.environ.get(\"WoW_Token\") is None:\r\n return\r\n else:\r\n async with aiohttp.ClientSession().get('https://us.api.battle.net/wow/character/zul\\'jin/' + player + '?fields=pvp&locale=en_US&apikey=' + os.environ.get(\"WoW_Token\")) as res:\r\n if res.status == 200:\r\n data = await res.json()\r\n player_pvp_stats = data['pvp']['brackets']['ARENA_BRACKET_3v3']\r\n await ctx.message.channel.send(u\"Player: {:s}\").format(player)\r\n await ctx.message.channel.send(\"Rating: {:d}\".format(player_pvp_stats['rating']))\r\n await ctx.message.channel.send(\"Season Wins: {:d}\".format(player_pvp_stats['seasonWon']))\r\n await ctx.message.channel.send(\"Season Losses: {:d}\".format(player_pvp_stats['seasonLost']))\r\n\r\n if player_pvp_stats['seasonWon'] == 0 or player_pvp_stats['seasonLost'] == 0:\r\n await ctx.message.channel.send(\"Ratio: 0\")\r\n else:\r\n await ctx.message.channel.send(\"Ratio: {:.4f}\".format(\r\n float(player_pvp_stats['seasonWon'])/\r\n float(player_pvp_stats['seasonLost']))\r\n )", "def set_assists_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n team_tc_conv = team[\"t2p_conv\"] + team[\"t3p_conv\"]\n player_tc_conv = bx[\"t2p_conv\"] + bx[\"t3p_conv\"]\n result = 0.00\n try:\n if bx[\"minutes\"] > 0:\n result = (bx[\"assists\"] / (((bx[\"minutes\"] / (team[\"minutes\"] / 5)) * team_tc_conv) - player_tc_conv))*100\n result = result if result <= 100 and result >= 0 else 0\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n except InvalidOperation:\n print(BCOLORS.WARNING + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n\n self.assists_percentage = \"%.2f\" % round(result, 2)", "def cal_hit_ratio(self):\n full, top_k = self._subjects, self._top_k\n top_k = full[full['rank']<=top_k]\n score = 0.0\n # golden items hit in the top_K items\n score_1 = sum([(len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==1.0)])) for i,d in top_k.groupby('user')])\n score_2 = sum([(len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==0.0)])) for i,d in top_k.groupby('user')])\n score = score_1 - score_2\n return score/full['user'].nunique()", "def set_assists_per_turnover(self):\n bx = self.get_standard_stats()\n ratio = bx[\"assists\"]\n if bx[\"turnovers\"] > 0:\n ratio = bx[\"assists\"] / bx[\"turnovers\"]\n self.assists_per_turnover = \"%.2f\" % round(ratio, 2)", "def quick_ratio(self):\n return (\n self.current_assets - self.inventory_net) / self.current_liabilities", "def winning_percentage(self):\n return float(len(self.wins))/len((self.wins+self.losses))", "def current_ratio(self):\n return self.current_assets / self.current_liabilities", "def calculateRatio(levelDims):\n highestReso = np.asarray(levelDims[0])\n lowestReso = np.asarray(levelDims[-1])\n Xratio, Yratio = highestReso/lowestReso\n return (Xratio, Yratio)", "def compute_player_score():\n\n progress_bar = ProgressBar(label=\"Computing universes\")\n\n survivals_count = 0\n for i in range(PARALLEL_UNIVERSES_COUNT):\n if simulate_universe():\n survivals_count += 1\n progress_bar.set_progression((i + 1) / PARALLEL_UNIVERSES_COUNT)\n\n progress_bar.end(\"\\n\\n\")\n\n return survivals_count / PARALLEL_UNIVERSES_COUNT", "def winrate(matches):\n if not matches:\n print('no matches')\n return None\n\n win_loss = [match['result'] for match in matches]\n return sum(win_loss)/len(win_loss)", "def 
get_winpercent(self, game: 'Game' = None, player: 'Player' = None):\n if game and player:\n pass\n elif game:\n play_count = self.play_set.filter(game=game).count()\n win_count = self.play_set.filter(winner=self, game=game).count()\n return win_count / play_count * 100\n elif player:\n pass\n # play_count = self.play_set.filter(players__in=player).count()\n # win_count = self.play_set.filter(\n # winner=self, player=player).count()\n # return win_count / play_count * 100\n else:\n return self.winpercent", "def getRatio(probe_num, position_vector, shot_range, dir, day ='050119r'):\n ratio_x = 0\n ratio_y = 0\n ratio_z = 0\n # helm_B = [0,0,0]\n divideby = 0\n for shot in range(shot_range[0], shot_range[1]+1):\n print( 'On shot ', day+str(shot), ' for probe ',probe_num)\n x,y,z, currmax,helmB_new = probe_calib(day+str(shot), probe_num, position_vector,dir)\n ratio_x = ratio_x + x\n ratio_y = ratio_y + y\n ratio_z = ratio_z + z\n # helm_B = [helm_B[i] + helmB_new[i] for i in len(helmB)]\n divideby = divideby + 1 #averaging over the number of shots\n ratio_Bx = ratio_x/divideby\n ratio_By = ratio_y/divideby\n ratio_Bz = ratio_z/divideby\n # helmB = [helm_B]/divideby\n # print ratio_Bx, ratio_By, ratio_Bz, helmB\n # print(\"ratio_Bx %f, ratio_By %f, ratio_Bz %f, helmB%s\"%(ratio_Bx, ratio_By, ratio_Bz, helmB))\n Bx_sqr =ratio_x**2\n By_sqr =ratio_y**2\n Bz_sqr =ratio_z**2\n B = Bx_sqr + By_sqr+ Bz_sqr\n norm_factor = np.sqrt(B)\n ratio_Bx, ratio_By, ratio_Bz = [ratio_Bx, ratio_By, ratio_Bz]/norm_factor\n\n return (ratio_Bx, ratio_By, ratio_Bz, norm_factor)", "def horizontal_ratio(self):\n if self.pupils_located:\n pupil_left = self.eye_left.pupil.x / (self.eye_left.center[0] * 2 - 10)\n pupil_right = self.eye_right.pupil.x / (self.eye_right.center[0] * 2 - 10)\n return (pupil_left + pupil_right) / 2", "def set_offensive_ratio(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n if bx[\"minutes\"] > 0 and (bx[\"t2p_int\"] + bx[\"t3p_int\"]) > 0:\n fgm = bx[\"t2p_conv\"] + bx[\"t3p_conv\"]\n fga = bx[\"t2p_int\"] + bx[\"t3p_int\"]\n team_fgm = team[\"t2p_conv\"] + team[\"t3p_conv\"]\n team_fga = team[\"t2p_int\"] + team[\"t3p_int\"]\n team_points = team[\"t2p_conv\"]*2 + team[\"t3p_conv\"]*3 + team[\"tl_conv\"]\n points = bx[\"t2p_conv\"]*2 + bx[\"t3p_conv\"]*3 + bx[\"tl_conv\"]\n\n try:\n qAST = (Decimal(bx[\"minutes\"] / (team[\"minutes\"] / 5)) * (Decimal('1.14') * Decimal((team[\"assists\"] - bx[\"assists\"]) / team_fgm))) + \\\n Decimal((((team[\"assists\"] / team[\"minutes\"]) * bx[\"minutes\"] * 5 - bx[\"assists\"]) / ((team_fgm / team[\"minutes\"]) * bx[\"minutes\"] * 5 - fgm)) * (1 - (bx[\"minutes\"] / (team[\"minutes\"] / 5))))\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n qAST = 1\n except InvalidOperation:\n print(BCOLORS.WARNING + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n qAST = 1\n\n fg_part = fgm * (1 - Decimal('0.5') * Decimal((points - bx[\"tl_conv\"]) / (2 * fga)) * qAST)\n\n try:\n ast_part = Decimal('0.5') * Decimal(((team_points - team[\"tl_conv\"]) - (points - bx[\"tl_conv\"])) / (2*(team_fga - fga))) * bx[\"assists\"]\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n ast_part = 0\n\n if bx[\"tl_int\"] > 0:\n ft_part = Decimal(1 - (1 - (bx[\"tl_conv\"] / bx[\"tl_int\"]))**2) * Decimal('0.4') * bx[\"tl_int\"]\n else:\n ft_part = 0\n team_scoring_poss = Decimal(team_fgm + Decimal(1 - (1 - (team[\"tl_conv\"] / 
team[\"tl_int\"]))**2) * team[\"tl_int\"] * Decimal('0.4'))\n try:\n team_orb_percentage = Decimal(team[\"reb_of\"] / (team[\"reb_of\"] + ((opp_team[\"reb_def\"] + opp_team[\"reb_of\"]) - opp_team[\"reb_of\"])))\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n team_orb_percentage = 0\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n team_orb_percentage = 0\n\n team_play_percentage = Decimal(team_scoring_poss / (team_fga + team[\"tl_int\"] * Decimal('0.4') + team[\"turnovers\"]))\n try:\n team_orb_weight = ((1 - team_orb_percentage) * team_play_percentage) / ((1 - team_orb_percentage) * team_play_percentage + team_orb_percentage * (1 - team_play_percentage))\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n team_orb_weight = 0\n\n orb_part = bx[\"reb_of\"] * team_orb_weight * team_play_percentage\n\n fg_x_poss = (fga - fgm) * (1 - Decimal('1.07') * team_orb_percentage)\n if bx[\"tl_conv\"] > 0:\n ft_x_poss = Decimal((1 - (bx[\"tl_conv\"] / bx[\"tl_int\"]))**2) * Decimal('0.4') * bx[\"tl_int\"]\n else:\n ft_x_poss = Decimal(1 - (bx[\"tl_conv\"] / 1)**2) * Decimal('0.4') * bx[\"tl_int\"]\n try:\n sc_poss = (fg_part + ast_part + ft_part) * (1 - (team[\"reb_of\"] / team_scoring_poss) * team_orb_weight * team_play_percentage) + orb_part\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n sc_poss =0\n\n tot_poss = sc_poss + fg_x_poss + ft_x_poss + bx[\"turnovers\"]\n\n pprod_fg_part = 2 * (fgm + Decimal('0.5') * bx[\"t3p_conv\"]) * (1 - Decimal('0.5') * Decimal((points - bx[\"tl_conv\"]) / (2 * fga)) * qAST)\n\n try:\n pprod_ast_part = 2 * ((team_fgm - fgm + Decimal('0.5') * (team[\"t3p_conv\"] - bx[\"t3p_conv\"])) / (team_fgm - fgm)) * Decimal('0.5') * Decimal(((team_points - team[\"tl_conv\"]) - (points - bx[\"tl_conv\"])) / (2 * (team_fga - fga))) * bx[\"assists\"]\n except:\n pprod_ast_part = 0\n\n pprod_orb_part = bx[\"reb_of\"] * team_orb_weight * team_play_percentage * (team_points / (team_fgm + Decimal(1 - (team[\"tl_conv\"] / team[\"tl_int\"])**2) * Decimal('0.4') * team[\"tl_int\"]))\n try:\n pprod = (pprod_fg_part + pprod_ast_part + bx[\"tl_conv\"]) * (1 - (team[\"reb_of\"] / team_scoring_poss) * team_orb_weight * team_play_percentage) + pprod_orb_part\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n pprod = 0\n\n try:\n result = 100 * (pprod / tot_poss)\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n result = 0\n\n # print(\"fgm: \" + str(fgm))\n # print(\"fga: \" + str(fga))\n # print(\"team_fgm: \" + str(team_fgm))\n # print(\"team_fga: \" + str(team_fga))\n # print(\"team_points: \" + str(team_points))\n # print(\"points: \" + str(points))\n # print(\"qAST: \" + str(qAST))\n # print(\"fg_part: \" + str(fg_part))\n # print(\"ast_part: \" + str(ast_part))\n # print(\"ft_part: \" + str(ft_part))\n # print(\"team_scoring_poss: \" + str(team_scoring_poss))\n # print(\"team_orb_percentage: \" + str(team_orb_percentage))\n # print(\"team_play_percentage: \" + str(team_play_percentage))\n # print(\"team_orb_weight: \" + str(team_orb_weight))\n # print(\"orb_part: \" + str(orb_part))\n # print(\"fg_x_poss: \" + str(fg_x_poss))\n # print(\"ft_x_poss: \" + str(ft_x_poss))\n # print(\"sc_poss: \" + str(sc_poss))\n # print(\"tot_poss: \" + str(tot_poss))\n # print(\"pprod_fg_part: \" + str(pprod_fg_part))\n # 
print(\"pprod_ast_part: \" + str(pprod_ast_part))\n # print(\"pprod_orb_part: \" + str(pprod_orb_part))\n # print(\"pprod: \" + str(pprod))\n # print(\"result: \" + str(result) + \"\\n\")\n else:\n result = 0.00\n\n self.ortg = \"%.2f\" % round(result, 2)\n if Decimal(self.ortg) < 0 or Decimal(self.ortg) >= 1000:\n \"\"\"For one game, maybe we've got a negative result or one so big, so, for just only a game, we get the ORTG \n using team's formula\"\"\"\n print(BCOLORS.OKBLUE + \"ORTG negativo o superior a 1000 para jugadora => recalculamos a través de la fórmula de equipo\" + BCOLORS.ENDC)\n bx = self.get_standard_stats()\n result = round((bx[\"t2p_conv\"]*2 + bx[\"t3p_conv\"]*3 + bx[\"tl_conv\"])/self.get_team_possessions(), 2)\n self.ortg = \"%.2f\" % result", "def golden_ratio():\n print((1+math.sqrt(5))/2)", "def exceeded_ratio(self) -> float:\n return self.amount_spent / self.total_amount", "def adv_ratio(self): # XXX\r\n bw = StatsRouter.global_bw_mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/bw", "def GetResult(self, playerjm):\n return self.score / len(self.scores)", "def win_ratio_avg(self):\n win_ratio = 0\n # Adds all the win ratios of team in this conference which will be\n # used to compute the win ratio average.\n for team_obj in self._conf_teams:\n ### INVARIANT: team_obj is a Team class object and\n ### self._conf_teams is a list of Team class objects.\n win_ratio += team_obj._win_ratio\n return win_ratio/len(self._conf_teams)", "def administer(self):\n\n score = 0.0\n for question in self.questions:\n if question.ask_and_evaluate() is True:\n score += 1\n return (score / len(self.questions)) * 100", "def get_real_rating(self):\n if not (self.votes and self.score):\n return 0\n return float(self.score)/self.votes", "def _ratio(a1, a2):\n abs_residues = np.abs(a1 - a2).sum()\n avg_abs_sum = 0.5 * np.abs(a1).sum() + 0.5 * np.abs(a2).sum()\n return abs_residues / avg_abs_sum", "def calculate_score_pairs(hand_value,*args):\n # ratios=[1,10,100,1000,10000]\n ratios = CONST.RATIOS[:]\n return sum(map(lambda a,b:a/b, args, ratios))+hand_value", "def custom_score_3(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player)\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player))\n if opp_moves == 0:\n return float(\"inf\")\n\n #Between 1-8\n return own_moves / opp_moves", "def custom_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player) / 8\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player)) / 8\n if opp_moves == 0:\n return float(\"inf\")\n\n move_ratio = (own_moves * 8) / (opp_moves * 8) / 8\n\n # Calculate centerness_score\n completeness = completeness_of_game(game)\n centerness_score = 0\n if completeness < 0.5:\n centerness_max = (game.width / 2.)**2 + (game.height / 2.)**2\n\n own_centerness = centerness(game, player) / centerness_max\n opp_centerness = centerness(game, game.get_opponent(player)) / centerness_max\n centerness_ratio = (own_centerness * centerness_max) / (centerness_max * opp_centerness + 0.1) / centerness_max\n\n centerness_score = -1 * own_centerness + opp_centerness - centerness_ratio\n\n return 2 * own_moves - 2 * opp_moves + 2 * move_ratio + centerness_score", "def expected_value(held_dice, num_die_sides, 
num_free_dice):\n result = 0\n outcomes = range(1, num_die_sides + 1)\n possible = sorted(gen_all_sequences(outcomes, num_free_dice))\n for hand in possible:\n result += score(held_dice + hand)\n return float(result)/len(possible)", "def p(party, vote_count, s):\n return t(party, vote_count) / d(s)", "def calculate_scores(self):\n words = self.walk_board()\n player_scores = {}\n for word in words:\n player = word.get_owning_player()\n if player not in player_scores:\n player_scores[player] = 0\n player_scores[player] += word.get_score()\n return player_scores", "def mc_update_scores(scores, board, player):\n dim = board.get_dim()\n winner = board.check_win()\n other_player = provided.switch_player(player)\n \n if winner == provided.DRAW:\n ratio = {player: 0, other_player: 0, 1: 0}\n elif winner == player:\n ratio = {player: 0 + SCORE_CURRENT, other_player: 0 - SCORE_OTHER, provided.EMPTY: 0}\n elif winner == other_player:\n ratio = {player: 0 - SCORE_CURRENT, other_player: 0 + SCORE_OTHER, provided.EMPTY: 0}\t\n \n for valx in range(dim):\n for valy in range(dim): \n scores[valx][valy] += ratio[board.square(valx, valy)] \n return scores", "def calculate_overall_rating(player_dict):\r\n if player_dict[\"position\"].upper() == \"QB\":\r\n throw_power = int(max(min(int(player_dict[\"throw_power\"]), 99), 70))\r\n throw_accuracy = int(max(min(math.ceil(\r\n ((2 * (\r\n int(player_dict[\"throw_accuracy_short\"]) + \r\n int(player_dict[\"throw_accuracy_mid\"]) + \r\n int(player_dict[\"throw_accuracy_deep\"]) + \r\n int(player_dict[\"throw_on_the_run\"]) + \r\n int(player_dict[\"playaction\"])\r\n )) - (2 * min(\r\n int(player_dict[\"throw_accuracy_short\"]), \r\n int(player_dict[\"throw_accuracy_mid\"]), \r\n int(player_dict[\"throw_accuracy_deep\"]), \r\n int(player_dict[\"throw_on_the_run\"]), \r\n int(player_dict[\"playaction\"])\r\n ))\r\n ) / 8\r\n ), 99), 60))\r\n break_tackles = int(max(min(\r\n math.ceil(((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 7), \r\n 90), 20))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 98), 45))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 55))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((throw_power - 50.0) / 10.0) * 4.9\r\n overall_rating += ((throw_accuracy - 50.0) / 10.0) * 5.8\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 0.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 0.8\r\n overall_rating += ((awareness - 50.0) / 10.0) * 4.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 2.0\r\n overall_rating = int(max(min((round(overall_rating) + 28), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"HB\":\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 70), 25))\r\n break_tackles = int(max(min(\r\n (math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 5), \r\n 99), 50))\r\n carrying = int(max(min(int(player_dict[\"carrying\"]), 99), 60))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 90), 45))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 80))\r\n catching = 
int(max(min(int(player_dict[\"catching\"]), 95), 50))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 0.33\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 0.8\r\n overall_rating += ((carrying - 50.0) / 10.0) * 2.0\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 2.8\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.0\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.6\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.3\r\n overall_rating += ((catching - 50.0) / 10.0) * 1.4\r\n overall_rating = int(max(min((round(overall_rating) + 27), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"FB\":\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 75), 40))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n ), 85), 45))\r\n break_tackles = int(max(min(\r\n (math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 5), \r\n 99), 55))\r\n carrying = int(max(min(int(player_dict[\"carrying\"]), 99), 60))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 65))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 55))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 95), 60))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 60))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 45))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 1.0\r\n overall_rating += ((run_block - 50.0) / 10.0) * 7.2\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 1.8\r\n overall_rating += ((carrying - 50.0) / 10.0) * 1.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.0\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.8\r\n overall_rating += ((strength - 50.0) / 10.0) * 1.8\r\n overall_rating += ((speed - 50.0) / 10.0) * 1.8\r\n overall_rating += ((catching - 50.0) / 10.0) * 5.2\r\n overall_rating = int(max(min((round(overall_rating) + 39), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"WR\":\r\n break_tackles = int(max(min(\r\n math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2), \r\n 80), 35))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 75))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 75))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 85), 35))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 99), 65))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 0.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.3\r\n overall_rating += ((agility - 50.0) / 10.0) * 2.3\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.3\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.8\r\n overall_rating += ((speed - 50.0) / 10.0) * 2.3\r\n overall_rating += ((catching - 50.0) / 
10.0) * 4.75\r\n overall_rating += ((jumping - 50.0) / 10.0) * 1.4\r\n overall_rating = int(max(min((round(overall_rating) + 26), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"TE\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 55))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 90), 55))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 55))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 60))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 99), 45))\r\n break_tackles = int(max(min(\r\n (math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 5), \r\n 95), 20))\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 80), 35))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n ), 85), 35))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 2.65\r\n overall_rating += ((strength - 50.0) / 10.0) * 2.65\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.65\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.25\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.25\r\n overall_rating += ((catching - 50.0) / 10.0) * 5.4\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 1.2\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 1.2\r\n overall_rating += ((run_block - 50.0) / 10.0) * 5.4\r\n overall_rating = int(max(min((round(overall_rating) + 35), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"LT\" or player_dict[\"position\"].upper() == \"RT\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 85), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 85), 40))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 90), 60))\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 99), 60))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n ), 99), 60))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 0.8\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.3\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.3\r\n overall_rating += ((agility - 50.0) / 10.0) * 0.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 0.8\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 4.75\r\n overall_rating += ((run_block - 50.0) / 10.0) * 3.75\r\n overall_rating = int(max(min((round(overall_rating) + 26), 99), 40))\r\n return overall_rating\r\n \r\n if (player_dict[\"position\"].upper() == \"LG\" or player_dict[\"position\"].upper() == \"RG\" or \r\n player_dict[\"position\"].upper() == \"C\"):\r\n speed = int(max(min(int(player_dict[\"speed\"]), 85), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 
40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 85), 40))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 90), 60))\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 99), 65))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n ), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 1.7\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.25\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.25\r\n overall_rating += ((agility - 50.0) / 10.0) * 0.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.7\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 3.25\r\n overall_rating += ((run_block - 50.0) / 10.0) * 4.8\r\n overall_rating = int(max(min((round(overall_rating) + 28), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"LE\" or player_dict[\"position\"].upper() == \"RE\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 90), 55))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 60))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 90), 45))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 60))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.75\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.75\r\n overall_rating += ((awareness - 50.0) / 10.0) * 1.75\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.75\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 3.8\r\n overall_rating += ((tackle - 50.0) / 10.0) * 5.5\r\n overall_rating = int(max(min((round(overall_rating) + 30), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"DT\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 90), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 90), 40))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 1.8\r\n overall_rating += ((strength - 50.0) / 10.0) * 5.5\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 1\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.8\r\n overall_rating += ((tackle - 50.0) / 10.0) * 4.55\r\n overall_rating = int(max(min((round(overall_rating) + 29), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"LOLB\" or player_dict[\"position\"].upper() == \"ROLB\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 70))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 60))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 65))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 75))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 90), 20))\r\n tackle = 
int(max(min(int(player_dict[\"tackle\"]), 99), 60))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.75\r\n overall_rating += ((strength - 50.0) / 10.0) * 2.4\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.6\r\n overall_rating += ((agility - 50.0) / 10.0) * 2.4\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.3\r\n overall_rating += ((catching - 50.0) / 10.0) * 1.3\r\n overall_rating += ((tackle - 50.0) / 10.0) * 4.8\r\n overall_rating = int(max(min((round(overall_rating) + 29), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"MLB\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 65))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 60))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 65))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 75))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 0.75\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.4\r\n overall_rating += ((awareness - 50.0) / 10.0) * 5.2\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.65\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.75\r\n overall_rating += ((tackle - 50.0) / 10.0) * 5.2\r\n overall_rating = int(max(min((round(overall_rating) + 27), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"CB\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 80))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 85), 40))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 75))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 40))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 85), 30))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.85\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.9\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.85\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.55\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.35\r\n overall_rating += ((catching - 50.0) / 10.0) * 3\r\n overall_rating += ((jumping - 50.0) / 10.0) * 1.55\r\n overall_rating += ((tackle - 50.0) / 10.0) * 1.55\r\n overall_rating = int(max(min((round(overall_rating) + 28), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"FS\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 75))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 85), 45))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 70))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 35))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 90), 45))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.0\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.9\r\n overall_rating += ((awareness - 50.0) / 10.0) * 4.85\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.5\r\n overall_rating += 
((acceleration - 50.0) / 10.0) * 2.5\r\n overall_rating += ((catching - 50.0) / 10.0) * 3.0\r\n overall_rating += ((jumping - 50.0) / 10.0) * 1.5\r\n overall_rating += ((tackle - 50.0) / 10.0) * 2.5\r\n overall_rating = int(max(min((round(overall_rating) + 30), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"SS\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 75))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 90), 45))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 70))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 35))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 90), 45))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.2\r\n overall_rating += ((strength - 50.0) / 10.0) * 1.7\r\n overall_rating += ((awareness - 50.0) / 10.0) * 4.75\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.7\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.7\r\n overall_rating += ((catching - 50.0) / 10.0) * 3.2\r\n overall_rating += ((jumping - 50.0) / 10.0) * 0.9\r\n overall_rating += ((tackle - 50.0) / 10.0) * 3.2\r\n overall_rating = int(max(min((round(overall_rating) + 30), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"K\":\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 85), 35))\r\n kick_power = int(max(min(int(player_dict[\"kick_power\"]), 99), 80))\r\n kick_accuracy = int(max(min(int(player_dict[\"kick_accuracy\"]), 99), 70))\r\n \r\n overall_rating = (-177 + (0.218 * awareness) + (1.28 * kick_power) + (1.47 * kick_accuracy))\r\n overall_rating = int(max(min(round(overall_rating), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"P\":\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 85), 40))\r\n kick_power = int(max(min(int(player_dict[\"kick_power\"]), 99), 80))\r\n kick_accuracy = int(max(min(int(player_dict[\"kick_accuracy\"]), 99), 70))\r\n \r\n overall_rating = (-183 + (0.218 * awareness) + (1.5 * kick_power) + (1.33 * kick_accuracy))\r\n overall_rating = int(max(min(round(overall_rating), 99), 40))\r\n return overall_rating", "def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)", "def ret_vol_ratio(self) -> float:\n return self.geo_ret / self.vol", "def ratio_func(a, b):\n return a / b", "def custom_score_6(game, player):\n \"\"\"custom_score_6 heuristic function aims towards weighted chances heuristics\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) #Calculate length of available moves for myPlayer\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves for the oppositePlayer\n return float(length_my_player_moves*length_my_player_moves - 1.5*length_opp_payer_moves*length_opp_payer_moves)", "def calc_album_match(song: Song, result: Result) -> float:\n\n if not result.album:\n return 0.0\n\n return ratio(slugify(song.album_name), slugify(result.album))", "def cash_ratio(self):\n return self.cash / self.current_liabilities", "def state_score_naive(self, game_state, player, 
weights):\n # walls score\n other_players = [p for p in game_state.players if p != player]\n my_walls = player.num_walls\n their_walls = max([p.num_walls for p in other_players])\n walls_diff = (my_walls - their_walls)\n # path length score\n my_path = len(game_state.get_shortest_path_player(player))\n their_path = min([len(game_state.get_shortest_path_player(p)) for p in other_players])\n paths_diff = their_path - my_path\n \n return weights[0]*walls_diff + weights[1]*paths_diff", "def get_win_percentage(self) -> float:\n if self.wins == 0:\n return 0.0\n else:\n return round((self.wins / (self.wins + self.losses)) * 100, 2)", "def average_win_rate(strategy, baseline=always_roll(4)):\n win_rate_as_player_0 = 1 - make_averaged(winner)(strategy, baseline)\n win_rate_as_player_1 = make_averaged(winner)(baseline, strategy)\n\n return (win_rate_as_player_0 + win_rate_as_player_1) / 2", "def average_win_rate(strategy, baseline=always_roll(4)):\n win_rate_as_player_0 = 1 - make_averaged(winner)(strategy, baseline)\n win_rate_as_player_1 = make_averaged(winner)(baseline, strategy)\n\n return (win_rate_as_player_0 + win_rate_as_player_1) / 2", "def average_win_rate(strategy, baseline=always_roll(4)):\n win_rate_as_player_0 = 1 - make_averaged(winner)(strategy, baseline)\n win_rate_as_player_1 = make_averaged(winner)(baseline, strategy)\n\n return (win_rate_as_player_0 + win_rate_as_player_1) / 2", "def score(self,player, board):\r\n numPlayer = 0\r\n numOpp = 0\r\n for i in self.squares():\r\n if board[i] == player:\r\n numPlayer+= SQUARE_WEIGHTS[i]\r\n else:\r\n numOpp+=SQUARE_WEIGHTS[i]\r\n return numPlayer-numOpp", "def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player)\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player))\n if opp_moves == 0:\n return float(\"inf\")\n\n move_ratio = own_moves / opp_moves\n\n completeness = completeness_of_game(game)\n centerness_score = 0\n\n if completeness < 0.5:\n own_centerness = centerness(game, player)\n opp_centerness = centerness(game, game.get_opponent(player))\n centerness_ratio = own_centerness / opp_centerness + 0.1\n\n center_score = -1 * own_centerness + opp_centerness - centerness_ratio\n\n return 2 * own_moves - 2 * opp_moves + 2 * move_ratio + centerness_score", "def _ratio(sim: xr.DataArray, ref: xr.DataArray) -> xr.DataArray:\n out = sim / ref\n out.attrs[\"units\"] = \"\"\n return out", "def custom_score(game, player):\n \n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n \n # return different between # of my agent's move and oppenent's\n return float(own_moves - opp_moves)", "def get_rating(self):\n if not (self.votes and self.score):\n return 0\n return float(self.score)/(self.votes+self.field.weight)", "def mutual_info_score(self):\n _, _, I_CK = self._entropies()\n return I_CK / self.grand_total", "def odds_ratio(target_pct, peer_pct):\n odds_ratio = 0.0\n if peer_pct == 0.0:\n return None\n elif target_pct == 0.0:\n odds_ratio = 0.0\n elif target_pct == peer_pct:\n odds_ratio = 1.0\n elif peer_pct > 0.0 and target_pct < 1.0 and peer_pct < 1.0:\n odds_ratio = (target_pct/(1-target_pct))/(peer_pct/(1-peer_pct))\n return round(odds_ratio, 3)", "def update_calculated_stats(self, player_data):\n # Updates calculated statistics\n fga = player_data['FGA']\n 
fgm = player_data['FGM']\n pa3 = player_data['3FGA']\n pm3 = player_data['3FGM']\n try:\n player_data['FG%'] = fgm/fga\n except:\n player_data['FG%'] = 0.0\n try:\n player_data['3FG%'] = pm3/pa3\n except:\n player_data['3FG%'] = 0.0\n return(player_data)", "def custom_score_4(game, player):\n \"\"\"custom_score_4 heuristic function aims at minimizing loosing chances of myPlayer\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) #Calculate length of available moves for myPlayer\n length_opp_payer_moves = 1.0 * len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves for the oppositePlayer\n\n if length_my_player_moves == 0:\n return float(\"-inf\")\n\n if length_opp_payer_moves == 0:\n return float(\"inf\")\n\n return float(-length_opp_payer_moves/length_my_player_moves)", "def calc_match_points(self, match):\n if match.winner == match.TIE:\n match.home.tournament_score += 1\n match.away.tournament_score += 1\n else:\n match.winner.tournament_score += 3\n match.loser.tournament_score += 0", "def get_real_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_real_rating() / self.field.range)", "def calculateWinRate():\n times = 10\n winRate = 0.0\n for i in range(times):\n game = Game('user', 6, 6)\n winRate += game.play(5, False, True, False, False)\n winRate = winRate/times\n print \"Winrate:\", winRate", "def expected_value(held_dice, num_die_sides, num_free_dice):\r\n die_outcomes = set(range(1, num_die_sides + 1))\r\n \r\n possible_sequences = gen_all_sequences(die_outcomes, num_free_dice)\r\n \r\n total_score = 0.0\r\n for sequence in possible_sequences:\r\n total_score += score(held_dice + sequence)\r\n \r\n return float(total_score / len(possible_sequences))", "def golden_ratio():\n return 1.61803398875", "def ratio(self):\n try:\n return self.fields['uploadedEver'] / float(self.fields['downloadedEver'])\n except ZeroDivisionError:\n return 0.0", "def get_fool_ratio(self, test_acc, attack_accs):\n return [round(100*((test_acc - attack_acc) / test_acc), 2) for attack_acc in attack_accs]", "def calculate_score(player_cards):\n score = sum(player_cards)\n return score", "def winRate(DF):\r\n df = DF[\"return\"]\r\n pos = df[df>1]\r\n neg = df[df<1]\r\n return (len(pos) / len(pos + neg)) * 100", "def get_percent(self, n):\n controlled = 0.00\n for i in range(len(self.tile_contents)):\n if(self.tile_contents[i].player_number == n):\n controlled += 1.00\n \n return float(controlled / self.paint_blocks)", "def totaled_ratio_calculator(numerator, denominator):\n if denominator != 0:\n ratio = round(float(numerator) / denominator, 3)\n else:\n ratio = 0\n return ratio", "def get_sharpe_ratio(allocs, prices):\n\tport_val = get_portfolio_value(prices, allocs, start_val=1.0)\n\tsharpe_ratio = get_portfolio_stats(port_val, daily_rf=0.0, samples_per_year=252)[3]\n\treturn -sharpe_ratio", "def expected_value(held_dice, num_die_sides, num_free_dice):\n list_scores = []\n die_sides = [die for die in range(1, num_die_sides + 1)]\n possible_seq = gen_all_sequences(die_sides, num_free_dice)\n for item in possible_seq:\n list_scores.append(score(held_dice + item))\n \n return float(sum(list_scores)) / len(list_scores)", "def custom_score_2(game, player):\n \n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n 
\n # shortcut to definite state:\n # 1. my agent win -> return very high score\n if opp_moves == 0:\n return float(\"inf\")\n # 2. opponenent's agent win -> return very low score\n elif own_moves == 0:\n return float(\"-inf\")\n\n # score: avaliable moves ratio\n return float(own_moves/opp_moves)", "def _blinking_ratio(self, landmarks, points):\n left = (landmarks.part(points[0]).x, landmarks.part(points[0]).y)\n right = (landmarks.part(points[3]).x, landmarks.part(points[3]).y)\n top = self._middle_point(landmarks.part(points[1]), landmarks.part(points[2]))\n bottom = self._middle_point(landmarks.part(points[5]), landmarks.part(points[4]))\n eye_width = math.hypot((left[0] - right[0]), (left[1] - right[1]))\n eye_height = math.hypot((top[0] - bottom[0]), (top[1] - bottom[1]))\n\n try:\n ratio = eye_width / eye_height\n except ZeroDivisionError:\n ratio = None\n\n return ratio", "def calculate_scores(players):\n scores = {}\n for player in players.tuple_:\n scores[player.id_] = player.score()\n return scores", "def eval(self):\n\n ratio_player_win = self.player_wins / self.num_test\n ratio_opponent_win = self.opponent_wins / self.num_test\n ratio_tie = 1.0 - ratio_player_win - ratio_opponent_win\n\n print(\"\\nPlayer Test Results:\")\n print(\"\\tWins {0:.2f}%\".format(100.0 * ratio_player_win))\n print(\"\\tLosses {0:.2f}%\".format(100.0 * ratio_opponent_win))\n print(\"\\tTie {0:.2f}%\".format(100.0 * ratio_tie))\n\n ratio_optimal_win = self.optimal_wins / self.num_test\n ratio_optimal_loose = self.optimal_losses / self.num_test\n ratio_optimal_tie = 1.0 - ratio_optimal_win - ratio_optimal_loose\n\n print(\"\\nOptimal Results:\")\n print(\"\\tPlayer {0:.2f}%\".format(100.0 * ratio_optimal_win))\n print(\"\\tOpponent {0:.2f}%\".format(100.0 * ratio_optimal_loose))\n print(\"\\tTie {0:.2f}%\".format(100.0 * ratio_optimal_tie))\n\n # Ratio of win, loss diff between player and optimal\n # positive if the player beats opponent\n relative_result = ((ratio_player_win - ratio_opponent_win) /\n (ratio_optimal_win - ratio_optimal_loose))\n\n print(\"\\nResults Player Relative Optimal:\")\n print(\"\\tWins {0:.2f}%\".format(100.0 * ratio_player_win / ratio_optimal_win))\n print(\"\\tLosses {0:.2f}%\".format(100.0 * ratio_opponent_win / ratio_optimal_loose))\n print(\"\\tScore {0:.2f}%\".format(100.0 * relative_result))\n\n if self.last_test is not None:\n print(\"Diff from last test score is {0:.2f}%\".format(100.0 * (relative_result - self.last_test)))\n self.last_test = relative_result", "def vratio(self):\n return self.run_command('vratio')[0]", "def get_verified_ratio(self):\n if len(self.pages) == 0: # There are no pages in this journal \n return 0, 0, 0\n verified = (1, 2, 4) \n numVerified = 0 \n numSeen = 0 \n for page in self.pages: \n numSeen += len(page.names) # page.names is a list of Name objects \n for name in page.names: \n if name.match in verified: \n numVerified += 1\n if numSeen == 0: # No names in any of the pages of the journal \n return 0, 0, 0\n return numVerified, numSeen, numVerified / numSeen", "def information_ratio(returns, factor_returns):\n active_return = returns - factor_returns\n tracking_error = np.std(active_return, ddof=1)\n if np.isnan(tracking_error):\n return 0.0\n return np.mean(active_return) / tracking_error", "def calculate_vote_fractions():\n return _calculate_vote_fractions(models.get_candidate_to_vote_count())", "def custom_score_3(game, player):\n \"\"\"custom_score_3 heuristic function aims at maximizing win chances of my agent\n \"\"\"\n if 
game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = 1.0 * len(game.get_legal_moves(player))#Calculate length of available moves for myPlayer\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves with oppositePlayer\n\n if length_my_player_moves == 0:\n return float(\"-inf\")\n\n if length_opp_payer_moves == 0:\n return float(\"inf\")\n\n return float(length_my_player_moves/length_opp_payer_moves)", "def heuristic_1_center(game, player) -> float:\n center_available_factor = get_center_available_factor(game, player)\n\n # Heuristic score output\n return float(center_available_factor)", "def get_opinion_percent(self):\n return (self.get_percent()+100)/2", "def infected_ratio(self):\n if self.max_pop != 0:\n return int(self.infected_pop) / self.max_pop\n else:\n return 1", "def expected_value(held_dice, num_die_sides, num_free_dice):\n outcomes = get_outcomes(num_die_sides)\n print \"outcomes:\", outcomes\n\n # generate all possible sequences of rolls\n all_rolls = list(gen_all_sequences(outcomes, num_free_dice))\n results = [max_repeats(roll) for roll in all_rolls]\n value = 0.0 \n\n\n for result in all_rolls:\n curr_hand = tuple(list(held_dice) + list(result))\n value += score(curr_hand)\n\n return value / len(all_rolls)", "def custom_score_5(game, player):\n \"\"\"custom_score_5 heuristic function defines chances heuristics\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) #Calculate length of available moves for myPlayer\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves for the oppositePlayer\n return float(length_my_player_moves*length_my_player_moves - length_opp_payer_moves*length_opp_payer_moves)", "def weighted_score(player, board):\n opp = Othello.opponent(player)\n total = 0\n for sq in Othello.squares():\n if board[sq] == player:\n total += SQUARE_WEIGHTS[sq]\n elif board[sq] == opp:\n total -= SQUARE_WEIGHTS[sq]\n return total", "def custom_score(game, player):\n if game.is_loser(player):\n return -math.inf\n\n if game.is_winner(player):\n return math.inf\n\n opp_moves = game.get_legal_moves(game.get_opponent(player))\n own_moves = game.get_legal_moves(player)\n\n return len(own_moves) / max(len(opp_moves), 1e-6)", "def score(self):\n # loop over aminoacids in protein and calculate how often H and C are surrounded by H and C\n for aminoacid in self.aminoacids:\n if aminoacid.aminoacid_type == \"H\":\n self.stability = self.stability + (-1 * self.surrounded_by(aminoacid.location, \"H\", aminoacid.aminoacid_number)) + (-1 * self.surrounded_by(aminoacid.location, \"C\", aminoacid.aminoacid_number))\n elif aminoacid.aminoacid_type == \"C\":\n self.stability = self.stability + (-5 * self.surrounded_by(aminoacid.location, \"C\", aminoacid.aminoacid_number)) + (-1 * self.surrounded_by(aminoacid.location, \"H\", aminoacid.aminoacid_number))\n self.stability = self.stability/2\n return int(self.stability)", "def taper_ratio(self) -> float:\n return self.xsecs[-1].chord / self.xsecs[0].chord", "def pe_ratio(self):\n try:\n return self.price / self.dividend_yield\n except ZeroDivisionError:\n return 0.0", "def calculate_score(self):\n\n correct_award = 150\n turns_total = self.turns.count()\n turns_correct = self.turns.filter(is_match=True).count()\n 
seconds_left = (60.0 - (self.turns.last().created - self.turns.first().created).total_seconds()) or 0\n maxpoints = turns_correct * correct_award\n deduction_for_errors = correct_award * 0.11123\n\n maxpoints -= ((turns_total - turns_correct) * 2 * deduction_for_errors)\n maxpoints += seconds_left * 5.123214\n\n return Decimal(maxpoints)", "def _estimate_strength_from_results(\n number_of_games: int, number_of_wins: int, opponent_rating: float\n) -> Tuple[float, Tuple[float, float]]:\n n, p = number_of_games, number_of_wins / number_of_games\n q = 1 - p\n\n if n * p * q < 9: # Cannot apply normal approximation of binomial distribution\n raise ValueError(\n \"The results obtained in evaluate_player are too extreme to obtain an \"\n \"accurate player evaluation. You can try to solve this issue by increasing\"\n \" the total number of battles. Obtained results: %d victories out of %d\"\n \" games.\" % (p * n, n)\n )\n\n estimate = opponent_rating * p / q\n error = (\n math.sqrt(n * p * q) / n * 1.96\n ) # 95% confidence interval for normal distribution\n\n lower_bound = max(0, p - error)\n lower_bound = opponent_rating * lower_bound / (1 - lower_bound)\n\n higher_bound = min(1, p + error)\n\n if higher_bound == 1:\n higher_bound = math.inf\n else:\n higher_bound = opponent_rating * higher_bound / (1 - higher_bound)\n\n return estimate, (lower_bound, higher_bound)", "def expected_value(held_dice, num_die_sides, num_free_dice):\r\n \r\n scores = []\r\n \r\n die_sides = [(die + 1) for die in range(num_die_sides)]\r\n \r\n pos_outcomes = gen_all_sequences(die_sides, num_free_dice)\r\n\r\n for outcome in pos_outcomes:\r\n scores.append(score(held_dice + outcome))\r\n \r\n expected_result = float(sum(scores))/len(scores)\r\n \r\n return expected_result", "def calculate(self):\n\n rating = 0\n\n props = ['aroma', 'appearance', 'taste', 'palate', 'bottle_style']\n for item in props:\n rating += getattr(self, item, 0)\n\n self.overall = (rating / self.total) / .2", "def custom_score_7(game, player):\n \"\"\"custom_score_7 heuristic function also aims towards weighted chances heuristics\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) #Calculate length of available moves for myPlayer\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves for the oppositePlayer\n return float(1.5*length_my_player_moves*length_my_player_moves - length_opp_payer_moves*length_opp_payer_moves)", "def _calculate_score(predictions: np.ndarray, correct: np.ndarray) -> float:\n return np.sqrt(np.sum(np.square(np.log(predictions + 1) - np.log(correct + 1))) / len(correct))", "def get_score(self, player: int) -> int:\n score = 0\n i = 0\n while i < len(self.leylines):\n score += 1 if self.leylines[i].player == player else 0\n score += 1 if self.rights[i].player == player else 0\n score += 1 if self.lefts[i].player == player else 0\n i += 1\n return score", "def vertical_ratio(self):\n if self.pupils_located:\n pupil_left = self.eye_left.pupil.y / (self.eye_left.center[1] * 2 - 10)\n pupil_right = self.eye_right.pupil.y / (self.eye_right.center[1] * 2 - 10)\n return (pupil_left + pupil_right) / 2", "def calc_artists_match(song: Song, result: Result) -> float:\n\n artist_match_number = 0.0\n\n # Result has only one artist, return 0.0\n if len(song.artists) == 1 or not result.artists:\n return artist_match_number\n\n artist1_list, artist2_list 
= based_sort(\n list(map(slugify, song.artists)), list(map(slugify, result.artists))\n )\n\n artists_match = 0.0\n for artist1, artist2 in zip_longest(artist1_list, artist2_list):\n artist12_match = ratio(artist1, artist2)\n artists_match += artist12_match\n\n artist_match_number = artists_match / len(artist1_list)\n\n debug(song.song_id, result.result_id, f\"Artists match: {artist_match_number}\")\n\n return artist_match_number", "def information_ratio(returns, factor_returns):\n if len(returns) < 2:\n return np.nan\n\n active_return = _adjust_returns(returns, factor_returns)\n tracking_error = np.std(active_return, ddof=1)\n if np.isnan(tracking_error):\n return 0.0\n if tracking_error == 0:\n return np.nan\n return np.mean(active_return) / tracking_error", "def quality(self, rating_groups, weights=None):\n rating_groups, keys = self.validate_rating_groups(rating_groups)\n weights = self.validate_weights(weights, rating_groups, keys)\n flatten_ratings = sum(map(tuple, rating_groups), ())\n flatten_weights = sum(map(tuple, weights), ())\n length = len(flatten_ratings)\n # a vector of all of the skill means\n mean_matrix = Matrix([[r.mu] for r in flatten_ratings])\n # a matrix whose diagonal values are the variances (sigma ** 2) of each\n # of the players.\n def variance_matrix(height, width):\n variances = (r.sigma ** 2 for r in flatten_ratings)\n for x, variance in enumerate(variances):\n yield (x, x), variance\n variance_matrix = Matrix(variance_matrix, length, length)\n # the player-team assignment and comparison matrix\n def rotated_a_matrix(set_height, set_width):\n t = 0\n for r, (cur, next) in enumerate(zip(rating_groups[:-1],\n rating_groups[1:])):\n for x in range(t, t + len(cur)):\n yield (r, x), flatten_weights[x]\n t += 1\n x += 1\n for x in range(x, x + len(next)):\n yield (r, x), -flatten_weights[x]\n set_height(r + 1)\n set_width(x + 1)\n rotated_a_matrix = Matrix(rotated_a_matrix)\n a_matrix = rotated_a_matrix.transpose()\n # match quality further derivation\n _ata = (self.beta ** 2) * rotated_a_matrix * a_matrix\n _atsa = rotated_a_matrix * variance_matrix * a_matrix\n start = mean_matrix.transpose() * a_matrix\n middle = _ata + _atsa\n end = rotated_a_matrix * mean_matrix\n # make result\n e_arg = (-0.5 * start * middle.inverse() * end).determinant()\n s_arg = _ata.determinant() / middle.determinant()\n return math.exp(e_arg) * math.sqrt(s_arg)", "def get_average_distance_to_opponents(obs, player_x, player_y):\n distances_sum = 0\n distances_amount = 0\n for i in range(1, len(obs[\"right_team\"])):\n # if opponent is ahead of player\n if obs[\"right_team\"][i][0] > (player_x - 0.02):\n distance_to_opponent = get_distance(player_x, player_y, obs[\"right_team\"][i][0], obs[\"right_team\"][i][1])\n if distance_to_opponent < 0.03:\n distances_sum += distance_to_opponent\n distances_amount += 1\n # if there is no opponents close around\n if distances_amount == 0:\n return 2, distances_amount\n return distances_sum / distances_amount, distances_amount", "def value(self):\n #import pdb; pdb.set_trace()\n return ((self.team1.get_cur_hp() / self.team1.get_total_hp()) - \n (self.team2.get_cur_hp() / self.team2.get_total_hp()))", "def entropy(self) -> float:\n probabilities = np.array([len(players) for players in self.answers.values()])\n probabilities = probabilities / sum(probabilities)\n return sc.stats.entropy(probabilities)", "def calculate_perimeter_ratio(gt_perimeter, perf_perimeter):\n return min(gt_perimeter, perf_perimeter) / max(gt_perimeter, perf_perimeter)", "def 
get_ratio_guarantee_advance(self):\n return (\n self.ratio_guarantee_advance *\n self.get_period_guarantee_advance *\n self.ratio2_guarantee_advance\n )", "def sharpe_ratio(factor_returns, annualization_factor):\r\n\r\n return annualization_factor * factor_returns.mean() / factor_returns.std()", "def calculate_a(self):\n self.a = float(len(self.neighbors)) / total_connections" ]
[ "0.72646964", "0.64316165", "0.6352074", "0.6281999", "0.6247194", "0.6209247", "0.6207199", "0.61734754", "0.61600745", "0.61037296", "0.609562", "0.607315", "0.60665053", "0.60465765", "0.6046293", "0.60187536", "0.6009164", "0.59791255", "0.5958189", "0.59420407", "0.5881529", "0.5880197", "0.58754414", "0.5865603", "0.5865355", "0.5836641", "0.5815389", "0.5805758", "0.57964987", "0.57841754", "0.5774557", "0.57651186", "0.5753168", "0.5747919", "0.573551", "0.5730925", "0.5729146", "0.5724391", "0.5722815", "0.5722815", "0.5722815", "0.57179284", "0.5703159", "0.5701336", "0.56977355", "0.5697034", "0.5685612", "0.5684638", "0.56842166", "0.5683044", "0.56809956", "0.5680193", "0.5667492", "0.5665352", "0.5664911", "0.566407", "0.56606287", "0.5660204", "0.5651556", "0.5648373", "0.56422526", "0.5638467", "0.5638272", "0.5636231", "0.5629354", "0.56291103", "0.56194985", "0.56179714", "0.56166536", "0.5614553", "0.5603615", "0.56022424", "0.5595793", "0.559293", "0.5592488", "0.5582595", "0.5579975", "0.5571609", "0.5571215", "0.55674523", "0.5566796", "0.5564611", "0.5560355", "0.5554855", "0.5541081", "0.55260545", "0.55236727", "0.55229205", "0.5519234", "0.550625", "0.55048186", "0.55013376", "0.54978853", "0.54838264", "0.5479403", "0.54767466", "0.5476007", "0.54758567", "0.5473573", "0.5471991" ]
0.7257836
1
Method which calculates the Defensive Ratio of a player: the total points received per 100 possessions
def set_defensive_ratio(self): bx = self.get_standard_stats() team = self.get_team_stats() opp_team = self.get_opp_team_stats() if bx["minutes"] > 0: opp_fga = opp_team["t2p_int"] + opp_team["t3p_int"] opp_fgm = opp_team["t2p_conv"] + opp_team["t3p_conv"] try: dor = Decimal(opp_team["reb_of"] / (opp_team["reb_of"] + team["reb_def"])) except ZeroDivisionError: print(BCOLORS.FAIL + "Error: División por cero" + BCOLORS.ENDC) dor = 0 except InvalidOperation: print(BCOLORS.FAIL + "Error: Invalid Operation" + BCOLORS.ENDC) dor = 0 try: dfg = Decimal(opp_fgm / opp_fga) except ZeroDivisionError: print(BCOLORS.WARNING + "Error: División por cero" + BCOLORS.ENDC) dfg = 0 try: fmwt = Decimal((dfg * (1 - dor)) / (dfg * (1 - dor) + (1 - dfg) * dor)) except: fmwt = 0 stops1 = bx["steals"] + bx["block_shots"] * fmwt * (1 - Decimal('1.07') * dor) + bx["reb_def"] * (1 - fmwt) try: stops2 = (Decimal((opp_fga - opp_fgm - team["block_shots"]) / team["minutes"]) * fmwt * (1 - Decimal('1.07') * dor) + Decimal((opp_team["turnovers"] - team["steals"]) / team["minutes"])) * bx["minutes"] + Decimal(bx["fouls_cm"] / team["fouls_cm"]) * Decimal('0.4') * opp_team["tl_int"] * (1 - Decimal(opp_team["tl_conv"] / opp_team["tl_int"]))**2 except ZeroDivisionError: print(BCOLORS.WARNING + "Error: División por cero" + BCOLORS.ENDC) stops2 = 0 except InvalidOperation: print(BCOLORS.WARNING + "Error: Invalid Operation" + BCOLORS.ENDC) stops2 = 0 stops = stops1 + stops2 poss = self.get_team_possessions() if bx["minutes"] > 0: stop_percentage = (float(stops) * float(opp_team["minutes"])) / (float(poss) * float(bx["minutes"])) else: stop_percentage = 0.00 opp_points = opp_team["t2p_conv"] * 2 + opp_team["t3p_conv"] * 3 + opp_team["tl_conv"] team_defensive_rating = 100 * (float(opp_points) / poss) try: d_pts_per_scposs = float(opp_points) / (float(opp_fgm) + (1 - (1 - (float(opp_team["tl_conv"]) / float(opp_team["tl_int"])))**2) * float(opp_team["tl_int"])*0.4) result = Decimal(team_defensive_rating) + Decimal('0.2') * (100 * Decimal(d_pts_per_scposs) * (1 - Decimal(stop_percentage)) - Decimal(team_defensive_rating)) except ZeroDivisionError: print(BCOLORS.WARNING + "Error: División por cero" + BCOLORS.ENDC) d_pts_per_scposs = 0 result = 0.00 # print("dor: " + str(dor)) # print("dfg: " + str(dfg)) # print("fmwt: " + str(fmwt)) # print("stops1: " + str(stops1)) # print("stops2: " + str(stops2)) # print("stops: " + str(stops)) # print("poss: " + str(poss)) # print("stop_percentage: " + str(stop_percentage)) # print("opp_points: " + str(opp_points)) # print("team_defensive_rating: " + str(team_defensive_rating)) # print("d_pts_per_scposs: " + str(d_pts_per_scposs)) # print("drtg: " + str(result) + "\n") else: result = 0.00 self.drtg = "%.2f" % round(result, 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def player_ratio(self, ctx):\r\n player = ctx.message.content.split(' ')[1]\r\n if os.environ.get(\"WoW_Token\") is None:\r\n return\r\n else:\r\n async with aiohttp.ClientSession().get('https://us.api.battle.net/wow/character/zul\\'jin/' + player + '?fields=pvp&locale=en_US&apikey=' + os.environ.get(\"WoW_Token\")) as res:\r\n if res.status == 200:\r\n data = await res.json()\r\n player_pvp_stats = data['pvp']['brackets']['ARENA_BRACKET_3v3']\r\n await ctx.message.channel.send(u\"Player: {:s}\").format(player)\r\n await ctx.message.channel.send(\"Rating: {:d}\".format(player_pvp_stats['rating']))\r\n await ctx.message.channel.send(\"Season Wins: {:d}\".format(player_pvp_stats['seasonWon']))\r\n await ctx.message.channel.send(\"Season Losses: {:d}\".format(player_pvp_stats['seasonLost']))\r\n\r\n if player_pvp_stats['seasonWon'] == 0 or player_pvp_stats['seasonLost'] == 0:\r\n await ctx.message.channel.send(\"Ratio: 0\")\r\n else:\r\n await ctx.message.channel.send(\"Ratio: {:.4f}\".format(\r\n float(player_pvp_stats['seasonWon'])/\r\n float(player_pvp_stats['seasonLost']))\r\n )", "def set_offensive_ratio(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n if bx[\"minutes\"] > 0 and (bx[\"t2p_int\"] + bx[\"t3p_int\"]) > 0:\n fgm = bx[\"t2p_conv\"] + bx[\"t3p_conv\"]\n fga = bx[\"t2p_int\"] + bx[\"t3p_int\"]\n team_fgm = team[\"t2p_conv\"] + team[\"t3p_conv\"]\n team_fga = team[\"t2p_int\"] + team[\"t3p_int\"]\n team_points = team[\"t2p_conv\"]*2 + team[\"t3p_conv\"]*3 + team[\"tl_conv\"]\n points = bx[\"t2p_conv\"]*2 + bx[\"t3p_conv\"]*3 + bx[\"tl_conv\"]\n\n try:\n qAST = (Decimal(bx[\"minutes\"] / (team[\"minutes\"] / 5)) * (Decimal('1.14') * Decimal((team[\"assists\"] - bx[\"assists\"]) / team_fgm))) + \\\n Decimal((((team[\"assists\"] / team[\"minutes\"]) * bx[\"minutes\"] * 5 - bx[\"assists\"]) / ((team_fgm / team[\"minutes\"]) * bx[\"minutes\"] * 5 - fgm)) * (1 - (bx[\"minutes\"] / (team[\"minutes\"] / 5))))\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n qAST = 1\n except InvalidOperation:\n print(BCOLORS.WARNING + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n qAST = 1\n\n fg_part = fgm * (1 - Decimal('0.5') * Decimal((points - bx[\"tl_conv\"]) / (2 * fga)) * qAST)\n\n try:\n ast_part = Decimal('0.5') * Decimal(((team_points - team[\"tl_conv\"]) - (points - bx[\"tl_conv\"])) / (2*(team_fga - fga))) * bx[\"assists\"]\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n ast_part = 0\n\n if bx[\"tl_int\"] > 0:\n ft_part = Decimal(1 - (1 - (bx[\"tl_conv\"] / bx[\"tl_int\"]))**2) * Decimal('0.4') * bx[\"tl_int\"]\n else:\n ft_part = 0\n team_scoring_poss = Decimal(team_fgm + Decimal(1 - (1 - (team[\"tl_conv\"] / team[\"tl_int\"]))**2) * team[\"tl_int\"] * Decimal('0.4'))\n try:\n team_orb_percentage = Decimal(team[\"reb_of\"] / (team[\"reb_of\"] + ((opp_team[\"reb_def\"] + opp_team[\"reb_of\"]) - opp_team[\"reb_of\"])))\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n team_orb_percentage = 0\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n team_orb_percentage = 0\n\n team_play_percentage = Decimal(team_scoring_poss / (team_fga + team[\"tl_int\"] * Decimal('0.4') + team[\"turnovers\"]))\n try:\n team_orb_weight = ((1 - team_orb_percentage) * team_play_percentage) / ((1 - team_orb_percentage) * team_play_percentage + 
team_orb_percentage * (1 - team_play_percentage))\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n team_orb_weight = 0\n\n orb_part = bx[\"reb_of\"] * team_orb_weight * team_play_percentage\n\n fg_x_poss = (fga - fgm) * (1 - Decimal('1.07') * team_orb_percentage)\n if bx[\"tl_conv\"] > 0:\n ft_x_poss = Decimal((1 - (bx[\"tl_conv\"] / bx[\"tl_int\"]))**2) * Decimal('0.4') * bx[\"tl_int\"]\n else:\n ft_x_poss = Decimal(1 - (bx[\"tl_conv\"] / 1)**2) * Decimal('0.4') * bx[\"tl_int\"]\n try:\n sc_poss = (fg_part + ast_part + ft_part) * (1 - (team[\"reb_of\"] / team_scoring_poss) * team_orb_weight * team_play_percentage) + orb_part\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n sc_poss =0\n\n tot_poss = sc_poss + fg_x_poss + ft_x_poss + bx[\"turnovers\"]\n\n pprod_fg_part = 2 * (fgm + Decimal('0.5') * bx[\"t3p_conv\"]) * (1 - Decimal('0.5') * Decimal((points - bx[\"tl_conv\"]) / (2 * fga)) * qAST)\n\n try:\n pprod_ast_part = 2 * ((team_fgm - fgm + Decimal('0.5') * (team[\"t3p_conv\"] - bx[\"t3p_conv\"])) / (team_fgm - fgm)) * Decimal('0.5') * Decimal(((team_points - team[\"tl_conv\"]) - (points - bx[\"tl_conv\"])) / (2 * (team_fga - fga))) * bx[\"assists\"]\n except:\n pprod_ast_part = 0\n\n pprod_orb_part = bx[\"reb_of\"] * team_orb_weight * team_play_percentage * (team_points / (team_fgm + Decimal(1 - (team[\"tl_conv\"] / team[\"tl_int\"])**2) * Decimal('0.4') * team[\"tl_int\"]))\n try:\n pprod = (pprod_fg_part + pprod_ast_part + bx[\"tl_conv\"]) * (1 - (team[\"reb_of\"] / team_scoring_poss) * team_orb_weight * team_play_percentage) + pprod_orb_part\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n pprod = 0\n\n try:\n result = 100 * (pprod / tot_poss)\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n result = 0\n\n # print(\"fgm: \" + str(fgm))\n # print(\"fga: \" + str(fga))\n # print(\"team_fgm: \" + str(team_fgm))\n # print(\"team_fga: \" + str(team_fga))\n # print(\"team_points: \" + str(team_points))\n # print(\"points: \" + str(points))\n # print(\"qAST: \" + str(qAST))\n # print(\"fg_part: \" + str(fg_part))\n # print(\"ast_part: \" + str(ast_part))\n # print(\"ft_part: \" + str(ft_part))\n # print(\"team_scoring_poss: \" + str(team_scoring_poss))\n # print(\"team_orb_percentage: \" + str(team_orb_percentage))\n # print(\"team_play_percentage: \" + str(team_play_percentage))\n # print(\"team_orb_weight: \" + str(team_orb_weight))\n # print(\"orb_part: \" + str(orb_part))\n # print(\"fg_x_poss: \" + str(fg_x_poss))\n # print(\"ft_x_poss: \" + str(ft_x_poss))\n # print(\"sc_poss: \" + str(sc_poss))\n # print(\"tot_poss: \" + str(tot_poss))\n # print(\"pprod_fg_part: \" + str(pprod_fg_part))\n # print(\"pprod_ast_part: \" + str(pprod_ast_part))\n # print(\"pprod_orb_part: \" + str(pprod_orb_part))\n # print(\"pprod: \" + str(pprod))\n # print(\"result: \" + str(result) + \"\\n\")\n else:\n result = 0.00\n\n self.ortg = \"%.2f\" % round(result, 2)\n if Decimal(self.ortg) < 0 or Decimal(self.ortg) >= 1000:\n \"\"\"For one game, maybe we've got a negative result or one so big, so, for just only a game, we get the ORTG \n using team's formula\"\"\"\n print(BCOLORS.OKBLUE + \"ORTG negativo o superior a 1000 para jugadora => recalculamos a través de la fórmula de equipo\" + BCOLORS.ENDC)\n bx = self.get_standard_stats()\n result = round((bx[\"t2p_conv\"]*2 + bx[\"t3p_conv\"]*3 + 
bx[\"tl_conv\"])/self.get_team_possessions(), 2)\n self.ortg = \"%.2f\" % result", "def compute_player_score():\n\n progress_bar = ProgressBar(label=\"Computing universes\")\n\n survivals_count = 0\n for i in range(PARALLEL_UNIVERSES_COUNT):\n if simulate_universe():\n survivals_count += 1\n progress_bar.set_progression((i + 1) / PARALLEL_UNIVERSES_COUNT)\n\n progress_bar.end(\"\\n\\n\")\n\n return survivals_count / PARALLEL_UNIVERSES_COUNT", "def exceeded_ratio(self) -> float:\n return self.amount_spent / self.total_amount", "def winRate(DF):\r\n df = DF[\"return\"]\r\n pos = df[df>1]\r\n neg = df[df<1]\r\n return (len(pos) / len(pos + neg)) * 100", "def get_real_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_real_rating() / self.field.range)", "def winning_percentage(self):\n return float(len(self.wins))/len((self.wins+self.losses))", "def KPI(self, total=True):\n \n data = self.select_table('ChordLog')\n correct = data[data['PredictedLabel'] == data['ActualLabel']]\n\n # % correctly predicted in chord net\n human_level_performance = (len(correct) / len(data)) * 100\n \n # round value\n human_level_performance = round(human_level_performance, 4) \n \n return human_level_performance", "def calculate_score(self):\n\n correct_award = 150\n turns_total = self.turns.count()\n turns_correct = self.turns.filter(is_match=True).count()\n seconds_left = (60.0 - (self.turns.last().created - self.turns.first().created).total_seconds()) or 0\n maxpoints = turns_correct * correct_award\n deduction_for_errors = correct_award * 0.11123\n\n maxpoints -= ((turns_total - turns_correct) * 2 * deduction_for_errors)\n maxpoints += seconds_left * 5.123214\n\n return Decimal(maxpoints)", "def pct(self):\n\t\treturn self.bottle.pct()", "def offensive_rating(data_frame, mode):\n off_rat = dict()\n average_points = calculate_average_points(data_frame, mode)\n for k, possessions in possessions_home_away(data_frame, mode).items():\n try:\n off_rat[k] = format(float(average_points[k]) * 100 / float(possessions), '.2f')\n except ZeroDivisionError:\n off_rat[k] = 0.0\n return off_rat", "def quick_ratio(self):\n return (\n self.current_assets - self.inventory_net) / self.current_liabilities", "def get_percent(self, n):\n controlled = 0.00\n for i in range(len(self.tile_contents)):\n if(self.tile_contents[i].player_number == n):\n controlled += 1.00\n \n return float(controlled / self.paint_blocks)", "def ratio_local_prod(self):\n if self.current_energy_produced == 0.0:\n return 1.0\n else:\n return 1. 
- self.export_grid / self.current_energy_produced", "def points_percentage(plane, p, points, total):\n match = 0\n for point in points:\n if distance_to_plane(plane, point) <= p:\n match += 1\n\n return match / total", "def get_free_set_percentage(self, params):\n raise NotImplementedError()", "def expected_value(held_dice, num_die_sides, num_free_dice):\n result = 0\n outcomes = range(1, num_die_sides + 1)\n possible = sorted(gen_all_sequences(outcomes, num_free_dice))\n for hand in possible:\n result += score(held_dice + hand)\n return float(result)/len(possible)", "def do_damage(self) -> float:\n sum = 0\n for operator in self.__operators:\n if operator.is_alive:\n operator.experience += 1\n sum += operator.experience / 100\n return 0.1 + sum", "def percent_passing(self) -> float:\n num_meas = Enumerable(self.mlc_meas).select_many(lambda m: m.passed).count()\n num_pass = (\n Enumerable(self.mlc_meas)\n .select_many(lambda m: m.passed)\n .count(lambda p: bool(p) is True)\n )\n return float(100 * num_pass / num_meas)", "def expected_value(held_dice, num_die_sides, num_free_dice):\r\n die_outcomes = set(range(1, num_die_sides + 1))\r\n \r\n possible_sequences = gen_all_sequences(die_outcomes, num_free_dice)\r\n \r\n total_score = 0.0\r\n for sequence in possible_sequences:\r\n total_score += score(held_dice + sequence)\r\n \r\n return float(total_score / len(possible_sequences))", "def expected_value(held_dice, num_die_sides, num_free_dice):\n list_scores = []\n die_sides = [die for die in range(1, num_die_sides + 1)]\n possible_seq = gen_all_sequences(die_sides, num_free_dice)\n for item in possible_seq:\n list_scores.append(score(held_dice + item))\n \n return float(sum(list_scores)) / len(list_scores)", "def custom_score_4(game, player):\n \"\"\"custom_score_4 heuristic function aims at minimizing loosing chances of myPlayer\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) #Calculate length of available moves for myPlayer\n length_opp_payer_moves = 1.0 * len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves for the oppositePlayer\n\n if length_my_player_moves == 0:\n return float(\"-inf\")\n\n if length_opp_payer_moves == 0:\n return float(\"inf\")\n\n return float(-length_opp_payer_moves/length_my_player_moves)", "def calculate_probability(self):\n return 0", "def pe_ratio(self):\n try:\n return self.price / self.dividend_yield\n except ZeroDivisionError:\n return 0.0", "def adv_ratio(self): # XXX\r\n bw = StatsRouter.global_bw_mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/bw", "def administer(self):\n\n score = 0.0\n for question in self.questions:\n if question.ask_and_evaluate() is True:\n score += 1\n return (score / len(self.questions)) * 100", "def rate(self) -> float:\n return self.success_cnt / self.total_cnt if self.total_cnt > 0 else 1.0", "def evaluate(game, player):\n weights = [2, 200, 2000, 20000]\n reward = 0\n opponent = get_opponent(player)\n for length in range(2, 6):\n reward += weights[length - 2] * get_num_series(game, player, length)\n reward -= weights[length - 2] * get_num_series(game, opponent, length)\n return reward", "def cal_hit_ratio(self):\n full, top_k = self._subjects, self._top_k\n top_k = full[full['rank']<=top_k]\n score = 0.0\n # golden items hit in the top_K items\n score_1 = sum([(len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==1.0)])) for i,d in 
top_k.groupby('user')])\n score_2 = sum([(len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==0.0)])) for i,d in top_k.groupby('user')])\n score = score_1 - score_2\n return score/full['user'].nunique()", "def get_improved_score_factor(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves - opp_moves)", "def points_per_dollar(self):\n if float(self.draftkings_salary) == 0.0:\n return 0.0\n\n return float(self.predicted_draftkings_points) / float(self.draftkings_salary)", "def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)", "def custom_score(game, player):\n if game.is_loser(player):\n return -math.inf\n\n if game.is_winner(player):\n return math.inf\n\n opp_moves = game.get_legal_moves(game.get_opponent(player))\n own_moves = game.get_legal_moves(player)\n\n return len(own_moves) / max(len(opp_moves), 1e-6)", "async def get_rob_percentage(level):\n chance = int(6 + (level // 10)) # first 10 levels is 6 for 30% chance\n if chance > 16:\n chance = 16\n return chance", "def expected_value(held_dice, num_die_sides, num_free_dice):\r\n \r\n scores = []\r\n \r\n die_sides = [(die + 1) for die in range(num_die_sides)]\r\n \r\n pos_outcomes = gen_all_sequences(die_sides, num_free_dice)\r\n\r\n for outcome in pos_outcomes:\r\n scores.append(score(held_dice + outcome))\r\n \r\n expected_result = float(sum(scores))/len(scores)\r\n \r\n return expected_result", "def determineAmountToCall(self, player):\n\t\treturn sum(self.currentBet) - sum(player.betAmount)", "def calculate_profit(self):", "def custom_score_6(game, player):\n \"\"\"custom_score_6 heuristic function aims towards weighted chances heuristics\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) #Calculate length of available moves for myPlayer\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves for the oppositePlayer\n return float(length_my_player_moves*length_my_player_moves - 1.5*length_opp_payer_moves*length_opp_payer_moves)", "def Probability(rating1, rating2):\n return 1.0 * 1.0 / (1 + 1.0 * math.pow(10, 1.0 * (rating1 - rating2) / 400))", "def cash_ratio(self):\n return self.cash / self.current_liabilities", "def punch(self, a_fighter):\n points = int(uniform(0.7,1.0)*10*self.get_strength()/a_fighter.get_agility())\n a_fighter.__health_points = a_fighter.get_health_points() - points\n return a_fighter.__health_points", "def p(party, vote_count, s):\n return t(party, vote_count) / d(s)", "def get_real_rating(self):\n if not (self.votes and self.score):\n return 0\n return float(self.score)/self.votes", "def get_sharpe_ratio(allocs, prices):\n\tport_val = get_portfolio_value(prices, allocs, start_val=1.0)\n\tsharpe_ratio = get_portfolio_stats(port_val, daily_rf=0.0, samples_per_year=252)[3]\n\treturn -sharpe_ratio", "def defense(self):\n #return self.stats.dexterity + (self.stats.reiatsu * self.stats.density)\n return self.stats.defense", "def depiction_score(self):\n\n collision_penalty = 1\n degenerated_penalty = 0.4\n\n bond_collisions = self.count_bond_collisions()\n degenerated_atoms = self.count_suboptimal_atom_positions(0.0, 0.5)\n\n 
score = (\n collision_penalty * bond_collisions\n + degenerated_penalty * degenerated_atoms\n )\n\n return round(score, 1)", "def custom_score(game, player):\n \n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n \n # return different between # of my agent's move and oppenent's\n return float(own_moves - opp_moves)", "def heuristic_1_center(game, player) -> float:\n center_available_factor = get_center_available_factor(game, player)\n\n # Heuristic score output\n return float(center_available_factor)", "def percent_processed(self):\n try:\n return (self.pos / self.data_encap.size) * 100.0\n except ZeroDivisionError:\n return 100.0", "def penalty(self):\n return 0", "def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n if game.is_winner(player):\n return float(\"inf\")\n\n # Aim to maximise your own available moves vs the opponent (Factor 2)\n\n opponent = game.get_opponent(player)\n return float(len(game.get_legal_moves(player)))-2.0*float(len(game.get_legal_moves(opponent)))", "def expected_value(held_dice, num_die_sides, num_free_dice):\n outcomes = get_outcomes(num_die_sides)\n print \"outcomes:\", outcomes\n\n # generate all possible sequences of rolls\n all_rolls = list(gen_all_sequences(outcomes, num_free_dice))\n results = [max_repeats(roll) for roll in all_rolls]\n value = 0.0 \n\n\n for result in all_rolls:\n curr_hand = tuple(list(held_dice) + list(result))\n value += score(curr_hand)\n\n return value / len(all_rolls)", "def calc_win_lose_ratio(self):\n total = len(self.train_y)\n survived = 0\n for i in self.train_y:\n if i > 0:\n survived += 1\n\n self.survival_sum = [survived, total-survived]", "def _prorata_rate(self, days_used, days_in_month):\n return (100 * days_used // days_in_month) / 100.0", "def do_damage(self) -> float:\n res = 0.05 + self.experience / 100\n self.experience = self.experience + 1\n return res", "def getProduction(self, playerID):\n prod=0\n for p in self.__camps:\n if( p.getOwner() == playerID ):\n prod = prod + p.getGrowthrate()\n return prod", "def infected_ratio(self):\n if self.max_pop != 0:\n return int(self.infected_pop) / self.max_pop\n else:\n return 1", "def getScore(self):\n tempscore = 1000 - 0.01*self.timeDriving \n tempscore -= 0.1*getDist(self.maze.checkpoints[self.checkpoint].getMid(),self.pos)\n tempscore += self.checkpoint *1000\n tempscore += self.laps * 1000 * len(self.maze.checkpoints)\n return tempscore", "def calculate_overall_rating(player_dict):\r\n if player_dict[\"position\"].upper() == \"QB\":\r\n throw_power = int(max(min(int(player_dict[\"throw_power\"]), 99), 70))\r\n throw_accuracy = int(max(min(math.ceil(\r\n ((2 * (\r\n int(player_dict[\"throw_accuracy_short\"]) + \r\n int(player_dict[\"throw_accuracy_mid\"]) + \r\n int(player_dict[\"throw_accuracy_deep\"]) + \r\n int(player_dict[\"throw_on_the_run\"]) + \r\n int(player_dict[\"playaction\"])\r\n )) - (2 * min(\r\n int(player_dict[\"throw_accuracy_short\"]), \r\n int(player_dict[\"throw_accuracy_mid\"]), \r\n int(player_dict[\"throw_accuracy_deep\"]), \r\n int(player_dict[\"throw_on_the_run\"]), \r\n int(player_dict[\"playaction\"])\r\n ))\r\n ) / 8\r\n ), 99), 60))\r\n break_tackles = int(max(min(\r\n math.ceil(((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 7), \r\n 90), 20))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 98), 45))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 
99), 40))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 55))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((throw_power - 50.0) / 10.0) * 4.9\r\n overall_rating += ((throw_accuracy - 50.0) / 10.0) * 5.8\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 0.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 0.8\r\n overall_rating += ((awareness - 50.0) / 10.0) * 4.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 2.0\r\n overall_rating = int(max(min((round(overall_rating) + 28), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"HB\":\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 70), 25))\r\n break_tackles = int(max(min(\r\n (math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 5), \r\n 99), 50))\r\n carrying = int(max(min(int(player_dict[\"carrying\"]), 99), 60))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 90), 45))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 50))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 0.33\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 0.8\r\n overall_rating += ((carrying - 50.0) / 10.0) * 2.0\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 2.8\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.0\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.6\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.3\r\n overall_rating += ((catching - 50.0) / 10.0) * 1.4\r\n overall_rating = int(max(min((round(overall_rating) + 27), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"FB\":\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 75), 40))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n ), 85), 45))\r\n break_tackles = int(max(min(\r\n (math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 5), \r\n 99), 55))\r\n carrying = int(max(min(int(player_dict[\"carrying\"]), 99), 60))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 65))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 55))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 95), 60))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 60))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 45))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 1.0\r\n overall_rating += ((run_block - 50.0) / 10.0) * 7.2\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 1.8\r\n overall_rating += ((carrying - 50.0) / 10.0) * 1.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.0\r\n overall_rating += 
((awareness - 50.0) / 10.0) * 2.8\r\n overall_rating += ((strength - 50.0) / 10.0) * 1.8\r\n overall_rating += ((speed - 50.0) / 10.0) * 1.8\r\n overall_rating += ((catching - 50.0) / 10.0) * 5.2\r\n overall_rating = int(max(min((round(overall_rating) + 39), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"WR\":\r\n break_tackles = int(max(min(\r\n math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2), \r\n 80), 35))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 75))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 75))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 85), 35))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 99), 65))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 0.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.3\r\n overall_rating += ((agility - 50.0) / 10.0) * 2.3\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.3\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.8\r\n overall_rating += ((speed - 50.0) / 10.0) * 2.3\r\n overall_rating += ((catching - 50.0) / 10.0) * 4.75\r\n overall_rating += ((jumping - 50.0) / 10.0) * 1.4\r\n overall_rating = int(max(min((round(overall_rating) + 26), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"TE\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 55))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 90), 55))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 55))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 60))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 99), 45))\r\n break_tackles = int(max(min(\r\n (math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 5), \r\n 95), 20))\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 80), 35))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n ), 85), 35))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 2.65\r\n overall_rating += ((strength - 50.0) / 10.0) * 2.65\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.65\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.25\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.25\r\n overall_rating += ((catching - 50.0) / 10.0) * 5.4\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 1.2\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 1.2\r\n overall_rating += ((run_block - 50.0) / 10.0) * 5.4\r\n overall_rating = int(max(min((round(overall_rating) + 35), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"LT\" or player_dict[\"position\"].upper() == \"RT\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 85), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = 
int(max(min(int(player_dict[\"agility\"]), 85), 40))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 90), 60))\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 99), 60))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n ), 99), 60))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 0.8\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.3\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.3\r\n overall_rating += ((agility - 50.0) / 10.0) * 0.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 0.8\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 4.75\r\n overall_rating += ((run_block - 50.0) / 10.0) * 3.75\r\n overall_rating = int(max(min((round(overall_rating) + 26), 99), 40))\r\n return overall_rating\r\n \r\n if (player_dict[\"position\"].upper() == \"LG\" or player_dict[\"position\"].upper() == \"RG\" or \r\n player_dict[\"position\"].upper() == \"C\"):\r\n speed = int(max(min(int(player_dict[\"speed\"]), 85), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 85), 40))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 90), 60))\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 99), 65))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n ), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 1.7\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.25\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.25\r\n overall_rating += ((agility - 50.0) / 10.0) * 0.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.7\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 3.25\r\n overall_rating += ((run_block - 50.0) / 10.0) * 4.8\r\n overall_rating = int(max(min((round(overall_rating) + 28), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"LE\" or player_dict[\"position\"].upper() == \"RE\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 90), 55))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 60))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 90), 45))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 60))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.75\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.75\r\n overall_rating += ((awareness - 50.0) / 10.0) * 1.75\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.75\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 3.8\r\n overall_rating += ((tackle - 50.0) / 10.0) * 5.5\r\n overall_rating = int(max(min((round(overall_rating) + 30), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"DT\":\r\n speed = 
int(max(min(int(player_dict[\"speed\"]), 90), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 90), 40))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 1.8\r\n overall_rating += ((strength - 50.0) / 10.0) * 5.5\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 1\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.8\r\n overall_rating += ((tackle - 50.0) / 10.0) * 4.55\r\n overall_rating = int(max(min((round(overall_rating) + 29), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"LOLB\" or player_dict[\"position\"].upper() == \"ROLB\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 70))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 60))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 65))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 75))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 90), 20))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 60))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.75\r\n overall_rating += ((strength - 50.0) / 10.0) * 2.4\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.6\r\n overall_rating += ((agility - 50.0) / 10.0) * 2.4\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.3\r\n overall_rating += ((catching - 50.0) / 10.0) * 1.3\r\n overall_rating += ((tackle - 50.0) / 10.0) * 4.8\r\n overall_rating = int(max(min((round(overall_rating) + 29), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"MLB\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 65))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 60))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 65))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 75))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 0.75\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.4\r\n overall_rating += ((awareness - 50.0) / 10.0) * 5.2\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.65\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.75\r\n overall_rating += ((tackle - 50.0) / 10.0) * 5.2\r\n overall_rating = int(max(min((round(overall_rating) + 27), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"CB\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 80))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 85), 40))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 75))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 40))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 85), 30))\r\n \r\n overall_rating = 0.0\r\n overall_rating += 
((speed - 50.0) / 10.0) * 3.85\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.9\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.85\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.55\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.35\r\n overall_rating += ((catching - 50.0) / 10.0) * 3\r\n overall_rating += ((jumping - 50.0) / 10.0) * 1.55\r\n overall_rating += ((tackle - 50.0) / 10.0) * 1.55\r\n overall_rating = int(max(min((round(overall_rating) + 28), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"FS\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 75))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 85), 45))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 70))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 35))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 90), 45))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.0\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.9\r\n overall_rating += ((awareness - 50.0) / 10.0) * 4.85\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.5\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.5\r\n overall_rating += ((catching - 50.0) / 10.0) * 3.0\r\n overall_rating += ((jumping - 50.0) / 10.0) * 1.5\r\n overall_rating += ((tackle - 50.0) / 10.0) * 2.5\r\n overall_rating = int(max(min((round(overall_rating) + 30), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"SS\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 75))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 90), 45))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 70))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 35))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 90), 45))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.2\r\n overall_rating += ((strength - 50.0) / 10.0) * 1.7\r\n overall_rating += ((awareness - 50.0) / 10.0) * 4.75\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.7\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.7\r\n overall_rating += ((catching - 50.0) / 10.0) * 3.2\r\n overall_rating += ((jumping - 50.0) / 10.0) * 0.9\r\n overall_rating += ((tackle - 50.0) / 10.0) * 3.2\r\n overall_rating = int(max(min((round(overall_rating) + 30), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"K\":\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 85), 35))\r\n kick_power = int(max(min(int(player_dict[\"kick_power\"]), 99), 80))\r\n kick_accuracy = int(max(min(int(player_dict[\"kick_accuracy\"]), 99), 70))\r\n \r\n overall_rating = (-177 + (0.218 * awareness) + (1.28 * kick_power) + (1.47 * kick_accuracy))\r\n overall_rating = int(max(min(round(overall_rating), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"P\":\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 85), 40))\r\n kick_power = int(max(min(int(player_dict[\"kick_power\"]), 99), 80))\r\n 
kick_accuracy = int(max(min(int(player_dict[\"kick_accuracy\"]), 99), 70))\r\n \r\n overall_rating = (-183 + (0.218 * awareness) + (1.5 * kick_power) + (1.33 * kick_accuracy))\r\n overall_rating = int(max(min(round(overall_rating), 99), 40))\r\n return overall_rating", "def getPercent(self):\n if isinstance(self.score,numbers.Number) and self.getMaximum():\n return (1.0*self.score/self.getMaximum())\n return None", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n pacmanPos = currentGameState.getPacmanPosition()\n\n food = currentGameState.getFood()\n capsules = currentGameState.getCapsules()\n return currentGameState.getScore() - 10 * capsuleDistancePlan(pacmanPos, capsules) - foodDistPlan(pacmanPos, food)", "def get_fool_ratio(self, test_acc, attack_accs):\n return [round(100*((test_acc - attack_acc) / test_acc), 2) for attack_acc in attack_accs]", "def get_perf(self) :\n self.train()\n\n prediction = self.clf.predict(self.df_test.drop(columns = 'up')[:-1])\n self.accuracy = accuracy_score(df_test['up'][length:].values, prediction)\n tn, fp, fn, tp = confusion_matrix(df_test['up'][length:].values, prediction).ravel()\n self.recall = tp/(tp+fn)\n self.specificity = tn / (tn+fp)\n\n\n self.df_true = self.df_true[self.length:]\n\n profit = 1\n mini = 1\n maxi = 1\n self.df_true['close'] = self.df_true['close'].map(lambda x : np.exp(x))\n for s in range(1,len(self.df_true)):\n if prediction[x-1] == 1 :\n result = ((self.df_true['close'].iloc[s] -self.df_true['close'].iloc[s-1]) / self.df_true['close'].iloc[s-1]) + 1\n profit = profit * result\n if result < mini :\n mini = result\n if maxi < result :\n maxi = result\n self.mini = mini\n self.maxi = maxi\n self.profit = profit", "def custom_score(game, player):\n \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n # First obtain the improved score\n player_legal_moves = game.get_legal_moves(player)\n opponent = game.get_opponent(player)\n opponent_legal_moves = game.get_legal_moves(opponent)\n improved = len(player_legal_moves) - len(opponent_legal_moves)\n if improved != 0:\n return float(improved)\n \n # Second get differences from center\n center_coordinates = center_y, center_x = get_center_coordinates(game)\n player_coordinates = game.get_player_location(player)\n opponent_coordinates = game.get_player_location(opponent)\n player_center_dist = get_distances_from_center(center_coordinates, player_coordinates)\n opponent_center_dist = get_distances_from_center(center_coordinates, opponent_coordinates)\n center_dist_diff = player_center_dist - opponent_center_dist\n \n # Third obtain next_moves\n player_next_moves = [get_next_moves(game, move, list(move)) for move in player_legal_moves]\n opponent_next_moves = [get_next_moves(game, move, list(move)) for move in opponent_legal_moves] \n improved_next = len(player_next_moves) - len(opponent_next_moves)\n \n # Put player and opponent feature differences in a tuple/vector surrogoate\n feature_diff_vector = (improved, center_dist_diff, improved_next)\n \n # Provide a weighting vector for the features of each player-participant\n weight_vector = (1.5,0.1,1.0)\n # Calculate the return value = weighted difference of players' features\n weighted_difference_dot_product = sum(p*q for p,q, \\\n in zip(feature_diff_vector, weight_vector))\n \n return float(weighted_difference_dot_product)", "def get_win_percentage(self) -> float:\n if self.wins == 0:\n return 0.0\n else:\n return round((self.wins / (self.wins + 
self.losses)) * 100, 2)", "def betterEvaluationFunction(currentGameState):\n score = 0\n\n if currentGameState.isWin():\n score += 10000000\n elif currentGameState.isLose():\n score -= 10000000\n\n pacmanPos = currentGameState.getPacmanPosition()\n foodList = gridToList(currentGameState.getFood())\n\n closestFoodDistance = distanceToClosestFood(pacmanPos, foodList) \n\n ghostPoses = currentGameState.getGhostPositions()\n closestGhostDistance = distanceToClosestGhost(pacmanPos, ghostPoses)\n\n score = (10/closestFoodDistance) - closestGhostDistance*10 + currentGameState.getScore()\n return score", "def _cost_caught_by_police(self):\n if self.fine_frequency != 0:\n if self.number_of_courses % self.fine_frequency == 0 and self.number_of_courses != 0:\n if self.number_of_courses % self.fine_frequency_paid_by_driver == 0 and self.number_of_courses != 0:\n self.fine_paid_number_of_courses += 1\n fine_value = np.random.choice([100, 200, 500], p=[0.25, 0.4, 0.35])\n self.total_penalty_points += self._add_penalty_points() # adding penalty points\n return fine_value\n else:\n return 0\n else:\n return 0\n else:\n return 0", "def calculateWinRate():\n times = 10\n winRate = 0.0\n for i in range(times):\n game = Game('user', 6, 6)\n winRate += game.play(5, False, True, False, False)\n winRate = winRate/times\n print \"Winrate:\", winRate", "def set_total_reb_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n player_rebounds = bx[\"reb_def\"] + bx[\"reb_of\"]\n team_rebounds = team[\"reb_def\"] + team[\"reb_of\"]\n opp_team_rebounds = opp_team[\"reb_def\"] + opp_team[\"reb_of\"]\n result = 0.00\n try:\n if bx[\"minutes\"] > 0 and bx[\"minutes\"] > 0:\n result = ((player_rebounds * (team[\"minutes\"]/5)) / (bx[\"minutes\"] * (team_rebounds + opp_team_rebounds)))*100\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n\n self.total_reb_percentage = \"%.2f\" % round(result, 2)", "def theoretical_effective(dataset):\n return float(sum(dataset))/len(dataset)", "def get_opinion_percent(self):\n return (self.get_percent()+100)/2", "def heuristic_2_reflection(game, player) -> float:\n\n reflection_available_factor = get_reflection_available_factor(game, player)\n\n return float(reflection_available_factor)", "def get_winpercent(self, game: 'Game' = None, player: 'Player' = None):\n if game and player:\n pass\n elif game:\n play_count = self.play_set.filter(game=game).count()\n win_count = self.play_set.filter(winner=self, game=game).count()\n return win_count / play_count * 100\n elif player:\n pass\n # play_count = self.play_set.filter(players__in=player).count()\n # win_count = self.play_set.filter(\n # winner=self, player=player).count()\n # return win_count / play_count * 100\n else:\n return self.winpercent", "def calculate_gpa(self):\n cur_node = self.head\n gpa = 0\n total_credits = 0\n while cur_node is not None:\n gpa += cur_node.data.grade() * cur_node.data.credit_hr()\n total_credits += cur_node.data.credit_hr()\n cur_node = cur_node.next\n if total_credits == 0:\n return 0\n return gpa / total_credits", "def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n # First obtain the base information to calculate player & opponent\n # feature values\n player_legal_moves = game.get_legal_moves(player)\n 
opponent = game.get_opponent(player)\n opponent_legal_moves = game.get_legal_moves(opponent)\n if len(player_legal_moves) != len(opponent_legal_moves):\n return float(len(player_legal_moves) - len(opponent_legal_moves))\n \n # Get_center_coordinates and opponent. Then set the list of participants\n center_coordinates = center_y, center_x = get_center_coordinates(game)\n participants = [player, opponent]\n \n # Then, for each participant obtain his/her feature values \n for participant in participants:\n if participant == player:\n p_legal_moves = player_legal_moves\n player_either = player\n participant_coordinates = p_y, p_x = \\\n game.get_player_location(participant)\n player_legal_move_count, player_start_center_distance, \\\n player_total_next_moves, player_max_path_length, \\\n player_path_count, player_min_center_diff \\\n = \\\n get_player_feature_values(game, center_coordinates, \\\n player_either,participant_coordinates, p_legal_moves)\n else:\n p_legal_moves = opponent_legal_moves\n player_either = opponent\n participant_coordinates = p_y, p_x \\\n = game.get_player_location(participant)\n opponent_legal_move_count, opponent_start_center_distance, \\\n opponent_total_next_moves, opponent_max_path_length, \\\n opponent_path_count, opponent_min_center_diff \\\n = \\\n get_player_feature_values(game, center_coordinates, \\\n player_either, participant_coordinates, p_legal_moves)\n \n # Place each participant's feature values in a tuple/vector surrogate \n pro_player_vector = \\\n (player_legal_move_count, player_start_center_distance, \\\n player_total_next_moves, player_max_path_length, player_path_count, \\\n opponent_min_center_diff)\n pro_opponent_vector = \\\n (opponent_legal_move_count, opponent_start_center_distance, \\\n opponent_total_next_moves, opponent_max_path_length, \\\n opponent_path_count, player_min_center_diff)\n \n # Provide a weighting vector for the features \n weight_vector = (1.5,0.1,1.0,0.001,0.001,0.001)\n \n # Calculate the return value = weighted difference of players' features\n weighted_difference_dot_product = sum(p*(q-r ) for p,q,r \\\n in zip(weight_vector, pro_player_vector, pro_opponent_vector))\n \n return float(weighted_difference_dot_product)", "def get_points(self):\n self.round_points = 0\n for die in self.dice:\n if die == 1:\n self.round_points += 100\n elif die == 5:\n self.round_points += 50\n return self.round_points", "def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = len(game.get_legal_moves(player))\n\n if game.move_count < 15:\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves - opp_moves)\n\n delta = 0\n\n moves = game.get_legal_moves()\n initial_moves_count = len(moves)\n indexes = np.random.permutation(initial_moves_count)\n\n for i in range(0, min(4, initial_moves_count)):\n first_level = True\n simulation = game.copy()\n\n while True:\n moves = simulation.get_legal_moves()\n moves_count = len(moves)\n if moves_count == 0:\n if simulation.is_winner(player):\n delta = delta + 1\n else:\n delta = delta - 1\n break\n if first_level:\n selected_move = indexes[i]\n first_level = False\n else:\n selected_move = random.randint(0, moves_count - 1)\n\n simulation.apply_move(moves[selected_move])\n\n return float(own_moves + delta) #float(own_moves - opp_moves + 5 * delta)\n\n #return float(own_moves - opp_moves + free_area_score(game, player) - free_area_score(game, 
game.get_opponent(player)))", "def get_percentage_f_votes(self):\n\n votes_f = self.get_num_f_votes()\n votes_sf = self.get_num_sf_votes()\n\n # avoid dividing by zero\n if votes_f + votes_sf == 0:\n return 0\n else:\n ratio = float(votes_f)/(votes_f + votes_sf)\n return round(ratio * 100, 1)", "def dilutionneeded(self) -> float:\n return self.stock*1.0/self.final", "def ucbScore(self,totalPlayedTimes):\n winRate = self.winRate()\n #print totalPlayedTimes\n #print self.playedTimes\n confidenceInterval = math.sqrt(2 * math.log(totalPlayedTimes,math.e) / self.playedTimes)\n \n return winRate + confidenceInterval", "def _evaluate_num_pieces(self, player):\n evaluation = 0\n if player is Player.black:\n evaluation += self.num_black_pieces * 10\n evaluation -= self.num_white_pieces * 10\n evaluation += self.num_black_kings * 10\n evaluation -= self.num_white_kings * 10\n elif player is Player.white:\n evaluation -= self.num_black_pieces * 10\n evaluation += self.num_white_pieces * 10\n evaluation -= self.num_black_kings * 10\n evaluation += self.num_white_kings * 10\n\n return evaluation", "def credits_earned(self):\n\n if self.grade() >= 69.5:\n return self.nCredits\n else:\n return 0.0", "def update_calculated_stats(self, player_data):\n # Updates calculated statistics\n fga = player_data['FGA']\n fgm = player_data['FGM']\n pa3 = player_data['3FGA']\n pm3 = player_data['3FGM']\n try:\n player_data['FG%'] = fgm/fga\n except:\n player_data['FG%'] = 0.0\n try:\n player_data['3FG%'] = pm3/pa3\n except:\n player_data['3FG%'] = 0.0\n return(player_data)", "def custom_score_7(game, player):\n \"\"\"custom_score_7 heuristic function also aims towards weighted chances heuristics\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) #Calculate length of available moves for myPlayer\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves for the oppositePlayer\n return float(1.5*length_my_player_moves*length_my_player_moves - length_opp_payer_moves*length_opp_payer_moves)", "def get_excess(self) -> int:\n excess_to_min_treasury = self._treasury_balance.get() - self._treasury_min.get()\n auth_score = self.create_interface_score(self._game_auth_score.get(), AuthInterface)\n if not self._excess_smoothing_live.get():\n return excess_to_min_treasury - auth_score.get_excess()\n else:\n third_party_games_excess: int = 0\n games_excess = auth_score.get_todays_games_excess()\n for game in games_excess:\n third_party_games_excess += max(0, int(games_excess[game]))\n reward_pool = excess_to_min_treasury - third_party_games_excess * 20 // 100\n return reward_pool", "def winrate(matches):\n if not matches:\n print('no matches')\n return None\n\n win_loss = [match['result'] for match in matches]\n return sum(win_loss)/len(win_loss)", "def golden_ratio():\n return 1.61803398875", "def required_points(self):\n req_points = self.min_performance * self.initial_available_points()\n return np.maximum(0, np.int64(np.ceil(req_points)))", "def custom_score_5(game, player):\n \"\"\"custom_score_5 heuristic function defines chances heuristics\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) #Calculate length of available moves for myPlayer\n length_opp_payer_moves = 
len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves for the oppositePlayer\n return float(length_my_player_moves*length_my_player_moves - length_opp_payer_moves*length_opp_payer_moves)", "def cost(self) -> float:", "def improved_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves - opp_moves)", "def precision(self, user_list):\n hit = 0\n all_recom = 0\n print('Calculate precision: ')\n for user in tqdm(user_list):\n recom_data = self._get_recommend(user)\n recom_item = set([data[0] for data in recom_data])\n user_item = set(\n self.test[self.test['userId'] == user]['movieId'].values)\n overlap = recom_item & user_item\n hit += len(overlap)\n all_recom += len(recom_item)\n print('\\nprecision is: ', hit / (all_recom * 1.0))\n return hit / (all_recom * 1.0)", "def betterEvaluationFunction(currentGameState: GameState):\n \"*** YOUR CODE HERE ***\"\n ghostScore : float = 1\n nearGhosts : float = 0\n foodScore : float = 0\n curScore = currentGameState.getScore()\n\n nearestFood = [(0, 0), float('inf')]\n pacPos = currentGameState.getPacmanPosition()\n foodPoss= currentGameState.getFood().asList()\n capsulePoss = currentGameState.getCapsules()\n ghostPoss = currentGameState.getGhostPositions()\n\n for foodPos in foodPoss:\n val = manhattanDistance(foodPos, pacPos)\n if val < nearestFood[1]:\n nearestFood[1] = val\n nearestFood[0] = foodPos\n foodScore = nearestFood[1]\n \n for gpos in ghostPoss:\n val = manhattanDistance(pacPos, gpos)\n if val <= 1:\n nearGhosts += (1-val)\n ghostScore += val\n\n return curScore - (1/ghostScore) + (1/foodScore) - nearGhosts - len(capsulePoss)", "def custom_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player) / 8\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player)) / 8\n if opp_moves == 0:\n return float(\"inf\")\n\n move_ratio = (own_moves * 8) / (opp_moves * 8) / 8\n\n # Calculate centerness_score\n completeness = completeness_of_game(game)\n centerness_score = 0\n if completeness < 0.5:\n centerness_max = (game.width / 2.)**2 + (game.height / 2.)**2\n\n own_centerness = centerness(game, player) / centerness_max\n opp_centerness = centerness(game, game.get_opponent(player)) / centerness_max\n centerness_ratio = (own_centerness * centerness_max) / (centerness_max * opp_centerness + 0.1) / centerness_max\n\n centerness_score = -1 * own_centerness + opp_centerness - centerness_ratio\n\n return 2 * own_moves - 2 * opp_moves + 2 * move_ratio + centerness_score", "def getRate(self) -> int:\n if (self._total_stake.get() + self._daily_reward.get()) == 0:\n rate = DENOMINATOR\n else:\n rate = (self._total_stake.get() + self._daily_reward.get()) * DENOMINATOR // self.sICX_score.totalSupply()\n return rate", "def custom_score_3(game, player):\n \"\"\"custom_score_3 heuristic function aims at maximizing win chances of my agent\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = 1.0 * len(game.get_legal_moves(player))#Calculate length of available moves for myPlayer\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player)))#Calculate 
length of available moves with oppositePlayer\n\n if length_my_player_moves == 0:\n return float(\"-inf\")\n\n if length_opp_payer_moves == 0:\n return float(\"inf\")\n\n return float(length_my_player_moves/length_opp_payer_moves)", "def centre_priority_evaluate(self):\r\n evaluation = 0\r\n for player in range(2):\r\n player_sign = player * 2 - 1\r\n for i in range(4):\r\n score = i + 1\r\n evaluation += player_sign * score * count_bits(self.bitboard_king[player] &\r\n self.CENTRE_PRIORITY_BITMASKS[i])\r\n evaluation += player_sign * score * count_bits(self.bitboard_pawns[player] &\r\n self.CENTRE_PRIORITY_BITMASKS[i])\r\n return evaluation", "def dollars_per_point(self):\n if float(self.predicted_draftkings_points) == 0.0:\n return 0.0\n\n return float(self.draftkings_salary) / float(self.predicted_draftkings_points)", "def rate_club(user, club):\n if not user.is_authenticated():\n return None\n if not club.posel_set.exists():\n return None\n return sum(x[1] for x in rank_in_club(user, club)) / club.posel_set.count()", "def current_ratio(self):\n return self.current_assets / self.current_liabilities" ]
[ "0.6531963", "0.6401618", "0.61817557", "0.6132738", "0.6081823", "0.6071461", "0.60199296", "0.5966795", "0.5962311", "0.59621847", "0.5957963", "0.59393156", "0.59319943", "0.5922137", "0.5918801", "0.5913682", "0.5907549", "0.58991516", "0.5896585", "0.5889761", "0.5885497", "0.58792573", "0.5869189", "0.5865168", "0.58413595", "0.5838629", "0.5836382", "0.58291775", "0.5822119", "0.58184373", "0.58169717", "0.58143294", "0.5811706", "0.57919323", "0.578745", "0.57854813", "0.57723236", "0.5758267", "0.57561344", "0.57535964", "0.57506484", "0.57280684", "0.57116556", "0.5706946", "0.57020426", "0.569756", "0.5695808", "0.569299", "0.567984", "0.56726795", "0.5668198", "0.56649", "0.565448", "0.5652856", "0.56515527", "0.5643938", "0.5643929", "0.56411755", "0.5641122", "0.5640217", "0.56401664", "0.5636776", "0.5620967", "0.5619293", "0.5616671", "0.5614021", "0.5602146", "0.55925447", "0.55884004", "0.5577805", "0.5577222", "0.55698454", "0.5567678", "0.5565706", "0.55634207", "0.5562125", "0.55569094", "0.55520463", "0.5549593", "0.5549295", "0.5546663", "0.5546206", "0.5546096", "0.5545535", "0.55374354", "0.55356914", "0.55318314", "0.553074", "0.55285645", "0.552515", "0.5525104", "0.5524755", "0.5511058", "0.550978", "0.5507363", "0.5507355", "0.5506616", "0.5504503", "0.55024606", "0.5501506" ]
0.65123755
1