rem (string, lengths 1–322k) | add (string, lengths 0–2.05M) | context (string, lengths 4–228k) | meta (string, lengths 156–215) |
---|---|---|---|
derphi_a0 = phiprime(alpha) gc = gc + 1 if (phi_a0 <= phi0 + c1*alpha0*derphi0) \ and (abs(derphi_a0) <= c2*abs(derphi0)): return alpha0, fc, gc alpha0 = 0 alpha1 = 1 phi_a1 = phi_a0 | alpha1 = pymin(1.0,1.01*2*(phi0-old_old_fval)/derphi0) phi_a1 = phi(alpha1) derphi_a1 = phiprime(alpha1) | def phiprime(alpha): return Num.dot(fprime(xk+alpha*pk,*args),pk) | d00f433519cbe9ce0d4306fb9512ac79d655e1fc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/d00f433519cbe9ce0d4306fb9512ac79d655e1fc/optimize.py |
alpha_star, ifc, igc = zoom(alpha0, alpha1, phi_a0, phi_a1, derphi_a0, phi, phiprime, phi0, derphi0, c1, c2) gc = gc + igc fc = fc + ifc | alpha_star, fval_star = zoom(alpha0, alpha1, phi_a0, phi_a1, derphi_a0, phi, phiprime, phi0, derphi0, c1, c2) | def phiprime(alpha): return Num.dot(fprime(xk+alpha*pk,*args),pk) | d00f433519cbe9ce0d4306fb9512ac79d655e1fc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/d00f433519cbe9ce0d4306fb9512ac79d655e1fc/optimize.py |
gc = gc + 1 | def phiprime(alpha): return Num.dot(fprime(xk+alpha*pk,*args),pk) | d00f433519cbe9ce0d4306fb9512ac79d655e1fc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/d00f433519cbe9ce0d4306fb9512ac79d655e1fc/optimize.py |
alpha_star, ifc, igc = zoom(alpha1, alpha0, phi_a1, phi_a0, derphi_a1, phi, phiprime, phi0, derphi0, c1, c2) gc = gc + igc fc = fc + ifc | alpha_star, fval_star = zoom(alpha1, alpha0, phi_a1, phi_a0, derphi_a1, phi, phiprime, phi0, derphi0, c1, c2) | def phiprime(alpha): return Num.dot(fprime(xk+alpha*pk,*args),pk) | d00f433519cbe9ce0d4306fb9512ac79d655e1fc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/d00f433519cbe9ce0d4306fb9512ac79d655e1fc/optimize.py |
phi_a1 = f(xk+alpha1*pk,*args) fc = fc + 1 | phi_a1 = phi(alpha1) | def phiprime(alpha): return Num.dot(fprime(xk+alpha*pk,*args),pk) | d00f433519cbe9ce0d4306fb9512ac79d655e1fc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/d00f433519cbe9ce0d4306fb9512ac79d655e1fc/optimize.py |
return alpha_star, fc, gc def line_search_BFGS(f, xk, pk, gfk, args=(), c1=1e-4, alpha0=1): | return alpha_star, fc, gc, fval_star, old_fval def line_search_BFGS(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1): | def phiprime(alpha): return Num.dot(fprime(xk+alpha*pk,*args),pk) | d00f433519cbe9ce0d4306fb9512ac79d655e1fc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/d00f433519cbe9ce0d4306fb9512ac79d655e1fc/optimize.py |
phi0 = apply(f,(xk,)+args) | phi0 = old_fval | def line_search_BFGS(f, xk, pk, gfk, args=(), c1=1e-4, alpha0=1): """Minimize over alpha, the function f(xk+alpha pk) Uses the interpolation algorithm (Armiijo backtracking) as suggested by Wright and Nocedal in 'Numerical Optimization', 1999, pg. 56-57 Outputs: (alpha, fc, gc) """ fc = 0 phi0 = apply(f,(xk,)+args) # compute f(xk) phi_a0 = apply(f,(xk+alpha0*pk,)+args) # compute f fc = fc + 2 derphi0 = Num.dot(gfk,pk) if (phi_a0 <= phi0 + c1*alpha0*derphi0): return alpha0, fc, 0 # Otherwise compute the minimizer of a quadratic interpolant: alpha1 = -(derphi0) * alpha0**2 / 2.0 / (phi_a0 - phi0 - derphi0 * alpha0) phi_a1 = apply(f,(xk+alpha1*pk,)+args) fc = fc + 1 if (phi_a1 <= phi0 + c1*alpha1*derphi0): return alpha1, fc, 0 # Otherwise loop with cubic interpolation until we find an alpha which # satifies the first Wolfe condition (since we are backtracking, we will # assume that the value of alpha is not too small and satisfies the second # condition. while 1: # we are assuming pk is a descent direction factor = alpha0**2 * alpha1**2 * (alpha1-alpha0) a = alpha0**2 * (phi_a1 - phi0 - derphi0*alpha1) - \ alpha1**2 * (phi_a0 - phi0 - derphi0*alpha0) a = a / factor b = -alpha0**3 * (phi_a1 - phi0 - derphi0*alpha1) + \ alpha1**3 * (phi_a0 - phi0 - derphi0*alpha0) b = b / factor alpha2 = (-b + Num.sqrt(abs(b**2 - 3 * a * derphi0))) / (3.0*a) phi_a2 = apply(f,(xk+alpha2*pk,)+args) fc = fc + 1 if (phi_a2 <= phi0 + c1*alpha2*derphi0): return alpha2, fc, 0 if (alpha1 - alpha2) > alpha1 / 2.0 or (1 - alpha2/alpha1) < 0.96: alpha2 = alpha1 / 2.0 alpha0 = alpha1 alpha1 = alpha2 phi_a0 = phi_a1 phi_a1 = phi_a2 | d00f433519cbe9ce0d4306fb9512ac79d655e1fc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/d00f433519cbe9ce0d4306fb9512ac79d655e1fc/optimize.py |
fc = fc + 2 | fc = fc + 1 | def line_search_BFGS(f, xk, pk, gfk, args=(), c1=1e-4, alpha0=1): """Minimize over alpha, the function f(xk+alpha pk) Uses the interpolation algorithm (Armiijo backtracking) as suggested by Wright and Nocedal in 'Numerical Optimization', 1999, pg. 56-57 Outputs: (alpha, fc, gc) """ fc = 0 phi0 = apply(f,(xk,)+args) # compute f(xk) phi_a0 = apply(f,(xk+alpha0*pk,)+args) # compute f fc = fc + 2 derphi0 = Num.dot(gfk,pk) if (phi_a0 <= phi0 + c1*alpha0*derphi0): return alpha0, fc, 0 # Otherwise compute the minimizer of a quadratic interpolant: alpha1 = -(derphi0) * alpha0**2 / 2.0 / (phi_a0 - phi0 - derphi0 * alpha0) phi_a1 = apply(f,(xk+alpha1*pk,)+args) fc = fc + 1 if (phi_a1 <= phi0 + c1*alpha1*derphi0): return alpha1, fc, 0 # Otherwise loop with cubic interpolation until we find an alpha which # satifies the first Wolfe condition (since we are backtracking, we will # assume that the value of alpha is not too small and satisfies the second # condition. while 1: # we are assuming pk is a descent direction factor = alpha0**2 * alpha1**2 * (alpha1-alpha0) a = alpha0**2 * (phi_a1 - phi0 - derphi0*alpha1) - \ alpha1**2 * (phi_a0 - phi0 - derphi0*alpha0) a = a / factor b = -alpha0**3 * (phi_a1 - phi0 - derphi0*alpha1) + \ alpha1**3 * (phi_a0 - phi0 - derphi0*alpha0) b = b / factor alpha2 = (-b + Num.sqrt(abs(b**2 - 3 * a * derphi0))) / (3.0*a) phi_a2 = apply(f,(xk+alpha2*pk,)+args) fc = fc + 1 if (phi_a2 <= phi0 + c1*alpha2*derphi0): return alpha2, fc, 0 if (alpha1 - alpha2) > alpha1 / 2.0 or (1 - alpha2/alpha1) < 0.96: alpha2 = alpha1 / 2.0 alpha0 = alpha1 alpha1 = alpha2 phi_a0 = phi_a1 phi_a1 = phi_a2 | d00f433519cbe9ce0d4306fb9512ac79d655e1fc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/d00f433519cbe9ce0d4306fb9512ac79d655e1fc/optimize.py |
return alpha0, fc, 0 | return alpha0, fc, 0, phi_a0 | def line_search_BFGS(f, xk, pk, gfk, args=(), c1=1e-4, alpha0=1): """Minimize over alpha, the function f(xk+alpha pk) Uses the interpolation algorithm (Armiijo backtracking) as suggested by Wright and Nocedal in 'Numerical Optimization', 1999, pg. 56-57 Outputs: (alpha, fc, gc) """ fc = 0 phi0 = apply(f,(xk,)+args) # compute f(xk) phi_a0 = apply(f,(xk+alpha0*pk,)+args) # compute f fc = fc + 2 derphi0 = Num.dot(gfk,pk) if (phi_a0 <= phi0 + c1*alpha0*derphi0): return alpha0, fc, 0 # Otherwise compute the minimizer of a quadratic interpolant: alpha1 = -(derphi0) * alpha0**2 / 2.0 / (phi_a0 - phi0 - derphi0 * alpha0) phi_a1 = apply(f,(xk+alpha1*pk,)+args) fc = fc + 1 if (phi_a1 <= phi0 + c1*alpha1*derphi0): return alpha1, fc, 0 # Otherwise loop with cubic interpolation until we find an alpha which # satifies the first Wolfe condition (since we are backtracking, we will # assume that the value of alpha is not too small and satisfies the second # condition. while 1: # we are assuming pk is a descent direction factor = alpha0**2 * alpha1**2 * (alpha1-alpha0) a = alpha0**2 * (phi_a1 - phi0 - derphi0*alpha1) - \ alpha1**2 * (phi_a0 - phi0 - derphi0*alpha0) a = a / factor b = -alpha0**3 * (phi_a1 - phi0 - derphi0*alpha1) + \ alpha1**3 * (phi_a0 - phi0 - derphi0*alpha0) b = b / factor alpha2 = (-b + Num.sqrt(abs(b**2 - 3 * a * derphi0))) / (3.0*a) phi_a2 = apply(f,(xk+alpha2*pk,)+args) fc = fc + 1 if (phi_a2 <= phi0 + c1*alpha2*derphi0): return alpha2, fc, 0 if (alpha1 - alpha2) > alpha1 / 2.0 or (1 - alpha2/alpha1) < 0.96: alpha2 = alpha1 / 2.0 alpha0 = alpha1 alpha1 = alpha2 phi_a0 = phi_a1 phi_a1 = phi_a2 | d00f433519cbe9ce0d4306fb9512ac79d655e1fc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/d00f433519cbe9ce0d4306fb9512ac79d655e1fc/optimize.py |
return alpha1, fc, 0 | return alpha1, fc, 0, phi_a1 | def line_search_BFGS(f, xk, pk, gfk, args=(), c1=1e-4, alpha0=1): """Minimize over alpha, the function f(xk+alpha pk) Uses the interpolation algorithm (Armiijo backtracking) as suggested by Wright and Nocedal in 'Numerical Optimization', 1999, pg. 56-57 Outputs: (alpha, fc, gc) """ fc = 0 phi0 = apply(f,(xk,)+args) # compute f(xk) phi_a0 = apply(f,(xk+alpha0*pk,)+args) # compute f fc = fc + 2 derphi0 = Num.dot(gfk,pk) if (phi_a0 <= phi0 + c1*alpha0*derphi0): return alpha0, fc, 0 # Otherwise compute the minimizer of a quadratic interpolant: alpha1 = -(derphi0) * alpha0**2 / 2.0 / (phi_a0 - phi0 - derphi0 * alpha0) phi_a1 = apply(f,(xk+alpha1*pk,)+args) fc = fc + 1 if (phi_a1 <= phi0 + c1*alpha1*derphi0): return alpha1, fc, 0 # Otherwise loop with cubic interpolation until we find an alpha which # satifies the first Wolfe condition (since we are backtracking, we will # assume that the value of alpha is not too small and satisfies the second # condition. while 1: # we are assuming pk is a descent direction factor = alpha0**2 * alpha1**2 * (alpha1-alpha0) a = alpha0**2 * (phi_a1 - phi0 - derphi0*alpha1) - \ alpha1**2 * (phi_a0 - phi0 - derphi0*alpha0) a = a / factor b = -alpha0**3 * (phi_a1 - phi0 - derphi0*alpha1) + \ alpha1**3 * (phi_a0 - phi0 - derphi0*alpha0) b = b / factor alpha2 = (-b + Num.sqrt(abs(b**2 - 3 * a * derphi0))) / (3.0*a) phi_a2 = apply(f,(xk+alpha2*pk,)+args) fc = fc + 1 if (phi_a2 <= phi0 + c1*alpha2*derphi0): return alpha2, fc, 0 if (alpha1 - alpha2) > alpha1 / 2.0 or (1 - alpha2/alpha1) < 0.96: alpha2 = alpha1 / 2.0 alpha0 = alpha1 alpha1 = alpha2 phi_a0 = phi_a1 phi_a1 = phi_a2 | d00f433519cbe9ce0d4306fb9512ac79d655e1fc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/d00f433519cbe9ce0d4306fb9512ac79d655e1fc/optimize.py |
return alpha2, fc, 0 | return alpha2, fc, 0, phi_a2 | def line_search_BFGS(f, xk, pk, gfk, args=(), c1=1e-4, alpha0=1): """Minimize over alpha, the function f(xk+alpha pk) Uses the interpolation algorithm (Armiijo backtracking) as suggested by Wright and Nocedal in 'Numerical Optimization', 1999, pg. 56-57 Outputs: (alpha, fc, gc) """ fc = 0 phi0 = apply(f,(xk,)+args) # compute f(xk) phi_a0 = apply(f,(xk+alpha0*pk,)+args) # compute f fc = fc + 2 derphi0 = Num.dot(gfk,pk) if (phi_a0 <= phi0 + c1*alpha0*derphi0): return alpha0, fc, 0 # Otherwise compute the minimizer of a quadratic interpolant: alpha1 = -(derphi0) * alpha0**2 / 2.0 / (phi_a0 - phi0 - derphi0 * alpha0) phi_a1 = apply(f,(xk+alpha1*pk,)+args) fc = fc + 1 if (phi_a1 <= phi0 + c1*alpha1*derphi0): return alpha1, fc, 0 # Otherwise loop with cubic interpolation until we find an alpha which # satifies the first Wolfe condition (since we are backtracking, we will # assume that the value of alpha is not too small and satisfies the second # condition. while 1: # we are assuming pk is a descent direction factor = alpha0**2 * alpha1**2 * (alpha1-alpha0) a = alpha0**2 * (phi_a1 - phi0 - derphi0*alpha1) - \ alpha1**2 * (phi_a0 - phi0 - derphi0*alpha0) a = a / factor b = -alpha0**3 * (phi_a1 - phi0 - derphi0*alpha1) + \ alpha1**3 * (phi_a0 - phi0 - derphi0*alpha0) b = b / factor alpha2 = (-b + Num.sqrt(abs(b**2 - 3 * a * derphi0))) / (3.0*a) phi_a2 = apply(f,(xk+alpha2*pk,)+args) fc = fc + 1 if (phi_a2 <= phi0 + c1*alpha2*derphi0): return alpha2, fc, 0 if (alpha1 - alpha2) > alpha1 / 2.0 or (1 - alpha2/alpha1) < 0.96: alpha2 = alpha1 / 2.0 alpha0 = alpha1 alpha1 = alpha2 phi_a0 = phi_a1 phi_a1 = phi_a2 | d00f433519cbe9ce0d4306fb9512ac79d655e1fc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/d00f433519cbe9ce0d4306fb9512ac79d655e1fc/optimize.py |
alpha_k, fc, gc = line_search_BFGS(f,xk,pk,gfk,args) | alpha_k, fc, gc, old_fval = line_search_BFGS(f,xk,pk,gfk,old_fval,args) | def fmin_bfgs(f, x0, fprime=None, args=(), avegtol=1e-5, epsilon=1e-8, maxiter=None, full_output=0, disp=1): """Minimize a function using the BFGS algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. avegtol -- minimum average value of gradient for stopping epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, func_calls, grad_calls, warnflag}) xopt -- the minimizer of f. fopt -- the value of f(xopt). func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' Additional Inputs: avegtol -- the minimum occurs when fprime(xopt)==0. This specifies how close to zero the average magnitude of fprime(xopt) needs to be. maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) gtol = N*avegtol I = MLab.eye(N) Hk = I if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) grad_calls = grad_calls + 1 xk = x0 sk = [2*gtol] warnflag = 0 while (Num.add.reduce(abs(gfk)) > gtol) and (k < maxiter): pk = -Num.dot(Hk,gfk) alpha_k, fc, gc = line_search_BFGS(f,xk,pk,gfk,args) func_calls = func_calls + fc xkp1 = xk + alpha_k * pk sk = xkp1 - xk xk = xkp1 if app_fprime: gfkp1 = apply(approx_fprime,(xkp1,f,epsilon)+args) func_calls = func_calls + gc + len(x0) + 1 else: gfkp1 = apply(fprime,(xkp1,)+args) grad_calls = grad_calls + gc + 1 yk = gfkp1 - gfk k = k + 1 try: rhok = 1 / Num.dot(yk,sk) except ZeroDivisionError: warnflag = 2 break A1 = I - sk[:,Num.NewAxis] * yk[Num.NewAxis,:] * rhok A2 = I - yk[:,Num.NewAxis] * sk[Num.NewAxis,:] * rhok Hk = Num.dot(A1,Num.dot(Hk,A2)) + rhok * sk[:,Num.NewAxis] \ * sk[Num.NewAxis,:] gfk = gfkp1 if disp or full_output: fval = apply(f,(xk,)+args) if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." 
print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: return xk, fval, func_calls, grad_calls, warnflag else: return xk | d00f433519cbe9ce0d4306fb9512ac79d655e1fc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/d00f433519cbe9ce0d4306fb9512ac79d655e1fc/optimize.py |
def fmin_cg(f, x0, fprime=None, args=(), avegtol=1e-5, epsilon=1e-8, maxiter=None, full_output=0, disp=1): """Minimize a function with nonlinear conjugate gradient algorithm. Description: Optimize the function, f, whose gradient is given by fprime using the nonlinear conjugate gradient algorithm of Polak and Ribiere See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 120-122. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. avegtol -- minimum average value of gradient for stopping epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, func_calls, grad_calls, warnflag}) xopt -- the minimizer of f. fopt -- the value of f(xopt). func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' Additional Inputs: avegtol -- the minimum occurs when fprime(xopt)==0. This specifies how close to zero the average magnitude of fprime(xopt) needs to be. maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) gtol = N*avegtol if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) grad_calls = grad_calls + 1 xk = x0 sk = [2*gtol] warnflag = 0 pk = -gfk while (Num.add.reduce(abs(gfk)) > gtol) and (k < maxiter): deltak = Num.dot(gfk,gfk) alpha_k, fc, gc = line_search_BFGS(f,xk,pk,gfk,args) func_calls = func_calls + fc xk = xk + alpha_k * pk if app_fprime: gfkp1 = apply(approx_fprime,(xkp1,f,epsilon)+args) func_calls = func_calls + gc + len(x0) + 1 else: gfkp1 = apply(fprime,(xkp1,)+args) grad_calls = grad_calls + gc + 1 yk = gfkp1 - gfk beta_k = max(0,Num.dot(yk,gfkp1)/deltak) pk = -gfkp1 + beta_k * pk gfk = gfkp1 k = k + 1 if disp or full_output: fval = apply(f,(xk,)+args) if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: return xk, fval, func_calls, grad_calls, warnflag else: return xk | def fmin_bfgs(f, x0, fprime=None, args=(), avegtol=1e-5, epsilon=1e-8, maxiter=None, full_output=0, disp=1): """Minimize a function using the BFGS algorithm. 
Description: Optimize the function, f, whose gradient is given by fprime using the quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. avegtol -- minimum average value of gradient for stopping epsilon -- if fprime is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, func_calls, grad_calls, warnflag}) xopt -- the minimizer of f. fopt -- the value of f(xopt). func_calls -- the number of function_calls. grad_calls -- the number of gradient calls. warnflag -- an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' Additional Inputs: avegtol -- the minimum occurs when fprime(xopt)==0. This specifies how close to zero the average magnitude of fprime(xopt) needs to be. maxiter -- the maximum number of iterations. full_output -- if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. disp -- print convergence message if non-zero. """ app_fprime = 0 if fprime is None: app_fprime = 1 x0 = asarray(x0) if maxiter is None: maxiter = len(x0)*200 func_calls = 0 grad_calls = 0 k = 0 N = len(x0) gtol = N*avegtol I = MLab.eye(N) Hk = I if app_fprime: gfk = apply(approx_fprime,(x0,f,epsilon)+args) func_calls = func_calls + len(x0) + 1 else: gfk = apply(fprime,(x0,)+args) grad_calls = grad_calls + 1 xk = x0 sk = [2*gtol] warnflag = 0 while (Num.add.reduce(abs(gfk)) > gtol) and (k < maxiter): pk = -Num.dot(Hk,gfk) alpha_k, fc, gc = line_search_BFGS(f,xk,pk,gfk,args) func_calls = func_calls + fc xkp1 = xk + alpha_k * pk sk = xkp1 - xk xk = xkp1 if app_fprime: gfkp1 = apply(approx_fprime,(xkp1,f,epsilon)+args) func_calls = func_calls + gc + len(x0) + 1 else: gfkp1 = apply(fprime,(xkp1,)+args) grad_calls = grad_calls + gc + 1 yk = gfkp1 - gfk k = k + 1 try: rhok = 1 / Num.dot(yk,sk) except ZeroDivisionError: warnflag = 2 break A1 = I - sk[:,Num.NewAxis] * yk[Num.NewAxis,:] * rhok A2 = I - yk[:,Num.NewAxis] * sk[Num.NewAxis,:] * rhok Hk = Num.dot(A1,Num.dot(Hk,A2)) + rhok * sk[:,Num.NewAxis] \ * sk[Num.NewAxis,:] gfk = gfkp1 if disp or full_output: fval = apply(f,(xk,)+args) if warnflag == 2: if disp: print "Warning: Desired error not necessarily achieved due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls elif k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls print " Gradient evaluations: %d" % grad_calls if full_output: return xk, fval, func_calls, grad_calls, warnflag else: return xk | d00f433519cbe9ce0d4306fb9512ac79d655e1fc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/d00f433519cbe9ce0d4306fb9512ac79d655e1fc/optimize.py |
alphak, fc, gc = line_search_BFGS(f,xk,pk,gfk,args) | alphak, fc, gc, old_fval = line_search_BFGS(f,xk,pk,gfk,old_fval,args) | def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5, epsilon=1e-8, maxiter=None, full_output=0, disp=1): """Description: Minimize the function, f, whose gradient is given by fprime using the Newton-CG method. fhess_p must compute the hessian times an arbitrary vector. If it is not given, finite-differences on fprime are used to compute it. See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 140. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f: fprime(x, *args) fhess_p -- a function to compute the Hessian of f times an arbitrary vector: fhess_p (x, p, *args) fhess -- a function to compute the Hessian matrix of f. args -- extra arguments for f, fprime, fhess_p, and fhess (the same set of extra arguments is supplied to all of these functions). epsilon -- if fhess is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, fcalls, gcalls, hcalls, warnflag}) xopt -- the minimizer of f fopt -- the value of the function at xopt: fopt = f(xopt) fcalls -- the number of function calls. gcalls -- the number of gradient calls. hcalls -- the number of hessian calls. warnflag -- algorithm warnings: 1 : 'Maximum number of iterations exceeded.' Additional Inputs: avextol -- Convergence is assumed when the average relative error in the minimizer falls below this amount. maxiter -- Maximum number of iterations to allow. full_output -- If non-zero return the optional outputs. disp -- If non-zero print convergence message. Remarks: Only one of fhess_p or fhess need be given. If fhess is provided, then fhess_p will be ignored. If neither fhess nor fhess_p is provided, then the hessian product will be approximated using finite differences on fprime. """ x0 = asarray(x0) fcalls = 0 gcalls = 0 hcalls = 0 if maxiter is None: maxiter = len(x0)*200 xtol = len(x0)*avextol update = [2*xtol] xk = x0 k = 0 while (Num.add.reduce(abs(update)) > xtol) and (k < maxiter): # Compute a search direction pk by applying the CG method to # del2 f(xk) p = - grad f(xk) starting from 0. b = -apply(fprime,(xk,)+args) gcalls = gcalls + 1 maggrad = Num.add.reduce(abs(b)) eta = min([0.5,Num.sqrt(maggrad)]) termcond = eta * maggrad xsupi = 0 ri = -b psupi = -ri i = 0 dri0 = Num.dot(ri,ri) if fhess is not None: # you want to compute hessian once. A = apply(fhess,(xk,)+args) hcalls = hcalls + 1 while Num.add.reduce(abs(ri)) > termcond: if fhess is None: if fhess_p is None: Ap = apply(approx_fhess_p,(xk,psupi,fprime,espilon)+args) gcalls = gcalls + 2 else: Ap = apply(fhess_p,(xk,psupi)+args) hcalls = hcalls + 1 else: Ap = Num.dot(A,psupi) # check curvature curv = Num.dot(psupi,Ap) if (curv <= 0): if (i > 0): break else: xsupi = xsupi + dri0/curv * psupi break alphai = dri0 / curv xsupi = xsupi + alphai * psupi ri = ri + alphai * Ap dri1 = Num.dot(ri,ri) betai = dri1 / dri0 psupi = -ri + betai * psupi i = i + 1 dri0 = dri1 # update Num.dot(ri,ri) for next time. pk = xsupi # search direction is solution to system. 
gfk = -b # gradient at xk alphak, fc, gc = line_search_BFGS(f,xk,pk,gfk,args) fcalls = fcalls + fc gcalls = gcalls + gc update = alphak * pk xk = xk + update k = k + 1 if disp or full_output: fval = apply(f,(xk,)+args) if k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % fcalls print " Gradient evaluations: %d" % gcalls print " Hessian evaluations: %d" % hcalls else: warnflag = 0 if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % fcalls print " Gradient evaluations: %d" % gcalls print " Hessian evaluations: %d" % hcalls if full_output: return xk, fval, fcalls, gcalls, hcalls, warnflag else: return xk | d00f433519cbe9ce0d4306fb9512ac79d655e1fc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/d00f433519cbe9ce0d4306fb9512ac79d655e1fc/optimize.py |
fval = apply(f,(xk,)+args) | fval = old_fval | def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5, epsilon=1e-8, maxiter=None, full_output=0, disp=1): """Description: Minimize the function, f, whose gradient is given by fprime using the Newton-CG method. fhess_p must compute the hessian times an arbitrary vector. If it is not given, finite-differences on fprime are used to compute it. See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 140. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f: fprime(x, *args) fhess_p -- a function to compute the Hessian of f times an arbitrary vector: fhess_p (x, p, *args) fhess -- a function to compute the Hessian matrix of f. args -- extra arguments for f, fprime, fhess_p, and fhess (the same set of extra arguments is supplied to all of these functions). epsilon -- if fhess is approximated use this value for the step size (can be scalar or vector) Outputs: (xopt, {fopt, fcalls, gcalls, hcalls, warnflag}) xopt -- the minimizer of f fopt -- the value of the function at xopt: fopt = f(xopt) fcalls -- the number of function calls. gcalls -- the number of gradient calls. hcalls -- the number of hessian calls. warnflag -- algorithm warnings: 1 : 'Maximum number of iterations exceeded.' Additional Inputs: avextol -- Convergence is assumed when the average relative error in the minimizer falls below this amount. maxiter -- Maximum number of iterations to allow. full_output -- If non-zero return the optional outputs. disp -- If non-zero print convergence message. Remarks: Only one of fhess_p or fhess need be given. If fhess is provided, then fhess_p will be ignored. If neither fhess nor fhess_p is provided, then the hessian product will be approximated using finite differences on fprime. """ x0 = asarray(x0) fcalls = 0 gcalls = 0 hcalls = 0 if maxiter is None: maxiter = len(x0)*200 xtol = len(x0)*avextol update = [2*xtol] xk = x0 k = 0 while (Num.add.reduce(abs(update)) > xtol) and (k < maxiter): # Compute a search direction pk by applying the CG method to # del2 f(xk) p = - grad f(xk) starting from 0. b = -apply(fprime,(xk,)+args) gcalls = gcalls + 1 maggrad = Num.add.reduce(abs(b)) eta = min([0.5,Num.sqrt(maggrad)]) termcond = eta * maggrad xsupi = 0 ri = -b psupi = -ri i = 0 dri0 = Num.dot(ri,ri) if fhess is not None: # you want to compute hessian once. A = apply(fhess,(xk,)+args) hcalls = hcalls + 1 while Num.add.reduce(abs(ri)) > termcond: if fhess is None: if fhess_p is None: Ap = apply(approx_fhess_p,(xk,psupi,fprime,espilon)+args) gcalls = gcalls + 2 else: Ap = apply(fhess_p,(xk,psupi)+args) hcalls = hcalls + 1 else: Ap = Num.dot(A,psupi) # check curvature curv = Num.dot(psupi,Ap) if (curv <= 0): if (i > 0): break else: xsupi = xsupi + dri0/curv * psupi break alphai = dri0 / curv xsupi = xsupi + alphai * psupi ri = ri + alphai * Ap dri1 = Num.dot(ri,ri) betai = dri1 / dri0 psupi = -ri + betai * psupi i = i + 1 dri0 = dri1 # update Num.dot(ri,ri) for next time. pk = xsupi # search direction is solution to system. 
gfk = -b # gradient at xk alphak, fc, gc = line_search_BFGS(f,xk,pk,gfk,args) fcalls = fcalls + fc gcalls = gcalls + gc update = alphak * pk xk = xk + update k = k + 1 if disp or full_output: fval = apply(f,(xk,)+args) if k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % fcalls print " Gradient evaluations: %d" % gcalls print " Hessian evaluations: %d" % hcalls else: warnflag = 0 if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % fcalls print " Gradient evaluations: %d" % gcalls print " Hessian evaluations: %d" % hcalls if full_output: return xk, fval, fcalls, gcalls, hcalls, warnflag else: return xk | d00f433519cbe9ce0d4306fb9512ac79d655e1fc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/d00f433519cbe9ce0d4306fb9512ac79d655e1fc/optimize.py |
start = time.time() x = fmin_cg(rosen, x0, fprime=rosen_der, maxiter=80) print x times.append(time.time() - start) algor.append('Nonlinear CG\t') | def _scalarfunc(*params): params = squeeze(asarray(params)) return func(params,*args) | d00f433519cbe9ce0d4306fb9512ac79d655e1fc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/d00f433519cbe9ce0d4306fb9512ac79d655e1fc/optimize.py |
if len(row) == 0 and len(seq) == self.shape[1]: nonzeros = [ind for ind, xi in enumerate(x) if xi != 0] x = [x[ind] for ind in nonzeros] row[:] = nonzeros self.data[i] = x else: for k, col in enumerate(seq): self[i, col] = x[k] | for k, col in enumerate(seq): self[i, col] = x[k] | def __setitem__(self, index, x): try: assert len(index) == 2 except (AssertionError, TypeError): raise IndexError, "invalid index" i, j = index if isinstance(i, int): if not (i>=0 and i<self.shape[0]): raise IndexError, "lil_matrix index out of range" else: if isinstance(i, slice): seq = xrange(i.start or 0, i.stop or self.shape[1], i.step or 1) elif operator.isSequenceType(i): seq = i else: raise IndexError, "invalid index" try: if not len(x) == len(seq): raise ValueError, "number of elements in source must be" \ " same as number of elements in destimation" except TypeError: # Either x or seq is not a sequence. Note that a sparse matrix # is also not a sequence under this definition. # Currently we don't support setting to/from non-sequence types. # This could be enhanced, though, to allow a scalar source, # and/or a sparse vector. raise TypeError, "unsupported type for lil_matrix.__setitem__" else: # Sequence: call __setitem__ recursively, once for each row for i in xrange(len(seq)): self[seq[i], index[1]] = x[i] return | 8f4e66c458a41ba941e436146e9a9e7df93cdc46 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/8f4e66c458a41ba941e436146e9a9e7df93cdc46/sparse.py |
mu = 1/(exp(lambda_))-1) | mu = 1/(exp(lambda_)-1) | def _stats(self, lambda_): m2, m1 = arr(lambda_) mu = 1/(exp(lambda_))-1) var = exp(-lambda_)/(1-exp(-lambda))**2 g1 = 2*cosh(lambda_/2.0) g2 = 4+2*cosh(lambda_) return mu, var, g1, g2 | ccb0a69b78ea57818837f989851ed5bf4309a175 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/ccb0a69b78ea57818837f989851ed5bf4309a175/distributions.py |
cond = c+0*val1+0*val2 | cond = c+0*val1 | def _ppf(self, q, a, c): val1 = special.gammaincinv(a,q) val2 = special.gammaincinv(a,1.0-q) ic = 1.0/c cond = c+0*val1+0*val2 return where(cond > 0,val1**ic,val2**ic) | abc8537bacaa811a759c7d399b68a35ec64aa647 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/abc8537bacaa811a759c7d399b68a35ec64aa647/distributions.py |
f = open(source,'w') | f = open(target,'w') | def get_clapack_source(ext, build_dir): name = ext.name.split('.')[-1] assert name=='clapack',`name` if atlas_version is None: target = os.path.join(build_dir,target_dir,'clapack.pyf') from distutils.dep_util import newer if newer(__file__,target): f = open(source,'w') f.write(tmpl_empty_clapack_pyf) f.close() else: target = ext.depends[0] assert os.path.basename(target)=='clapack.pyf.src' return target | 40bc33ebd624fc0d82e81085df9c2afab5cf1e58 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/40bc33ebd624fc0d82e81085df9c2afab5cf1e58/setup_lapack.py |
if nest is None: nest=m/2 | if nest is None: nest=m+2*k | def splprep(x,w=None,u=None,ub=None,ue=None,k=3,task=0,s=None,t=None, full_output=0,nest=None,per=0,quiet=1): """Find the B-spline representation of an N-dimensional curve. Description: Given a list of N rank-1 arrays, x, which represent a curve in N-dimensional space parametrized by u, find a smooth approximating spline curve g(u). Uses the FORTRAN routine parcur from FITPACK Inputs: x -- A list of sample vector arrays representing the curve. u -- An array of parameter values. If not given, these values are calculated automatically as (M = len(x[0])): v[0] = 0 v[i] = v[i-1] + distance(x[i],x[i-1]) u[i] = v[i] / v[M-1] ub, ue -- The end-points of the parameters interval. Defaults to u[0] and u[-1]. k -- Degree of the spline. Cubic splines are recommended. Even values of k should be avoided especially with a small s-value. 1 <= k <= 5. task -- If task==0 find t and c for a given smoothing factor, s. If task==1 find t and c for another value of the smoothing factor, s. There must have been a previous call with task=0 or task=1 for the same set of data. If task=-1 find the weighted least square spline for a given set of knots, t. s -- A smoothing condition. The amount of smoothness is determined by satisfying the conditions: sum((w * (y - g))**2) <= s where g(x) is the smoothed interpolation of (x,y). The user can use s to control the tradeoff between closeness and smoothness of fit. Larger s means more smoothing while smaller values of s indicate less smoothing. Recommended values of s depend on the weights, w. If the weights represent the inverse of the standard-deviation of y, then a good s value should be found in the range (m-sqrt(2*m),m+sqrt(2*m)) where m is the number of datapoints in x, y, and w. t -- The knots needed for task=-1. full_output -- If non-zero, then return optional outputs. nest -- An over-estimate of the total number of knots of the spline to help in determining the storage space. By default nest=m/2. Always large enough is nest=m+k+1. per -- If non-zero, data points are considered periodic with period x[m-1] - x[0] and a smooth periodic spline approximation is returned. Values of y[m-1] and w[m-1] are not used. quiet -- Non-zero to suppress messages. Outputs: (tck, u, {fp, ier, msg}) tck -- (t,c,k) a tuple containing the vector of knots, the B-spline coefficients, and the degree of the spline. u -- An array of the values of the parameter. fp -- The weighted sum of squared residuals of the spline approximation. ier -- An integer flag about splrep success. Success is indicated if ier<=0. If ier in [1,2,3] an error occurred but was not raised. Otherwise an error is raised. msg -- A message corresponding to the integer flag, ier. Remarks: SEE splev for evaluation of the spline and its derivatives. 
""" if task<=0: _parcur_cache = {'t': array([],'d'), 'wrk': array([],'d'), 'iwrk':array([],'i'),'u': array([],'d'),'ub':0,'ue':1} x=myasarray(x) idim,m=x.shape if per: for i in range(idim): if x[i][0]!=x[i][-1]: if quiet<2:print 'Warning: Setting x[%d][%d]=x[%d][0]'%(i,m,i) x[i][-1]=x[i][0] if not 0<idim<11: raise TypeError,'0<idim<11 must hold' if w is None: w=ones(m,'d') else: w=myasarray(w) ipar=(u is not None) if ipar: _parcur_cache['u']=u if ub is None: _parcur_cache['ub']=u[0] else: _parcur_cache['ub']=ub if ue is None: _parcur_cache['ue']=u[-1] else: _parcur_cache['ue']=ue else: _parcur_cache['u']=zeros(m,'d') if not (1<=k<=5): raise TypeError, '1<=k=%d<=5 must hold'%(k) if not (-1<=task<=1): raise TypeError, 'task must be either -1,0, or 1' if (not len(w)==m) or (ipar==1 and (not len(u)==m)): raise TypeError,'Mismatch of input dimensions' if s is None: s=m-sqrt(2*m) if t is None and task==-1: raise TypeError, 'Knots must be given for task=-1' if t is not None: _parcur_cache['t']=myasarray(t) n=len(_parcur_cache['t']) if task==-1 and n<2*k+2: raise TypeError, 'There must be at least 2*k+2 knots for task=-1' if m<=k: raise TypeError, 'm>k must hold' if nest is None: nest=m/2 if (task>=0 and s==0) or (nest<0): if per: nest=m+2*k else: nest=m+k+1 nest=max(nest,2*k+3) u=_parcur_cache['u'] ub=_parcur_cache['ub'] ue=_parcur_cache['ue'] t=_parcur_cache['t'] wrk=_parcur_cache['wrk'] iwrk=_parcur_cache['iwrk'] t,c,o=_fitpack._parcur(ravel(transpose(x)),w,u,ub,ue,k,task,ipar,s,t, nest,wrk,iwrk,per) _parcur_cache['u']=o['u'] _parcur_cache['ub']=o['ub'] _parcur_cache['ue']=o['ue'] _parcur_cache['t']=t _parcur_cache['wrk']=o['wrk'] _parcur_cache['iwrk']=o['iwrk'] ier,fp,n=o['ier'],o['fp'],len(t) u=o['u'] c.shape=idim,n-k-1 tcku = [t,list(c),k],u if ier<=0 and not quiet: print _iermess[ier][0] print "\tk=%d n=%d m=%d fp=%f s=%f"%(k,len(t),m,fp,s) if ier>0 and not full_output: if ier in [1,2,3]: print "Warning: "+_iermess[ier][0] else: try: raise _iermess[ier][1],_iermess[ier][0] except KeyError: raise _iermess['unknown'][1],_iermess['unknown'][0] if full_output: try: return tcku,fp,ier,_iermess[ier][0] except KeyError: return tcku,fp,ier,_iermess['unknown'][0] else: return tcku | 8db610b5e4af19ed171e7489fe6046b0f314615a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/8db610b5e4af19ed171e7489fe6046b0f314615a/fitpack.py |
x = asarray(x) | def plot(x,*args,**keywds): """Plot curves. Description: Plot one or more curves on the same graph. Inputs: There can be a variable number of inputs which consist of pairs or triples. The second variable is plotted against the first using the linetype specified by the optional third variable in the triple. If only two plots are being compared, the x-axis does not have to be repeated. """ x = asarray(x) try: override = 1 savesys = gist.plsys(2) gist.plsys(savesys) except: override = 0 global _hold if "hold" in keywds.keys(): _hold = keywds['hold'] if _hold or override: pass else: gist.fma() gist.animate(0) nargs = len(args) if nargs == 0: y = x x = Numeric.arange(0,len(y)) if y.typecode() in ['F','D']: print "Warning: complex data plotting real part." y = y.real gist.plg(y,x,type='solid',color='blue',marks=0) return y = args[0] argpos = 1 nowplotting = 0 clear_global_linetype() while 1: try: thearg = args[argpos] except IndexError: thearg = 0 thetype,thecolor,themarker,tomark = _parse_type_arg(thearg,nowplotting) if themarker == 'Z': # args[argpos] was data or non-existent. pass append_global_linetype(_rtypes[thetype]+_rcolors[thecolor]) else: # args[argpos] was a string argpos = argpos + 1 if tomark: append_global_linetype(_rtypes[thetype]+_rcolors[thecolor]+_rmarkers[themarker]) else: append_global_linetype(_rtypes[thetype]+_rcolors[thecolor]) if scipy.array_iscomplex(x) or scipy.array_iscomplex(y): print "Warning: complex data provided, using only real part." x = scipy.real(x) y = scipy.real(y) gist.plg(y,x,type=thetype,color=thecolor,marker=themarker,marks=tomark) nowplotting = nowplotting + 1 ## Argpos is pointing to the next potential triple of data. ## Now one of four things can happen: ## ## 1: argpos points to data, argpos+1 is a string ## 2: argpos points to data, end ## 3: argpos points to data, argpos+1 is data ## 4: argpos points to data, argpos+1 is data, argpos+2 is a string if argpos >= nargs: break # no more data if argpos == nargs-1: # this is a single data value. x = x y = args[argpos] argpos = argpos+1 elif type(args[argpos+1]) is types.StringType: x = x y = args[argpos] argpos = argpos+1 else: # 3 x = args[argpos] y = args[argpos+1] argpos = argpos+2 return | 65c5bf346df3d9775ed2b57ef9fbcebd5c054ac9 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/65c5bf346df3d9775ed2b57ef9fbcebd5c054ac9/Mplot.py |
if y.typecode() in ['F','D']: | if scipy.array_iscomplex(y): | def plot(x,*args,**keywds): """Plot curves. Description: Plot one or more curves on the same graph. Inputs: There can be a variable number of inputs which consist of pairs or triples. The second variable is plotted against the first using the linetype specified by the optional third variable in the triple. If only two plots are being compared, the x-axis does not have to be repeated. """ x = asarray(x) try: override = 1 savesys = gist.plsys(2) gist.plsys(savesys) except: override = 0 global _hold if "hold" in keywds.keys(): _hold = keywds['hold'] if _hold or override: pass else: gist.fma() gist.animate(0) nargs = len(args) if nargs == 0: y = x x = Numeric.arange(0,len(y)) if y.typecode() in ['F','D']: print "Warning: complex data plotting real part." y = y.real gist.plg(y,x,type='solid',color='blue',marks=0) return y = args[0] argpos = 1 nowplotting = 0 clear_global_linetype() while 1: try: thearg = args[argpos] except IndexError: thearg = 0 thetype,thecolor,themarker,tomark = _parse_type_arg(thearg,nowplotting) if themarker == 'Z': # args[argpos] was data or non-existent. pass append_global_linetype(_rtypes[thetype]+_rcolors[thecolor]) else: # args[argpos] was a string argpos = argpos + 1 if tomark: append_global_linetype(_rtypes[thetype]+_rcolors[thecolor]+_rmarkers[themarker]) else: append_global_linetype(_rtypes[thetype]+_rcolors[thecolor]) if scipy.array_iscomplex(x) or scipy.array_iscomplex(y): print "Warning: complex data provided, using only real part." x = scipy.real(x) y = scipy.real(y) gist.plg(y,x,type=thetype,color=thecolor,marker=themarker,marks=tomark) nowplotting = nowplotting + 1 ## Argpos is pointing to the next potential triple of data. ## Now one of four things can happen: ## ## 1: argpos points to data, argpos+1 is a string ## 2: argpos points to data, end ## 3: argpos points to data, argpos+1 is data ## 4: argpos points to data, argpos+1 is data, argpos+2 is a string if argpos >= nargs: break # no more data if argpos == nargs-1: # this is a single data value. x = x y = args[argpos] argpos = argpos+1 elif type(args[argpos+1]) is types.StringType: x = x y = args[argpos] argpos = argpos+1 else: # 3 x = args[argpos] y = args[argpos+1] argpos = argpos+2 return | 65c5bf346df3d9775ed2b57ef9fbcebd5c054ac9 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/65c5bf346df3d9775ed2b57ef9fbcebd5c054ac9/Mplot.py |
type = 'b' | type = 'B' | def fromimage(im, flatten=0): """Takes a PIL image and returns a copy of the image in a Numeric container. If the image is RGB returns a 3-dimensional array: arr[:,:,n] is each channel Optional arguments: - flatten (0): if true, the image is flattened by calling convert('F') on the image object before extracting the numerical data. This flattens the color layers into a single grayscale layer. Note that the supplied image object is NOT modified. """ assert Image.isImageType(im), "Not a PIL image." if flatten: im = im.convert('F') mode = im.mode adjust = 0 if mode == '1': im = im.convert(mode='L') mode = 'L' adjust = 1 str = im.tostring() type = 'b' if mode == 'F': type = 'f' if mode == 'I': type = 'i' arr = Numeric.fromstring(str,type) shape = list(im.size) shape.reverse() if mode == 'P': arr.shape = shape if im.palette.rawmode != 'RGB': print "Warning: Image has invalid palette." return arr pal = Numeric.fromstring(im.palette.data,type) N = len(pal) pal.shape = (int(N/3.0),3) return arr, pal if mode in ['RGB','YCbCr']: shape += [3] elif mode in ['CMYK','RGBA']: shape += [4] arr.shape = shape if adjust: arr = (arr != 0) return arr | a785a7f292974deb7696ea9b39f9194c356e7756 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a785a7f292974deb7696ea9b39f9194c356e7756/pilutil.py |
type = 'i' | type = 'I' | def fromimage(im, flatten=0): """Takes a PIL image and returns a copy of the image in a Numeric container. If the image is RGB returns a 3-dimensional array: arr[:,:,n] is each channel Optional arguments: - flatten (0): if true, the image is flattened by calling convert('F') on the image object before extracting the numerical data. This flattens the color layers into a single grayscale layer. Note that the supplied image object is NOT modified. """ assert Image.isImageType(im), "Not a PIL image." if flatten: im = im.convert('F') mode = im.mode adjust = 0 if mode == '1': im = im.convert(mode='L') mode = 'L' adjust = 1 str = im.tostring() type = 'b' if mode == 'F': type = 'f' if mode == 'I': type = 'i' arr = Numeric.fromstring(str,type) shape = list(im.size) shape.reverse() if mode == 'P': arr.shape = shape if im.palette.rawmode != 'RGB': print "Warning: Image has invalid palette." return arr pal = Numeric.fromstring(im.palette.data,type) N = len(pal) pal.shape = (int(N/3.0),3) return arr, pal if mode in ['RGB','YCbCr']: shape += [3] elif mode in ['CMYK','RGBA']: shape += [4] arr.shape = shape if adjust: arr = (arr != 0) return arr | a785a7f292974deb7696ea9b39f9194c356e7756 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a785a7f292974deb7696ea9b39f9194c356e7756/pilutil.py |
pal = arange(0,256,1,dtype='b')[:,NewAxis] * \ ones((3,),dtype='b')[NewAxis,:] | pal = arange(0,256,1,dtype='B')[:,NewAxis] * \ ones((3,),dtype='B')[NewAxis,:] | def toimage(arr,high=255,low=0,cmin=None,cmax=None,pal=None, mode=None,channel_axis=None): """Takes a Numeric array and returns a PIL image. The mode of the PIL image depends on the array shape, the pal keyword, and the mode keyword. For 2-D arrays, if pal is a valid (N,3) byte-array giving the RGB values (from 0 to 255) then mode='P', otherwise mode='L', unless mode is given as 'F' or 'I' in which case a float and/or integer array is made For 3-D arrays, the channel_axis argument tells which dimension of the array holds the channel data. For 3-D arrays if one of the dimensions is 3, the mode is 'RGB' by default or 'YCbCr' if selected. if the The Numeric array must be either 2 dimensional or 3 dimensional. """ data = asarray(arr) if iscomplexobj(data): raise ValueError, "Cannot convert a complex-valued array." shape = list(data.shape) valid = len(shape)==2 or ((len(shape)==3) and \ ((3 in shape) or (4 in shape))) assert valid, "Not a suitable array shape for any mode." if len(shape) == 2: shape = (shape[1],shape[0]) # columns show up first if mode == 'F': image = Image.fromstring(mode,shape,data.astype('f').tostring()) return image if mode in [None, 'L', 'P']: bytedata = bytescale(data,high=high,low=low,cmin=cmin,cmax=cmax) image = Image.fromstring('L',shape,bytedata.tostring()) if pal is not None: image.putpalette(asarray(pal,dtype=_UInt8).tostring()) # Becomes a mode='P' automagically. elif mode == 'P': # default gray-scale pal = arange(0,256,1,dtype='b')[:,NewAxis] * \ ones((3,),dtype='b')[NewAxis,:] image.putpalette(asarray(pal,dtype=_UInt8).tostring()) return image if mode == '1': # high input gives threshold for 1 bytedata = ((data > high)*255).astype('b') image = Image.fromstring('L',shape,bytedata.tostring()) image = image.convert(mode='1') return image if cmin is None: cmin = amin(ravel(data)) if cmax is None: cmax = amax(ravel(data)) data = (data*1.0 - cmin)*(high-low)/(cmax-cmin) + low if mode == 'I': image = Image.fromstring(mode,shape,data.astype('i').tostring()) else: raise ValueError, _errstr return image # if here then 3-d array with a 3 or a 4 in the shape length. # Check for 3 in datacube shape --- 'RGB' or 'YCbCr' if channel_axis is None: if (3 in shape): ca = Numeric.nonzero(asarray(shape) == 3)[0] else: ca = Numeric.nonzero(asarray(shape) == 4) if len(ca): ca = ca[0] else: raise ValueError, "Could not find channel dimension." else: ca = channel_axis numch = shape[ca] if numch not in [3,4]: raise ValueError, "Channel axis dimension is not valid." bytedata = bytescale(data,high=high,low=low,cmin=cmin,cmax=cmax) if ca == 2: strdata = bytedata.tostring() shape = (shape[1],shape[0]) elif ca == 1: strdata = transpose(bytedata,(0,2,1)).tostring() shape = (shape[2],shape[0]) elif ca == 0: strdata = transpose(bytedata,(1,2,0)).tostring() shape = (shape[2],shape[1]) if mode is None: if numch == 3: mode = 'RGB' else: mode = 'RGBA' if mode not in ['RGB','RGBA','YCbCr','CMYK']: raise ValueError, _errstr if mode in ['RGB', 'YCbCr']: assert numch == 3, "Invalid array shape for mode." if mode in ['RGBA', 'CMYK']: assert numch == 4, "Invalid array shape for mode." 
# Here we know data and mode is coorect image = Image.fromstring(mode, shape, strdata) return image | a785a7f292974deb7696ea9b39f9194c356e7756 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a785a7f292974deb7696ea9b39f9194c356e7756/pilutil.py |
bytedata = ((data > high)*255).astype('b') image = Image.fromstring('L',shape,bytedata.tostring()) image = image.convert(mode='1') | bytedata = (data > high) image = Image.fromstring('1',shape,bytedata.tostring()) | def toimage(arr,high=255,low=0,cmin=None,cmax=None,pal=None, mode=None,channel_axis=None): """Takes a Numeric array and returns a PIL image. The mode of the PIL image depends on the array shape, the pal keyword, and the mode keyword. For 2-D arrays, if pal is a valid (N,3) byte-array giving the RGB values (from 0 to 255) then mode='P', otherwise mode='L', unless mode is given as 'F' or 'I' in which case a float and/or integer array is made For 3-D arrays, the channel_axis argument tells which dimension of the array holds the channel data. For 3-D arrays if one of the dimensions is 3, the mode is 'RGB' by default or 'YCbCr' if selected. if the The Numeric array must be either 2 dimensional or 3 dimensional. """ data = asarray(arr) if iscomplexobj(data): raise ValueError, "Cannot convert a complex-valued array." shape = list(data.shape) valid = len(shape)==2 or ((len(shape)==3) and \ ((3 in shape) or (4 in shape))) assert valid, "Not a suitable array shape for any mode." if len(shape) == 2: shape = (shape[1],shape[0]) # columns show up first if mode == 'F': image = Image.fromstring(mode,shape,data.astype('f').tostring()) return image if mode in [None, 'L', 'P']: bytedata = bytescale(data,high=high,low=low,cmin=cmin,cmax=cmax) image = Image.fromstring('L',shape,bytedata.tostring()) if pal is not None: image.putpalette(asarray(pal,dtype=_UInt8).tostring()) # Becomes a mode='P' automagically. elif mode == 'P': # default gray-scale pal = arange(0,256,1,dtype='b')[:,NewAxis] * \ ones((3,),dtype='b')[NewAxis,:] image.putpalette(asarray(pal,dtype=_UInt8).tostring()) return image if mode == '1': # high input gives threshold for 1 bytedata = ((data > high)*255).astype('b') image = Image.fromstring('L',shape,bytedata.tostring()) image = image.convert(mode='1') return image if cmin is None: cmin = amin(ravel(data)) if cmax is None: cmax = amax(ravel(data)) data = (data*1.0 - cmin)*(high-low)/(cmax-cmin) + low if mode == 'I': image = Image.fromstring(mode,shape,data.astype('i').tostring()) else: raise ValueError, _errstr return image # if here then 3-d array with a 3 or a 4 in the shape length. # Check for 3 in datacube shape --- 'RGB' or 'YCbCr' if channel_axis is None: if (3 in shape): ca = Numeric.nonzero(asarray(shape) == 3)[0] else: ca = Numeric.nonzero(asarray(shape) == 4) if len(ca): ca = ca[0] else: raise ValueError, "Could not find channel dimension." else: ca = channel_axis numch = shape[ca] if numch not in [3,4]: raise ValueError, "Channel axis dimension is not valid." bytedata = bytescale(data,high=high,low=low,cmin=cmin,cmax=cmax) if ca == 2: strdata = bytedata.tostring() shape = (shape[1],shape[0]) elif ca == 1: strdata = transpose(bytedata,(0,2,1)).tostring() shape = (shape[2],shape[0]) elif ca == 0: strdata = transpose(bytedata,(1,2,0)).tostring() shape = (shape[2],shape[1]) if mode is None: if numch == 3: mode = 'RGB' else: mode = 'RGBA' if mode not in ['RGB','RGBA','YCbCr','CMYK']: raise ValueError, _errstr if mode in ['RGB', 'YCbCr']: assert numch == 3, "Invalid array shape for mode." if mode in ['RGBA', 'CMYK']: assert numch == 4, "Invalid array shape for mode." 
# Here we know data and mode is coorect image = Image.fromstring(mode, shape, strdata) return image | a785a7f292974deb7696ea9b39f9194c356e7756 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a785a7f292974deb7696ea9b39f9194c356e7756/pilutil.py |
raise ValueError, "Only floating point sparse matrix types allowed" | self.data = self.data.astype('d') self.typecode = 'd' | def _check(self): M,N = self.shape nnz = self.indptr[-1] nzmax = len(self.rowind) | 581c924d7981707b461583419b5d513f06410592 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/581c924d7981707b461583419b5d513f06410592/Sparse.py |
typecode = _coerce_rules[(self.typecode,bmat.typecode)] ftype = _transtabl[typecode] | def matmat(self, bmat): self._check() M,K1 = self.shape K2,N = bmat.shape if (K1 != K2): raise ValueError, "Shape mismatch error." a, rowa, ptra = self.data, self.rowind, self.indptr typecode = _coerce_rules[(self.typecode,bmat.typecode)] ftype = _transtabl[typecode] if isinstance(bmat,csr_matrix): bmat._check() func = getattr(sparsetools,ftype+'cscmucsr') b = bmat.data rowb = bmat.colind ptrb = bmat.indptr elif isinstance(bmat,csc_matrix): bmat._check() func = getattr(sparsetools,ftype+'cscmucsc') b = bmat.data rowb = bmat.rowind ptrb = bmat.indptr else: bmat = bmat.tocsc() func = getattr(sparsetools,ftype+'cscmucsc') b = bmat.data rowb = bmat.rowind ptrb = bmat.indptr a, b = _convert_data(a, b, typecode) newshape = (M,N) ptrc = zeros((N+1,),'i') nnzc = 2*max(ptra[-1],ptrb[-1]) c = zeros((nnzc,),typecode) rowc = zeros((nnzc,),'i') ierr = irow = kcol = 0 while 1: c, rowc, ptrc, irow, kcol, ierr = func(M,a,rowa,ptra,b,rowb,ptrb,c,rowc,ptrc,irow,kcol, ierr) if (ierr==0): break # otherwise we were too small and must resize # calculations continue where they left off... percent_to_go = 1- (1.0*kcol) / N newnnzc = int(ceil((1+percent_to_go)*nnzc)) c = resize1d(c,newnnzc) rowc = resize1d(rowc,newnnzc) nnzc = newnnzc | 581c924d7981707b461583419b5d513f06410592 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/581c924d7981707b461583419b5d513f06410592/Sparse.py |
raise ValueError, "Only floating point sparse matrix types allowed" | self.typecode = 'd' self.data = self.data.astype('d') | def _check(self): M,N = self.shape if (rank(self.data) != 1) or (rank(self.colind) != 1) or \ (rank(self.indptr) != 1): raise ValueError, "Data, colind, and indptr arrays "\ "should be rank 1." if (len(self.data) != len(self.colind)): raise ValueError, "Data and row list should have same length" if (len(self.indptr) != M+1): raise ValueError, "Index pointer should be of length #rows + 1" if (len(self.colind)>0) and (max(self.colind) >= N): raise ValueError, "Column-values must be < N." if (self.indptr[-1] > len(self.colind)): raise ValueError, \ "Last value of index list should be less than "\ "the size of data list" self.nnz = self.indptr[-1] self.nzmax = len(self.colind) self.typecode = self.data.typecode() if self.typecode not in 'fdFD': raise ValueError, "Only floating point sparse matrix types allowed" self.ftype = _transtabl[self.typecode] | 581c924d7981707b461583419b5d513f06410592 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/581c924d7981707b461583419b5d513f06410592/Sparse.py |
typecode = _coerce_rules[(self.typecode,bmat.typecode)] ftype = _transtabl[typecode] | def matmat(self, bmat): self._check() M,K1 = self.shape K2,N = bmat.shape a, rowa, ptra = self.data, self.colind, self.indptr typecode = _coerce_rules[(self.typecode,bmat.typecode)] ftype = _transtabl[typecode] if (K1 != K2): raise ValueError, "Shape mismatch error." if isinstance(bmat,csc_matrix): bmat._check() func = getattr(sparsetools,ftype+'csrmucsc') b = bmat.data colb = bmat.rowind ptrb = bmat.indptr out = 'csc' firstarg = () elif isinstance(bmat,csr_matrix): bmat._check() func = getattr(sparsetools,ftype+'cscmucsc') b, colb, rowb = a, rowa, ptra a, rowa, ptra = bmat.data, bmat.colind, bmat,indptr out = 'csr' firstarg = (N,) else: bmat = bmat.tocsc() func = getattr(sparsetools,ftype+'csrmucsc') b = bmat.data colb = bmat.colind ptrb = bmat.indptr out = 'csc' firstarg = () a, b = _convert_data(a, b, typecode) newshape = (M,N) if out == 'csr': ptrc = zeros((M+1,),'i') else: ptrc = zeros((N+1,),'i') nnzc = 2*max(ptra[-1],ptrb[-1]) c = zeros((nnzc,),typecode) rowc = zeros((nnzc,),'i') ierr = irow = kcol = 0 while 1: args = firstarg+(a,rowa,ptra,b,colb,ptrb,c,rowc,ptrc,irow, kcol, ierr) c, rowc, ptrc, irow, kcol, ierr = func(*args) if (ierr==0): break # otherwise we were too small and must resize percent_to_go = 1- (1.0*kcol) / N newnnzc = int(ceil((1+percent_to_go)*nnzc)) c = resize1d(c,newnnzc) rowc = resize1d(rowc,newnnzc) nnzc = newnnzc | 581c924d7981707b461583419b5d513f06410592 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/581c924d7981707b461583419b5d513f06410592/Sparse.py |
|
if isinstance(bmat,csc_matrix): | if isinstance(bmat,csc_matrix): | def matmat(self, bmat): self._check() M,K1 = self.shape K2,N = bmat.shape a, rowa, ptra = self.data, self.colind, self.indptr typecode = _coerce_rules[(self.typecode,bmat.typecode)] ftype = _transtabl[typecode] if (K1 != K2): raise ValueError, "Shape mismatch error." if isinstance(bmat,csc_matrix): bmat._check() func = getattr(sparsetools,ftype+'csrmucsc') b = bmat.data colb = bmat.rowind ptrb = bmat.indptr out = 'csc' firstarg = () elif isinstance(bmat,csr_matrix): bmat._check() func = getattr(sparsetools,ftype+'cscmucsc') b, colb, rowb = a, rowa, ptra a, rowa, ptra = bmat.data, bmat.colind, bmat,indptr out = 'csr' firstarg = (N,) else: bmat = bmat.tocsc() func = getattr(sparsetools,ftype+'csrmucsc') b = bmat.data colb = bmat.colind ptrb = bmat.indptr out = 'csc' firstarg = () a, b = _convert_data(a, b, typecode) newshape = (M,N) if out == 'csr': ptrc = zeros((M+1,),'i') else: ptrc = zeros((N+1,),'i') nnzc = 2*max(ptra[-1],ptrb[-1]) c = zeros((nnzc,),typecode) rowc = zeros((nnzc,),'i') ierr = irow = kcol = 0 while 1: args = firstarg+(a,rowa,ptra,b,colb,ptrb,c,rowc,ptrc,irow, kcol, ierr) c, rowc, ptrc, irow, kcol, ierr = func(*args) if (ierr==0): break # otherwise we were too small and must resize percent_to_go = 1- (1.0*kcol) / N newnnzc = int(ceil((1+percent_to_go)*nnzc)) c = resize1d(c,newnnzc) rowc = resize1d(rowc,newnnzc) nnzc = newnnzc | 581c924d7981707b461583419b5d513f06410592 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/581c924d7981707b461583419b5d513f06410592/Sparse.py |
return gstrf(N,csc.nnz,csc.data,csc.rowind,csc.colptr,permc_spec, | return gstrf(N,csc.nnz,csc.data,csc.rowind,csc.indptr,permc_spec, | def lu_factor(A, permc_spec=2, diag_pivot_thresh=1.0, drop_tol=0.0, relax=1, panel_size=10): M,N = A.shape if (M != N): raise ValueError, "Can only factor square matrices." csc = A.tocsc() gstrf = eval('_superlu.' + csc.ftype + 'gstrf') return gstrf(N,csc.nnz,csc.data,csc.rowind,csc.colptr,permc_spec, diag_pivot_thresh, drop_tol, relax, panel_size) | 581c924d7981707b461583419b5d513f06410592 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/581c924d7981707b461583419b5d513f06410592/Sparse.py |
a = array((1,2,3,4),Float32) | a = array((1,2,3,4),Float64) | def check_1D_array(self): a = array((1,2,3,4),Float32) actual= stats.hmean(a) desired = 4. / (1./1 + 1./2 + 1./3 + 1./4) assert_almost_equal(desired,actual,decimal=14) | df519ee6c07847bf1a14f9089d81e12bbe7c4347 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/df519ee6c07847bf1a14f9089d81e12bbe7c4347/test_stats.py |
just return the wx_class unaltered. """ | just return the wx_class unaltered. The proxied class is wrapped by the smart_class that returns a proxied or normal instance depending on where the instantiation occurs. """ | def register(wx_class): """ Create a gui_thread compatible version of wx_class Test whether a proxy is necessary. If so, generate and return the proxy class. if not, just return the wx_class unaltered. """ if running_in_second_thread: #print 'proxy generated' return proxify(wx_class) else: if not hasattr(wx_class, '_iNiT2'): if hasattr(wx_class, '__init__'): wx_class._iNiT2 = wx_class.__init__ else: wx_class._iNiT2 = None wx_class.__init__ = plain_class__init__ return wx_class | 4d50a24916bde5ccf37876de92ba8f8d8c0a41c4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/4d50a24916bde5ccf37876de92ba8f8d8c0a41c4/main.py |
return proxify(wx_class) | return smart_class(wx_class, proxify(wx_class)) | def register(wx_class): """ Create a gui_thread compatible version of wx_class Test whether a proxy is necessary. If so, generate and return the proxy class. if not, just return the wx_class unaltered. """ if running_in_second_thread: #print 'proxy generated' return proxify(wx_class) else: if not hasattr(wx_class, '_iNiT2'): if hasattr(wx_class, '__init__'): wx_class._iNiT2 = wx_class.__init__ else: wx_class._iNiT2 = None wx_class.__init__ = plain_class__init__ return wx_class | 4d50a24916bde5ccf37876de92ba8f8d8c0a41c4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/4d50a24916bde5ccf37876de92ba8f8d8c0a41c4/main.py |
args = dereference_arglist(args) | args = dereference_arglist(args) dkw = dereference_dict(kw) | body = """def %(method)s(self,*args,**kw): \"\"\"%(documentation)s\"\"\" %(pre_test)s from gui_thread_guts import proxy_event, smart_return %(import_statement)s # remove proxies if present args = dereference_arglist(args) %(arguments)s # inserts proxied object up front ret_val = None if in_proxy_call: ret_val = apply(%(call_method)s, arg_list, kw) else: finished = threading.Event() evt = proxy_event(%(call_method)s,arg_list,kw,finished) self.post(evt) finished.wait() if finished.exception_info: raise finished.exception_info[0], \ finished.exception_info[1] ret_val = finished._result %(results)s #results\n""" %locals() | 4d50a24916bde5ccf37876de92ba8f8d8c0a41c4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/4d50a24916bde5ccf37876de92ba8f8d8c0a41c4/main.py |
ret_val = apply(%(call_method)s, arg_list, kw) | ret_val = apply(%(call_method)s, arg_list, dkw) | body = """def %(method)s(self,*args,**kw): \"\"\"%(documentation)s\"\"\" %(pre_test)s from gui_thread_guts import proxy_event, smart_return %(import_statement)s # remove proxies if present args = dereference_arglist(args) %(arguments)s # inserts proxied object up front ret_val = None if in_proxy_call: ret_val = apply(%(call_method)s, arg_list, kw) else: finished = threading.Event() evt = proxy_event(%(call_method)s,arg_list,kw,finished) self.post(evt) finished.wait() if finished.exception_info: raise finished.exception_info[0], \ finished.exception_info[1] ret_val = finished._result %(results)s #results\n""" %locals() | 4d50a24916bde5ccf37876de92ba8f8d8c0a41c4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/4d50a24916bde5ccf37876de92ba8f8d8c0a41c4/main.py |
evt = proxy_event(%(call_method)s,arg_list,kw,finished) | evt = proxy_event(%(call_method)s,arg_list,dkw,finished) | body = """def %(method)s(self,*args,**kw): \"\"\"%(documentation)s\"\"\" %(pre_test)s from gui_thread_guts import proxy_event, smart_return %(import_statement)s # remove proxies if present args = dereference_arglist(args) %(arguments)s # inserts proxied object up front ret_val = None if in_proxy_call: ret_val = apply(%(call_method)s, arg_list, kw) else: finished = threading.Event() evt = proxy_event(%(call_method)s,arg_list,kw,finished) self.post(evt) finished.wait() if finished.exception_info: raise finished.exception_info[0], \ finished.exception_info[1] ret_val = finished._result %(results)s #results\n""" %locals() | 4d50a24916bde5ccf37876de92ba8f8d8c0a41c4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/4d50a24916bde5ccf37876de92ba8f8d8c0a41c4/main.py |
hasattr(x, 'x._proxy_attr__dont_mess_with_me_unless_you_know_what_youre_doing') | return hasattr(x, 'x._proxy_attr__dont_mess_with_me_unless_you_know_what_youre_doing') | def is_proxy_attr(x): hasattr(x, 'x._proxy_attr__dont_mess_with_me_unless_you_know_what_youre_doing') | 4d50a24916bde5ccf37876de92ba8f8d8c0a41c4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/4d50a24916bde5ccf37876de92ba8f8d8c0a41c4/main.py |
res.append(get_proxy_attr_obj(arg)) | obj = get_proxy_attr_obj(arg) if is_proxy(obj): res.append(obj.wx_obj) else: res.append(obj) | def dereference_arglist(lst): """ Scan for proxy objects and convert to underlying object """ res = [] for arg in lst: if is_proxy(arg): res.append(arg.wx_obj) #print 'dereferenced ', arg.wx_obj elif is_proxy_attr(arg): res.append(get_proxy_attr_obj(arg)) else: res.append(arg) return res | 4d50a24916bde5ccf37876de92ba8f8d8c0a41c4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/4d50a24916bde5ccf37876de92ba8f8d8c0a41c4/main.py |
M = int(amax(ij[0])) N = int(amax(ij[1])) | M = int(amax(ij[0])) + 1 N = int(amax(ij[1])) + 1 | def __init__(self, obj, ij_in, dims=None, nzmax=None, dtype=None): spmatrix.__init__(self) try: # Assume the first calling convention # assert len(ij) == 2 if len(ij_in) != 2: if isdense( ij_in ) and (ij_in.shape[1] == 2): ij = (ij_in[:,0], ij_in[:,1]) else: raise AssertionError else: ij = ij_in if dims is None: M = int(amax(ij[0])) N = int(amax(ij[1])) self.shape = (M, N) else: # Use 2 steps to ensure dims has length 2. M, N = dims self.shape = (M, N) self.row = asarray(ij[0]) self.col = asarray(ij[1]) self.data = asarray(obj, dtype=dtype) self.dtype = self.data.dtype if nzmax is None: nzmax = len(self.data) self.nzmax = nzmax self._check() except Exception, e: raise e, "invalid input format" | 57e057b52364d5b77af01198799ed00aea306d4c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/57e057b52364d5b77af01198799ed00aea306d4c/sparse.py |
except Exception, e: raise e, "invalid input format" | except Exception: print "invalid input format" raise | def __init__(self, obj, ij_in, dims=None, nzmax=None, dtype=None): spmatrix.__init__(self) try: # Assume the first calling convention # assert len(ij) == 2 if len(ij_in) != 2: if isdense( ij_in ) and (ij_in.shape[1] == 2): ij = (ij_in[:,0], ij_in[:,1]) else: raise AssertionError else: ij = ij_in if dims is None: M = int(amax(ij[0])) N = int(amax(ij[1])) self.shape = (M, N) else: # Use 2 steps to ensure dims has length 2. M, N = dims self.shape = (M, N) self.row = asarray(ij[0]) self.col = asarray(ij[1]) self.data = asarray(obj, dtype=dtype) self.dtype = self.data.dtype if nzmax is None: nzmax = len(self.data) self.nzmax = nzmax self._check() except Exception, e: raise e, "invalid input format" | 57e057b52364d5b77af01198799ed00aea306d4c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/57e057b52364d5b77af01198799ed00aea306d4c/sparse.py |
import os.path as op | def configuration(parent_package='',top_path=None): import numpy from numpy.distutils.misc_util import Configuration from numpy.distutils.system_info import get_info import os.path as op config = Configuration( 'umfpack', parent_package, top_path ) config.add_data_dir('tests') umf_info = get_info( 'umfpack', notfound_action = 1 ) if umf_info: print 'Umfpack present, ok.' else: return None scipyInclude = numpy.get_numpy_include() umfpackInclude = umf_info['include_dirs'][0] config.add_extension( '__umfpack', sources = ['umfpack.i'], swig_opts = ['-I' + umfpackInclude], include_dirs = [umfpackInclude, scipyInclude], libraries = ['cblas'], extra_objects = umf_info['extra_objects'] ) # config.add_scripts( 'test_umfpack.py' ) return config | 0beefb64e326cff6a092b86769cce70c9582ce1a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/0beefb64e326cff6a092b86769cce70c9582ce1a/setup.py |
|
if umf_info: print 'Umfpack present, ok.' else: return None | def configuration(parent_package='',top_path=None): import numpy from numpy.distutils.misc_util import Configuration from numpy.distutils.system_info import get_info import os.path as op config = Configuration( 'umfpack', parent_package, top_path ) config.add_data_dir('tests') umf_info = get_info( 'umfpack', notfound_action = 1 ) if umf_info: print 'Umfpack present, ok.' else: return None scipyInclude = numpy.get_numpy_include() umfpackInclude = umf_info['include_dirs'][0] config.add_extension( '__umfpack', sources = ['umfpack.i'], swig_opts = ['-I' + umfpackInclude], include_dirs = [umfpackInclude, scipyInclude], libraries = ['cblas'], extra_objects = umf_info['extra_objects'] ) # config.add_scripts( 'test_umfpack.py' ) return config | 0beefb64e326cff6a092b86769cce70c9582ce1a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/0beefb64e326cff6a092b86769cce70c9582ce1a/setup.py |
|
scipyInclude = numpy.get_numpy_include() umfpackInclude = umf_info['include_dirs'][0] | umfpack_i_file = config.paths('umfpack.i')[0] def umfpack_i(ext, build_dir): if umf_info: return umfpack_i_file | def configuration(parent_package='',top_path=None): import numpy from numpy.distutils.misc_util import Configuration from numpy.distutils.system_info import get_info import os.path as op config = Configuration( 'umfpack', parent_package, top_path ) config.add_data_dir('tests') umf_info = get_info( 'umfpack', notfound_action = 1 ) if umf_info: print 'Umfpack present, ok.' else: return None scipyInclude = numpy.get_numpy_include() umfpackInclude = umf_info['include_dirs'][0] config.add_extension( '__umfpack', sources = ['umfpack.i'], swig_opts = ['-I' + umfpackInclude], include_dirs = [umfpackInclude, scipyInclude], libraries = ['cblas'], extra_objects = umf_info['extra_objects'] ) # config.add_scripts( 'test_umfpack.py' ) return config | 0beefb64e326cff6a092b86769cce70c9582ce1a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/0beefb64e326cff6a092b86769cce70c9582ce1a/setup.py |
sources = ['umfpack.i'], swig_opts = ['-I' + umfpackInclude], include_dirs = [umfpackInclude, scipyInclude], | sources = [umfpack_i], | def configuration(parent_package='',top_path=None): import numpy from numpy.distutils.misc_util import Configuration from numpy.distutils.system_info import get_info import os.path as op config = Configuration( 'umfpack', parent_package, top_path ) config.add_data_dir('tests') umf_info = get_info( 'umfpack', notfound_action = 1 ) if umf_info: print 'Umfpack present, ok.' else: return None scipyInclude = numpy.get_numpy_include() umfpackInclude = umf_info['include_dirs'][0] config.add_extension( '__umfpack', sources = ['umfpack.i'], swig_opts = ['-I' + umfpackInclude], include_dirs = [umfpackInclude, scipyInclude], libraries = ['cblas'], extra_objects = umf_info['extra_objects'] ) # config.add_scripts( 'test_umfpack.py' ) return config | 0beefb64e326cff6a092b86769cce70c9582ce1a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/0beefb64e326cff6a092b86769cce70c9582ce1a/setup.py |
extra_objects = umf_info['extra_objects'] ) | **umf_info) | def configuration(parent_package='',top_path=None): import numpy from numpy.distutils.misc_util import Configuration from numpy.distutils.system_info import get_info import os.path as op config = Configuration( 'umfpack', parent_package, top_path ) config.add_data_dir('tests') umf_info = get_info( 'umfpack', notfound_action = 1 ) if umf_info: print 'Umfpack present, ok.' else: return None scipyInclude = numpy.get_numpy_include() umfpackInclude = umf_info['include_dirs'][0] config.add_extension( '__umfpack', sources = ['umfpack.i'], swig_opts = ['-I' + umfpackInclude], include_dirs = [umfpackInclude, scipyInclude], libraries = ['cblas'], extra_objects = umf_info['extra_objects'] ) # config.add_scripts( 'test_umfpack.py' ) return config | 0beefb64e326cff6a092b86769cce70c9582ce1a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/0beefb64e326cff6a092b86769cce70c9582ce1a/setup.py |
raise ValueErrro, "Both rp and rs must be provided to design an elliptic filter." | raise ValueError, "Both rp and rs must be provided to design an elliptic filter." | def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=0, ftype='butter', output='ba'): """IIR digital and analog filter design given order and critical points. Description: Design an Nth order lowpass digital or analog filter and return the filter coefficients in (B,A) (numerator, denominator) or (Z,P,K) form. Inputs: N -- the order of the filter. Wn -- a scalar or length-2 sequence giving the critical frequencies. rp, rs -- For chebyshev and elliptic filters provides the maximum ripple in the passband and the minimum attenuation in the stop band. btype -- the type of filter (lowpass, highpass, bandpass, or bandstop). analog -- non-zero to return an analog filter, otherwise a digital filter is returned. ftype -- the type of IIR filter (Butterworth, Cauer (Elliptic), Bessel, Chebyshev1, Chebyshev2) output -- 'ba' for (b,a) output, 'zpk' for (z,p,k) output. SEE ALSO butterord, cheb1ord, cheb2ord, ellipord """ ftype, btype, output = map(string.lower, (ftype, btype, output)) Wn = asarray(Wn) try: btype = band_dict[btype] except KeyError: raise ValueError, "%s is an invalid bandtype for filter." % btype try: typefunc = filter_dict[ftype][0] except KeyError: raise ValueError, "%s is not a valid basic iir filter." % ftype if output not in ['ba', 'zpk']: raise ValueError, "%s is not a valid output form." % output #pre-warp frequencies for digital filter design if not analog: fs = 2.0 warped = 2*fs*tan(pi*Wn/fs) else: warped = Wn # convert to low-pass prototype if btype in ['lowpass', 'highpass']: wo = warped else: bw = warped[1] - warped[0] wo = sqrt(warped[0]*warped[1]) # Get analog lowpass prototype if typefunc in [buttap, besselap]: z, p, k = typefunc(N) elif typefunc == cheb1ap: if rp is None: raise ValueError, "passband ripple (rp) must be provided to design a Chebyshev I filter." z, p, k = typefunc(N, rp) elif typefunc == cheb2ap: if rs is None: raise ValueError, "stopband atteunatuion (rs) must be provided to design an Chebyshev II filter." z, p, k = typefunc(N, rs) else: # Elliptic filters if rs is None or rp is None: raise ValueErrro, "Both rp and rs must be provided to design an elliptic filter." z, p, k = typefunc(N, rp, rs) b, a = zpk2tf(z,p,k) # transform to lowpass, bandpass, highpass, or bandstop if btype == 'lowpass': b, a = lp2lp(b,a,wo=wo) elif btype == 'highpass': b, a = lp2hp(b,a,wo=wo) elif btype == 'bandpass': b, a = lp2bp(b,a,wo=wo,bw=bw) else: # 'bandstop' b, a = lp2bs(b,a,wo=wo,bw=bw) # Find discrete equivalent if necessary if not analog: b, a = bilinear(b, a, fs=fs) # Transform to proper out type (pole-zero, state-space, numer-denom) if output == 'zpk': return tf2zpk(b,a) else: return b,a | f294fbfbcba8fd09ee3427e2fb9d597320fb8aae /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/f294fbfbcba8fd09ee3427e2fb9d597320fb8aae/filter_design.py |
v v v v v v v | def check_complex_expr(self): | 59b4e95f7f51e2949dfc41d870520dd7f49c95eb /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/59b4e95f7f51e2949dfc41d870520dd7f49c95eb/test_numexpr.py |
|
^ ^ ^ ^ ^ ^ ^ | def complex(a, b): c = zeros(a.shape, dtype=complex_) | 59b4e95f7f51e2949dfc41d870520dd7f49c95eb /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/59b4e95f7f51e2949dfc41d870520dd7f49c95eb/test_numexpr.py |
|
v v v v v v v | def complex(a, b): c = zeros(a.shape, dtype=complex_) | 59b4e95f7f51e2949dfc41d870520dd7f49c95eb /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/59b4e95f7f51e2949dfc41d870520dd7f49c95eb/test_numexpr.py |
|
^ ^ ^ ^ ^ ^ ^ | def complex(a, b): c = zeros(a.shape, dtype=complex_) | 59b4e95f7f51e2949dfc41d870520dd7f49c95eb /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/59b4e95f7f51e2949dfc41d870520dd7f49c95eb/test_numexpr.py |
|
v v v v v v v ^ ^ ^ ^ ^ ^ ^ | def complex(a, b): c = zeros(a.shape, dtype=complex_) | 59b4e95f7f51e2949dfc41d870520dd7f49c95eb /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/59b4e95f7f51e2949dfc41d870520dd7f49c95eb/test_numexpr.py |
|
v v v v v v v | def complex(a, b): c = zeros(a.shape, dtype=complex_) | 59b4e95f7f51e2949dfc41d870520dd7f49c95eb /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/59b4e95f7f51e2949dfc41d870520dd7f49c95eb/test_numexpr.py |
|
^ ^ ^ ^ ^ ^ ^ | def complex(a, b): c = zeros(a.shape, dtype=complex_) | 59b4e95f7f51e2949dfc41d870520dd7f49c95eb /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/59b4e95f7f51e2949dfc41d870520dd7f49c95eb/test_numexpr.py |
|
v v v v v v v | def complex(a, b): c = zeros(a.shape, dtype=complex_) | 59b4e95f7f51e2949dfc41d870520dd7f49c95eb /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/59b4e95f7f51e2949dfc41d870520dd7f49c95eb/test_numexpr.py |
|
^ ^ ^ ^ ^ ^ ^ | def complex(a, b): c = zeros(a.shape, dtype=complex_) | 59b4e95f7f51e2949dfc41d870520dd7f49c95eb /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/59b4e95f7f51e2949dfc41d870520dd7f49c95eb/test_numexpr.py |
|
v v v v v v v | def complex(a, b): c = zeros(a.shape, dtype=complex_) | 59b4e95f7f51e2949dfc41d870520dd7f49c95eb /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/59b4e95f7f51e2949dfc41d870520dd7f49c95eb/test_numexpr.py |
|
^ ^ ^ ^ ^ ^ ^ | def complex(a, b): c = zeros(a.shape, dtype=complex_) | 59b4e95f7f51e2949dfc41d870520dd7f49c95eb /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/59b4e95f7f51e2949dfc41d870520dd7f49c95eb/test_numexpr.py |
|
v v v v v v v | def complex(a, b): c = zeros(a.shape, dtype=complex_) | 59b4e95f7f51e2949dfc41d870520dd7f49c95eb /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/59b4e95f7f51e2949dfc41d870520dd7f49c95eb/test_numexpr.py |
|
^ ^ ^ ^ ^ ^ ^ | def complex(a, b): c = zeros(a.shape, dtype=complex_) | 59b4e95f7f51e2949dfc41d870520dd7f49c95eb /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/59b4e95f7f51e2949dfc41d870520dd7f49c95eb/test_numexpr.py |
|
beta = select([A>50, A>21], [0.1102*(A-8.7), 0.5842*(A-21)**(0.4) + 0.07866*(A-21)], 0.0) | if (A>50): beta = 0.1102*(A-8.7) elif (A>21): beta = 0.5842*(A-21)**0.4 + 0.07886*(A-21) else: beta = 0.0 | def kaiserord(ripple, width): """Design a Kaiser window to limit ripple and width of transition region. Inputs: ripple -- positive number specifying maximum ripple in passband (dB) and minimum ripple in stopband width -- width of transition region (normalized so that 1 corresponds to pi radians / sample) Outputs: N, beta -- the order and beta parameter for the kaiser window. signal.kaiser(N,beta,sym=0) returns the window as does signal.get_window(beta,N) signal.get_window(('kaiser',beta),N) Uses the empirical equations discovered by Kaiser. Oppenheim, Schafer, "Discrete-Time Signal Processing,", p.475-476. """ A = abs(ripple) # in case somebody is confused as to what's meant beta = select([A>50, A>21], [0.1102*(A-8.7), 0.5842*(A-21)**(0.4) + 0.07866*(A-21)], 0.0) N = (A-8)/2.285/(pi*width) return ceil(N), beta | ef7ae2163847f15d6f109eac68148b2b4694233b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/ef7ae2163847f15d6f109eac68148b2b4694233b/filter_design.py |
for key in self.keys(): if key[indx] in cols_or_rows: res[key] = self[key] | N = len(cols_or_rows) if indx: for key in self.keys(): num = searchsorted(cols_or_rows,key[1]) if num < N: newkey = (key[0],num) res[newkey] = self[key] else: for key in self.keys(): num = searchsorted(cols_or_rows,key[0]) if num < N: newkey = (num,key[1]) res[newkey] = self[key] | def take(self, cols_or_rows, columns=1): # Extract columns or rows as indictated from matrix res = dictmatrix() indx = int((columns == 1)) for key in self.keys(): if key[indx] in cols_or_rows: res[key] = self[key] return res | 7d60f439c1f14bf97fbf1ded32a5ecb59793f242 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/7d60f439c1f14bf97fbf1ded32a5ecb59793f242/Sparse.py |
f = open(source,'w') | f = open(target,'w') | def get_cblas_source(ext, build_dir): name = ext.name.split('.')[-1] assert name=='cblas',`name` if atlas_version is None: target = join(build_dir,target_dir,'cblas.pyf') from distutils.dep_util import newer if newer(__file__,target): f = open(source,'w') f.write(tmpl_empty_cblas_pyf) f.close() else: target = ext.depends[0] assert os.path.basename(target)=='cblas.pyf.src' return target | 6e5cad3f6513055c6057bb0caa3c100d26866d61 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/6e5cad3f6513055c6057bb0caa3c100d26866d61/setup_blas.py |
h = wiener(g) assert_array_almost_equal(h,correct,decimal=6) | h = wiener(g) assert_array_almost_equal(h,correct,decimal=6) | def check_basic(self): g = Numeric.array([[5,6,4,3],[3,5,6,2],[2,3,5,6],[1,6,9,7]],'d') correct = Numeric.array([[2.16374269,3.2222222222, 2.8888888889, 1.6666666667],[2.666666667, 4.33333333333, 4.44444444444, 2.8888888888],[2.222222222, 4.4444444444, 5.4444444444, 4.801066874837],[1.33333333333, 3.92735042735, 6.0712560386, 5.0404040404]]) h = wiener(g) assert_array_almost_equal(h,correct,decimal=6) | afdfe630da72326dbdc18cfc00e7782f2ab82f35 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/afdfe630da72326dbdc18cfc00e7782f2ab82f35/test_signaltools.py |
assert_array_almost_equal(f(3,[3],[-4]),[-36]) | assert_array_almost_equal(f(3,[3],[-4]),[[-36]]) | def check_gemm(self): for p in 'sd': f = getattr(fblas,p+'gemm',None) if f is None: continue assert_array_almost_equal(f(3,[3],[-4]),[-36]) assert_array_almost_equal(f(3,[3],[-4],3,[5]),[-21]) for p in 'cz': f = getattr(fblas,p+'gemm',None) if f is None: continue assert_array_almost_equal(f(3j,[3-4j],[-4]),[-48-36j]) assert_array_almost_equal(f(3j,[3-4j],[-4],3,[5j]),[-48-21j]) | dd7605b66704602793ae5a516e8f3a5ee4157154 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/dd7605b66704602793ae5a516e8f3a5ee4157154/test_blas.py |
assert_array_almost_equal(f(3j,[3-4j],[-4]),[-48-36j]) | assert_array_almost_equal(f(3j,[3-4j],[-4]),[[-48-36j]]) | def check_gemm(self): for p in 'sd': f = getattr(fblas,p+'gemm',None) if f is None: continue assert_array_almost_equal(f(3,[3],[-4]),[-36]) assert_array_almost_equal(f(3,[3],[-4],3,[5]),[-21]) for p in 'cz': f = getattr(fblas,p+'gemm',None) if f is None: continue assert_array_almost_equal(f(3j,[3-4j],[-4]),[-48-36j]) assert_array_almost_equal(f(3j,[3-4j],[-4],3,[5j]),[-48-21j]) | dd7605b66704602793ae5a516e8f3a5ee4157154 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/dd7605b66704602793ae5a516e8f3a5ee4157154/test_blas.py |
if not issubclass(line.dtype.type, int) or not line.iscontiguous()): | if not issubclass(line.dtype.type, int) or not line.iscontiguous(): | def polyline(dc,line,xoffset=0,yoffset=0): #------------------------------------------------------------------------ # Make sure the array is the correct size/shape #------------------------------------------------------------------------ shp = line.shape assert(len(shp)==2 and shp[1] == 2) #------------------------------------------------------------------------ # Offset data if necessary #------------------------------------------------------------------------ if xoffset or yoffset: line = line + array((xoffset,yoffset),line.typecode()) #------------------------------------------------------------------------ # Define the win32 version of the function #------------------------------------------------------------------------ if sys.platform == 'win32': # win32 requires int type for lines. if not issubclass(line.dtype.type, int) or not line.iscontiguous()): line = line.astype(int) code = """ HDC hdc = (HDC) dc->GetHDC(); Polyline(hdc,(POINT*)line,Nline[0]); """ else: if (line.typecode() != uint16 or not line.iscontiguous()): line = line.astype(uint16) code = """ GdkWindow* win = dc->m_window; GdkGC* pen = dc->m_penGC; gdk_draw_lines(win,pen,(GdkPoint*)line,Nline[0]); """ weave.inline(code,['dc','line']) #------------------------------------------------------------------------ # Find the maximum and minimum points in the drawing list and add # them to the bounding box. #------------------------------------------------------------------------ max_pt = maximum.reduce(line,0) min_pt = minimum.reduce(line,0) dc.CalcBoundingBox(max_pt[0],max_pt[1]) dc.CalcBoundingBox(min_pt[0],min_pt[1]) | b87a2bda3158afdebfcbff0f8b776db588cc79e7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/b87a2bda3158afdebfcbff0f8b776db588cc79e7/wx_speed.py |
v = [(s.delta, s.time) for s in subjects] | def initialize(self, subjects): | c9c3c7d8f2e2a3ad7a3edb438ee0e18d948101c7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/c9c3c7d8f2e2a3ad7a3edb438ee0e18d948101c7/cox.py |
|
if not _hold: gist.fma() gist.animate(0) | try: _style = None saveval = gist.plsys(2) gist.plsys(saveval) except: _style = 'default' if not _hold: gist.fma() gist.animate(0) | def imagesc(z,cmin=None,cmax=None,xryr=None,_style='default', palette=None, color='black'): """Plot an image on axes. z -- The data cmin -- Value to map to lowest color in palette (min(z) if None) cmax -- Value to map to highest color in palette (max(z) if None) xryr -- (xmin, ymin, xmax, ymax) coordinates to print (0, 0, z.shape[1], z.shape[0]) if None _style -- A 'style-sheet' to use if desired (a default one will be used if 'default'). If None, then no style will be imposed. palette -- A string for a palette previously saved in a file (see write_palette) or an array specifying the red-green-blue values (2-d array N x 3) or gray-scale values (2-d array N x 1 or 1-d array). color -- The color to use for the axes. """ if xryr is None: xryr = (0,0,z.shape[1],z.shape[0]) if not _hold: gist.fma() gist.animate(0) if _style is not None: if _style == "default": _style='/tmp/image.gs' system = write_style.getsys(hticpos='below',vticpos='left',frame=1, color=color) fid = open(_style,'w') fid.write(write_style.style2string(system)) fid.close() gist.window(style=_style) if cmax is None: cmax = max(ravel(z)) if cmin is None: cmin = min(ravel(z)) cmax = float(cmax) cmin = float(cmin) byteimage = gist.bytscl(z,cmin=cmin,cmax=cmax) change_palette(palette) gist.pli(byteimage,xryr[0],xryr[1],xryr[2],xryr[3]) return | c3fd93bfa60f92bb1be62ae0bd29941be5afd8e7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/c3fd93bfa60f92bb1be62ae0bd29941be5afd8e7/Mplot.py |
assert(isnan(array(1+1j)/0.) == 0) | assert(isnan(array(0+0j)/0.) == 1) | def check_complex1(self): assert(isnan(array(1+1j)/0.) == 0) | 9496b106d6e4258c5be22603073cc93174e67902 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/9496b106d6e4258c5be22603073cc93174e67902/test_misc.py |
assert(vals.imag > 1e10 and isfinite(vals)) | assert(vals.imag ==0) | def check_complex_bad(self): v = 1+1j v += array(0+1.j)/0. vals = nan_to_num(v) assert(vals.imag > 1e10 and isfinite(vals)) | 9496b106d6e4258c5be22603073cc93174e67902 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/9496b106d6e4258c5be22603073cc93174e67902/test_misc.py |
assert(vals.imag > 1e10 and isfinite(vals)) | assert(isfinite(vals)) | def check_complex_bad2(self): v = 1+1j v += array(-1+1.j)/0. vals = nan_to_num(v) assert(vals.imag > 1e10 and isfinite(vals)) # !! This is actually (unexpectedly) positive # !! inf. Comment out for now, and see if it # !! changes #assert(vals.real < -1e10 and isfinite(vals)) | 9496b106d6e4258c5be22603073cc93174e67902 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/9496b106d6e4258c5be22603073cc93174e67902/test_misc.py |
dlg = wx.wxFileDialog(self, "Save As", ".", "", "*.*", wx.wxSAVE) | def OnFileSaveAs(self, event): import os dlg = wx.wxFileDialog(self, "Save As", ".", "", "*.*", wx.wxSAVE) wildcard = "PNG files (*.png)|*.png|" \ "BMP files (*.bmp)|*.bmp|" \ "JPEG files (*.jpg)|*.jpg|" \ "PCX files (*.pcx)|*.pcx|" \ "TIFF files (*.tif)|*.tif" dlg.SetWildcard(wildcard) dlg.SetWildcard(wildcard) if dlg.ShowModal() == wx.wxID_OK: f = dlg.GetPath() dummy, ftype = os.path.splitext(f) # strip . ftype = ftype[1:] if ftype in image_type_map.keys(): self.client.save(dlg.GetPath(),ftype) else: msg = "Extension is currently used to determine file type." \ "'%s' is not a vaild extension." \ "You may use one of the following extensions. %s" \ % (ftype,image_type_map.keys()) d = wx.wxMessageDialog(self,msg,style=wx.wxOK) d.ShowModal() d.Destroy() dlg.Destroy() | 1624b66f750e0de9c6b5a88fb2e7e4e852266e3c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/1624b66f750e0de9c6b5a88fb2e7e4e852266e3c/wxplt.py |
|
"TIFF files (*.tif)|*.tif" dlg.SetWildcard(wildcard) dlg.SetWildcard(wildcard) | "TIFF files (*.tif)|*.tif|" \ "All Files |*|" dlg = wx.wxFileDialog(self, "Save As", ".", "", wildcard, wx.wxSAVE) | def OnFileSaveAs(self, event): import os dlg = wx.wxFileDialog(self, "Save As", ".", "", "*.*", wx.wxSAVE) wildcard = "PNG files (*.png)|*.png|" \ "BMP files (*.bmp)|*.bmp|" \ "JPEG files (*.jpg)|*.jpg|" \ "PCX files (*.pcx)|*.pcx|" \ "TIFF files (*.tif)|*.tif" dlg.SetWildcard(wildcard) dlg.SetWildcard(wildcard) if dlg.ShowModal() == wx.wxID_OK: f = dlg.GetPath() dummy, ftype = os.path.splitext(f) # strip . ftype = ftype[1:] if ftype in image_type_map.keys(): self.client.save(dlg.GetPath(),ftype) else: msg = "Extension is currently used to determine file type." \ "'%s' is not a vaild extension." \ "You may use one of the following extensions. %s" \ % (ftype,image_type_map.keys()) d = wx.wxMessageDialog(self,msg,style=wx.wxOK) d.ShowModal() d.Destroy() dlg.Destroy() | 1624b66f750e0de9c6b5a88fb2e7e4e852266e3c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/1624b66f750e0de9c6b5a88fb2e7e4e852266e3c/wxplt.py |
if (_obj.search(line) is not None): if atype not in scipy_base.typecodes['Complex']: scipy_base.disp("Warning: Complex data detected, but requested typecode was not complex.") | if _not_warned: if (_obj.search(line) is not None): warn = 1 for k in range(len(atype)): if atype[k] in scipy_base.typecodes['Complex']: warn = 0 if warn: scipy_base.disp("Warning: Complex data detected, but no requested typecode was complex.") _not_warned = 0 | def process_line(line, separator, collist, atype, missing): strlist = [] line = _obj.sub(r"\1\3\5",line) # remove spaces between real # and imaginary parts of complex numbers if (_obj.search(line) is not None): if atype not in scipy_base.typecodes['Complex']: scipy_base.disp("Warning: Complex data detected, but requested typecode was not complex.") for mysep in separator[:-1]: if mysep is None: newline, ind = move_past_spaces(line) strlist.append(line[:ind]) line = newline else: ind = line.find(mysep) strlist.append(line[:ind]) line = line[ind+len(mysep):] strlist.extend(line.split(separator[-1])) arlist = array(strlist,'O') N = len(atype) vals = [None]*N for k in range(len(atype)): vals[k] = extract_columns(arlist, collist[k], atype[k], missing) return vals | a87c3471b990f92c7277936c8f678afc5f027940 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a87c3471b990f92c7277936c8f678afc5f027940/array_import.py |
elif isinstance(key, int): | elif isintlike(key): | def __getitem__(self, key): if isinstance(key, tuple): row = key[0] col = key[1] if isinstance(col, slice): raise IndexError, "csc_matrix supports slices only of a single"\ " column" elif isinstance(row, slice): return self._getcolslice(row, col) M, N = self.shape if (row < 0): row = M + row if (col < 0): col = N + col if not (0<=row<M) or not (0<=col<N): raise IndexError, "index out of bounds" func = getattr(sparsetools, self.ftype+'cscgetel') ind, val = func(self.data, self.rowind, self.indptr, row, col) return val elif isinstance(key, int): # Was: return self.data[key] # If this is allowed, it should return the relevant row, as for # dense matrices (and __len__ should be supported again). raise IndexError, "integer index not supported for csc_matrix" else: raise IndexError, "invalid index" | 1b4d620ba0b6a4971bb8e328ef4ed29cb29759a7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/1b4d620ba0b6a4971bb8e328ef4ed29cb29759a7/sparse.py |
elif isinstance(key, int): | elif isintlike(key): | def __getitem__(self, key): if isinstance(key, tuple): row = key[0] col = key[1] if isinstance(row, slice): raise IndexError, "csr_matrix supports slices only of a single"\ " row" elif isinstance(col, slice): return self._getrowslice(row, col) M, N = self.shape if (row < 0): row = M + row if (col < 0): col = N + col if not (0<=row<M) or not (0<=col<N): raise IndexError, "index out of bounds" func = getattr(sparsetools, self.ftype+'cscgetel') ind, val = func(self.data, self.colind, self.indptr, col, row) return val elif isinstance(key, int): # If an integer index is allowed, it should return the relevant row, # as for dense matrices (and __len__ should be supported again). return self[key, :] else: raise IndexError, "invalid index" | 1b4d620ba0b6a4971bb8e328ef4ed29cb29759a7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/1b4d620ba0b6a4971bb8e328ef4ed29cb29759a7/sparse.py |
assert isinstance(i, int) and isinstance(j, int) | assert isintlike(i) and isintlike(j) | def get(self, key, default=0.): """This overrides the dict.get method, providing type checking but otherwise equivalent functionality. """ try: i, j = key assert isinstance(i, int) and isinstance(j, int) except (AssertionError, TypeError, ValueError): raise IndexError, "index must be a pair of integers" try: assert not (i < 0 or i >= self.shape[0] or j < 0 or j >= self.shape[1]) except AssertionError: raise IndexError, "index out of bounds" return dict.get(self, key, default) | 1b4d620ba0b6a4971bb8e328ef4ed29cb29759a7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/1b4d620ba0b6a4971bb8e328ef4ed29cb29759a7/sparse.py |
if isinstance(i, int): | if isintlike(i): | def __getitem__(self, key): """If key=(i,j) is a pair of integers, return the corresponding element. If either i or j is a slice or sequence, return a new sparse matrix with just these elements. """ try: assert len(key) == 2 except (AssertionError, TypeError): raise TypeError, "index must be a pair of integers or slices" i, j = key | 1b4d620ba0b6a4971bb8e328ef4ed29cb29759a7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/1b4d620ba0b6a4971bb8e328ef4ed29cb29759a7/sparse.py |
if isinstance(j, int): | if isintlike(j): | def __getitem__(self, key): """If key=(i,j) is a pair of integers, return the corresponding element. If either i or j is a slice or sequence, return a new sparse matrix with just these elements. """ try: assert len(key) == 2 except (AssertionError, TypeError): raise TypeError, "index must be a pair of integers or slices" i, j = key | 1b4d620ba0b6a4971bb8e328ef4ed29cb29759a7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/1b4d620ba0b6a4971bb8e328ef4ed29cb29759a7/sparse.py |
if isinstance(i, int) and isinstance(j, int): | if isintlike(i) and isintlike(j): | def __getitem__(self, key): """If key=(i,j) is a pair of integers, return the corresponding element. If either i or j is a slice or sequence, return a new sparse matrix with just these elements. """ try: assert len(key) == 2 except (AssertionError, TypeError): raise TypeError, "index must be a pair of integers or slices" i, j = key | 1b4d620ba0b6a4971bb8e328ef4ed29cb29759a7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/1b4d620ba0b6a4971bb8e328ef4ed29cb29759a7/sparse.py |
if not isinstance(i, int): | if not isintlike(i): | def __getitem__(self, key): """If key=(i,j) is a pair of integers, return the corresponding element. If either i or j is a slice or sequence, return a new sparse matrix with just these elements. """ try: assert len(key) == 2 except (AssertionError, TypeError): raise TypeError, "index must be a pair of integers or slices" i, j = key | 1b4d620ba0b6a4971bb8e328ef4ed29cb29759a7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/1b4d620ba0b6a4971bb8e328ef4ed29cb29759a7/sparse.py |
if isinstance(j, int): | if isintlike(j): | def __getitem__(self, key): """If key=(i,j) is a pair of integers, return the corresponding element. If either i or j is a slice or sequence, return a new sparse matrix with just these elements. """ try: assert len(key) == 2 except (AssertionError, TypeError): raise TypeError, "index must be a pair of integers or slices" i, j = key | 1b4d620ba0b6a4971bb8e328ef4ed29cb29759a7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/1b4d620ba0b6a4971bb8e328ef4ed29cb29759a7/sparse.py |
if isinstance(i, int) and isinstance(j, int): | if isintlike(i) and isintlike(j): | def __setitem__(self, key, value): try: assert len(key) == 2 except (AssertionError, TypeError): raise TypeError, "index must be a pair of integers, slices, or" \ " sequences" i, j = key | 1b4d620ba0b6a4971bb8e328ef4ed29cb29759a7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/1b4d620ba0b6a4971bb8e328ef4ed29cb29759a7/sparse.py |
if isinstance(value, int) and value == 0: | if isintlike(value) and value == 0: | def __setitem__(self, key, value): try: assert len(key) == 2 except (AssertionError, TypeError): raise TypeError, "index must be a pair of integers, slices, or" \ " sequences" i, j = key | 1b4d620ba0b6a4971bb8e328ef4ed29cb29759a7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/1b4d620ba0b6a4971bb8e328ef4ed29cb29759a7/sparse.py |
if isinstance(value, float) or isinstance(value, int) or \ isinstance(value, complex): dict.__setitem__(self, key, value) | if isinstance(value, float) or isintlike(value) or \ isinstance(value, complex): dict.__setitem__(self, key, self.dtype.type(value)) | def __setitem__(self, key, value): try: assert len(key) == 2 except (AssertionError, TypeError): raise TypeError, "index must be a pair of integers, slices, or" \ " sequences" i, j = key | 1b4d620ba0b6a4971bb8e328ef4ed29cb29759a7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/1b4d620ba0b6a4971bb8e328ef4ed29cb29759a7/sparse.py |
if not isinstance(i, int): | if not isintlike(i): | def __setitem__(self, key, value): try: assert len(key) == 2 except (AssertionError, TypeError): raise TypeError, "index must be a pair of integers, slices, or" \ " sequences" i, j = key | 1b4d620ba0b6a4971bb8e328ef4ed29cb29759a7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/1b4d620ba0b6a4971bb8e328ef4ed29cb29759a7/sparse.py |
if not isinstance(dims, tuple) or not isinstance(dims[0], int): | if not isinstance(dims, tuple) or not isintlike(dims[0]): | def __init__(self, arg1, dims=None, dtype=None): spmatrix.__init__(self) if isinstance(arg1, tuple): try: obj, ij = arg1 except: raise TypeError, "invalid input format" elif arg1 is None: # clumsy! We should make ALL arguments # keyword arguments instead! # Initialize an empty matrix. if not isinstance(dims, tuple) or not isinstance(dims[0], int): raise TypeError, "dimensions not understood" self.shape = dims self.dtype = getdtype(dtype, default=float) self.data = array([]) self.row = array([]) self.col = array([]) self._check() return else: raise TypeError, "invalid input format" self.dtype = getdtype(dtype, obj, default=float) | 1b4d620ba0b6a4971bb8e328ef4ed29cb29759a7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/1b4d620ba0b6a4971bb8e328ef4ed29cb29759a7/sparse.py |
elif isinstance(i, int): | elif isintlike(i): | def __getitem__(self, index): """Return the element(s) index=(i, j), where j may be a slice. This always returns a copy for consistency, since slices into Python lists return copies. """ try: assert len(index) == 2 except (AssertionError, TypeError): raise IndexError, "invalid index" i, j = index if type(i) is slice: raise IndexError, "lil_matrix supports slices only of a single row" # TODO: add support for this, like in __setitem__ elif isinstance(i, int): if not (i>=0 and i<self.shape[0]): raise IndexError, "lil_matrix index out of range" elif operator.isSequenceType(i): raise NotImplementedError, "sequence indexing not yet fully supported" else: raise IndexError, "invalid index" row = self.rows[i] if type(j) is slice: start, stop, stride = j.indices(self.shape[1]) if stride != 1: raise ValueError, "slicing with step != 1 not supported" if min(start, stop) < 0 or max(start, stop-1) >= self.shape[1]: raise IndexError, "invalid index" if stop <= start: raise ValueError, "slice width must be >= 1" # Look up 'start' and 'stop' in column index startind = bisect_left(row, start) stopind = bisect_left(row, stop) new = lil_matrix((1, stop - start), dtype=self.dtype) new.data = [self.data[i][startind:stopind]] new.rows = [[colind - start for colind in row[startind:stopind]]] return new elif operator.isSequenceType(j): raise NotImplementedError, "sequence indexing not yet fully supported" elif isinstance(j, int): if not (j>=0 and j<self.shape[1]): raise IndexError, "lil_matrix index out of range" else: raise IndexError, "invalid index" pos = bisect_left(row, j) if pos == len(row) or row[pos] != j: # Element doesn't exist (is zero) return self.dtype.type(0) else: return self.data[i][pos] | 1b4d620ba0b6a4971bb8e328ef4ed29cb29759a7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/1b4d620ba0b6a4971bb8e328ef4ed29cb29759a7/sparse.py |
elif isinstance(j, int): | elif isintlike(j): | def __getitem__(self, index): """Return the element(s) index=(i, j), where j may be a slice. This always returns a copy for consistency, since slices into Python lists return copies. """ try: assert len(index) == 2 except (AssertionError, TypeError): raise IndexError, "invalid index" i, j = index if type(i) is slice: raise IndexError, "lil_matrix supports slices only of a single row" # TODO: add support for this, like in __setitem__ elif isinstance(i, int): if not (i>=0 and i<self.shape[0]): raise IndexError, "lil_matrix index out of range" elif operator.isSequenceType(i): raise NotImplementedError, "sequence indexing not yet fully supported" else: raise IndexError, "invalid index" row = self.rows[i] if type(j) is slice: start, stop, stride = j.indices(self.shape[1]) if stride != 1: raise ValueError, "slicing with step != 1 not supported" if min(start, stop) < 0 or max(start, stop-1) >= self.shape[1]: raise IndexError, "invalid index" if stop <= start: raise ValueError, "slice width must be >= 1" # Look up 'start' and 'stop' in column index startind = bisect_left(row, start) stopind = bisect_left(row, stop) new = lil_matrix((1, stop - start), dtype=self.dtype) new.data = [self.data[i][startind:stopind]] new.rows = [[colind - start for colind in row[startind:stopind]]] return new elif operator.isSequenceType(j): raise NotImplementedError, "sequence indexing not yet fully supported" elif isinstance(j, int): if not (j>=0 and j<self.shape[1]): raise IndexError, "lil_matrix index out of range" else: raise IndexError, "invalid index" pos = bisect_left(row, j) if pos == len(row) or row[pos] != j: # Element doesn't exist (is zero) return self.dtype.type(0) else: return self.data[i][pos] | 1b4d620ba0b6a4971bb8e328ef4ed29cb29759a7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/1b4d620ba0b6a4971bb8e328ef4ed29cb29759a7/sparse.py |
if isinstance(i, int): | if isintlike(i): | def __setitem__(self, index, x): try: assert len(index) == 2 except (AssertionError, TypeError): raise IndexError, "invalid index" i, j = index if isinstance(i, int): if not (i>=0 and i<self.shape[0]): raise IndexError, "lil_matrix index out of range" else: if isinstance(i, slice): seq = xrange(i.start or 0, i.stop or self.shape[1], i.step or 1) elif operator.isSequenceType(i): seq = i else: raise IndexError, "invalid index" try: if not len(x) == len(seq): raise ValueError, "number of elements in source must be" \ " same as number of elements in destimation" except TypeError: # Either x or seq is not a sequence. Note that a sparse matrix # is also not a sequence under this definition. # Currently we don't support setting to/from non-sequence types. # This could be enhanced, though, to allow a scalar source, # and/or a sparse vector. raise TypeError, "unsupported type for lil_matrix.__setitem__" else: # Sequence: call __setitem__ recursively, once for each row for i in xrange(len(seq)): self[seq[i], index[1]] = x[i] return | 1b4d620ba0b6a4971bb8e328ef4ed29cb29759a7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/1b4d620ba0b6a4971bb8e328ef4ed29cb29759a7/sparse.py |
if isinstance(j, int): | if isintlike(j): | def __setitem__(self, index, x): try: assert len(index) == 2 except (AssertionError, TypeError): raise IndexError, "invalid index" i, j = index if isinstance(i, int): if not (i>=0 and i<self.shape[0]): raise IndexError, "lil_matrix index out of range" else: if isinstance(i, slice): seq = xrange(i.start or 0, i.stop or self.shape[1], i.step or 1) elif operator.isSequenceType(i): seq = i else: raise IndexError, "invalid index" try: if not len(x) == len(seq): raise ValueError, "number of elements in source must be" \ " same as number of elements in destimation" except TypeError: # Either x or seq is not a sequence. Note that a sparse matrix # is also not a sequence under this definition. # Currently we don't support setting to/from non-sequence types. # This could be enhanced, though, to allow a scalar source, # and/or a sparse vector. raise TypeError, "unsupported type for lil_matrix.__setitem__" else: # Sequence: call __setitem__ recursively, once for each row for i in xrange(len(seq)): self[seq[i], index[1]] = x[i] return | 1b4d620ba0b6a4971bb8e328ef4ed29cb29759a7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/1b4d620ba0b6a4971bb8e328ef4ed29cb29759a7/sparse.py |
def remove_bad_vals(x): # !! Fix axis order when interface changed. # mapping: # NaN -> 0 # Inf -> scipy.limits.double_max # -Inf -> scipy.limits.double_min y = nan_to_num(x) big = scipy.limits.double_max / 10 small = scipy.limits.double_min / 10 y = clip(y,small,big) return y | 5b43b2d6620ff522c8d669022f140895e631dfe3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/5b43b2d6620ff522c8d669022f140895e631dfe3/interface.py |
||
big = scipy.limits.double_max / 10 small = scipy.limits.double_min / 10 | big = limits.double_max / 10 small = limits.double_min / 10 | def remove_bad_vals(x): # !! Fix axis order when interface changed. # mapping: # NaN -> 0 # Inf -> scipy.limits.double_max # -Inf -> scipy.limits.double_min y = nan_to_num(x) big = scipy.limits.double_max / 10 small = scipy.limits.double_min / 10 y = clip(y,small,big) return y | 5b43b2d6620ff522c8d669022f140895e631dfe3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/5b43b2d6620ff522c8d669022f140895e631dfe3/interface.py |
if a[0] != 0.0: outb = b / a[0] outa = a / a[0] else: outb, outa = b, a | b,a = map(MLab.asarray,(b,a)) while a[0] == 0.0: a = a[1:] while b[0] == 0.0: b = b[1:] outb = b * (1.0) / a[0] outa = a * (1.0) / a[0] | def normalize(b,a): if a[0] != 0.0: outb = b / a[0] outa = a / a[0] else: outb, outa = b, a return outb, outa | 52477d3d8b9098fa5178fb8886e5ea9d40db9bb3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/52477d3d8b9098fa5178fb8886e5ea9d40db9bb3/filter_design.py |
def butter(N, Wn, bandtype='band', analog=0, output=''): """Butterworth digital and analog filter design. | def iirdesign(wp, ws, gpass, gstop, analog=0, ftype='ellip', output='ba'): """Complete IIR digital and analog filter design. """ try: ordfunc = filter_dict[ftype][1] except KeyError: raise ValueError, "Invalid IIR filter type." except IndexError: raise ValueError, "%s does not have order selection use iirfilter function." % ftype wp = r1array(wp) ws = r1array(ws) band_type = 2*(len(wp)-1) band_type +=1 if wp[0] >= ws[0]: band_type += 1 btype = {1:'lowpass', 2:'highpass', 3:'bandstop', 4:'bandpass'}[band_type] N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog) return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype, ftype=ftype, output=output) def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=0, ftype='butter', output='ba'): """IIR digital and analog filter design given order and critical points. | def butter(N, Wn, bandtype='band', analog=0, output=''): """Butterworth digital and analog filter design. Description: Design an Nth order lowpass digital Butterworth filter and return the filter coefficients in (B,A) form. Inputs: """ #pre-warp frequencies for digital filter design if not analog: warped = 2*fs*tan(pi*Wn/fs) else: warped = Wn # convert to low-pass prototype # Get analog lowpass prototype # transform to lowpass, bandpass, highpass, or bandstop # Find discrete equivalent if necessary if not analog: pass # Transform to proper out type (pole-zero, state-space, numer-denom) pass | 52477d3d8b9098fa5178fb8886e5ea9d40db9bb3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/52477d3d8b9098fa5178fb8886e5ea9d40db9bb3/filter_design.py |
Design an Nth order lowpass digital Butterworth filter and return the filter coefficients in (B,A) form. | Design an Nth order lowpass digital or analog filter and return the filter coefficients in (B,A) (numerator, denominator) or (Z,P,K) form. | def butter(N, Wn, bandtype='band', analog=0, output=''): """Butterworth digital and analog filter design. Description: Design an Nth order lowpass digital Butterworth filter and return the filter coefficients in (B,A) form. Inputs: """ #pre-warp frequencies for digital filter design if not analog: warped = 2*fs*tan(pi*Wn/fs) else: warped = Wn # convert to low-pass prototype # Get analog lowpass prototype # transform to lowpass, bandpass, highpass, or bandstop # Find discrete equivalent if necessary if not analog: pass # Transform to proper out type (pole-zero, state-space, numer-denom) pass | 52477d3d8b9098fa5178fb8886e5ea9d40db9bb3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/52477d3d8b9098fa5178fb8886e5ea9d40db9bb3/filter_design.py |
""" | N -- the order of the filter. Wn -- a scalar or length-2 sequence giving the critical frequencies. rp, rs -- For chebyshev and elliptic filters provides the maximum ripple in the passband and the minimum attenuation in the stop band. btype -- the type of filter (lowpass, highpass, bandpass, or bandstop). analog -- non-zero to return an analog filter, otherwise a digital filter is returned. ftype -- the type of IIR filter (Butterworth, Cauer (Elliptic), Bessel, Chebyshev1, Chebyshev2) output -- 'ba' for (B,A) output, 'zpk' for (Z,P,K) output. """ ftype, btype, output = map(string.lower, (ftype, btype, output)) Wn = asarray(Wn) try: btype = band_dict[btype] except KeyError: raise ValueError, "%s is an invalid bandtype for filter." % btype try: typefunc = filter_dict[ftype][0] except KeyError: raise ValueError, "%s is not a valid basic iir filter." % ftype if output not in ['ba', 'zpk']: raise ValueError, "%s is not a valid output form." % output | def butter(N, Wn, bandtype='band', analog=0, output=''): """Butterworth digital and analog filter design. Description: Design an Nth order lowpass digital Butterworth filter and return the filter coefficients in (B,A) form. Inputs: """ #pre-warp frequencies for digital filter design if not analog: warped = 2*fs*tan(pi*Wn/fs) else: warped = Wn # convert to low-pass prototype # Get analog lowpass prototype # transform to lowpass, bandpass, highpass, or bandstop # Find discrete equivalent if necessary if not analog: pass # Transform to proper out type (pole-zero, state-space, numer-denom) pass | 52477d3d8b9098fa5178fb8886e5ea9d40db9bb3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/52477d3d8b9098fa5178fb8886e5ea9d40db9bb3/filter_design.py |