- """Lite version of scipy.linalg.
- Notes
- -----
- This module is a lite version of the linalg.py module in SciPy which
- contains high-level Python interface to the LAPACK library. The lite
- version only accesses the following LAPACK functions: dgesv, zgesv,
- dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
- zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
- """
- from __future__ import division, absolute_import, print_function
- __all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
- 'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
- 'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
- 'LinAlgError', 'multi_dot']
- import warnings
- from numpy.core import (
- array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
- csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
- add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
- finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
- broadcast, atleast_2d, intp, asanyarray, isscalar, object_
- )
- from numpy.lib import triu, asfarray
- from numpy.linalg import lapack_lite, _umath_linalg
- from numpy.matrixlib.defmatrix import matrix_power
- from numpy.compat import asbytes
- # For Python2/3 compatibility
- _N = asbytes('N')
- _V = asbytes('V')
- _A = asbytes('A')
- _S = asbytes('S')
- _L = asbytes('L')
- fortran_int = intc
- # Error object
- class LinAlgError(Exception):
- """
- Generic Python-exception-derived object raised by linalg functions.
- General purpose exception class, derived from Python's exception.Exception
- class, programmatically raised in linalg functions when a Linear
- Algebra-related condition would prevent further correct execution of the
- function.
- Parameters
- ----------
- None
- Examples
- --------
- >>> from numpy import linalg as LA
- >>> LA.inv(np.zeros((2,2)))
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- File "...linalg.py", line 350,
- in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
- File "...linalg.py", line 249,
- in solve
- raise LinAlgError('Singular matrix')
- numpy.linalg.LinAlgError: Singular matrix
- """
- pass
- # Dealing with errors in _umath_linalg
- _linalg_error_extobj = None
- def _determine_error_states():
- global _linalg_error_extobj
- errobj = geterrobj()
- bufsize = errobj[0]
- with errstate(invalid='call', over='ignore',
- divide='ignore', under='ignore'):
- invalid_call_errmask = geterrobj()[1]
- _linalg_error_extobj = [bufsize, invalid_call_errmask, None]
- _determine_error_states()
- def _raise_linalgerror_singular(err, flag):
- raise LinAlgError("Singular matrix")
- def _raise_linalgerror_nonposdef(err, flag):
- raise LinAlgError("Matrix is not positive definite")
- def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
- raise LinAlgError("Eigenvalues did not converge")
- def _raise_linalgerror_svd_nonconvergence(err, flag):
- raise LinAlgError("SVD did not converge")
- def get_linalg_error_extobj(callback):
- extobj = list(_linalg_error_extobj)
- extobj[2] = callback
- return extobj
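- # A minimal sketch of how the error machinery above is meant to be used
- # (names follow this module; the gufunc call mirrors the public functions):
- #   extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
- #   r = _umath_linalg.inv(a, signature='d->d', extobj=extobj)
- # Because the extobj was captured with invalid='call', a failing LAPACK call
- # inside the gufunc triggers the callback, which raises LinAlgError instead
- # of silently returning NaNs.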
- def _makearray(a):
- new = asarray(a)
- wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
- return new, wrap
- def isComplexType(t):
- return issubclass(t, complexfloating)
- _real_types_map = {single : single,
- double : double,
- csingle : single,
- cdouble : double}
- _complex_types_map = {single : csingle,
- double : cdouble,
- csingle : csingle,
- cdouble : cdouble}
- def _realType(t, default=double):
- return _real_types_map.get(t, default)
- def _complexType(t, default=cdouble):
- return _complex_types_map.get(t, default)
- def _linalgRealType(t):
- """Cast the type t to either double or cdouble."""
- return double
- def _commonType(*arrays):
- # in lite version, use higher precision (always double or cdouble)
- result_type = single
- is_complex = False
- for a in arrays:
- if issubclass(a.dtype.type, inexact):
- if isComplexType(a.dtype.type):
- is_complex = True
- rt = _realType(a.dtype.type, default=None)
- if rt is None:
- # unsupported inexact scalar
- raise TypeError("array type %s is unsupported in linalg" %
- (a.dtype.name,))
- else:
- rt = double
- if rt is double:
- result_type = double
- if is_complex:
- t = cdouble
- result_type = _complex_types_map[result_type]
- else:
- t = double
- return t, result_type
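- # For illustration of _commonType (a small sketch, dtype names as imported
- # above): the computation type t is always double or cdouble, while the
- # result type keeps single precision when every floating input is single:
- #   t, result_t = _commonType(np.zeros(2, np.float32))           # double, single
- #   t, result_t = _commonType(np.zeros((2, 2), np.complex64))    # cdouble, csingle
- #   t, result_t = _commonType(np.zeros((2, 2), int))             # double, double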
- # _fastCopyAndTranspose assumes the input is 2D (as all the calls in here are).
- _fastCT = fastCopyAndTranspose
- def _to_native_byte_order(*arrays):
- ret = []
- for arr in arrays:
- if arr.dtype.byteorder not in ('=', '|'):
- ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
- else:
- ret.append(arr)
- if len(ret) == 1:
- return ret[0]
- else:
- return ret
- def _fastCopyAndTranspose(type, *arrays):
- cast_arrays = ()
- for a in arrays:
- if a.dtype.type is type:
- cast_arrays = cast_arrays + (_fastCT(a),)
- else:
- cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
- if len(cast_arrays) == 1:
- return cast_arrays[0]
- else:
- return cast_arrays
- def _assertRank2(*arrays):
- for a in arrays:
- if len(a.shape) != 2:
- raise LinAlgError('%d-dimensional array given. Array must be '
- 'two-dimensional' % len(a.shape))
- def _assertRankAtLeast2(*arrays):
- for a in arrays:
- if len(a.shape) < 2:
- raise LinAlgError('%d-dimensional array given. Array must be '
- 'at least two-dimensional' % len(a.shape))
- def _assertSquareness(*arrays):
- for a in arrays:
- if max(a.shape) != min(a.shape):
- raise LinAlgError('Array must be square')
- def _assertNdSquareness(*arrays):
- for a in arrays:
- if max(a.shape[-2:]) != min(a.shape[-2:]):
- raise LinAlgError('Last 2 dimensions of the array must be square')
- def _assertFinite(*arrays):
- for a in arrays:
- if not (isfinite(a).all()):
- raise LinAlgError("Array must not contain infs or NaNs")
- def _assertNoEmpty2d(*arrays):
- for a in arrays:
- if a.size == 0 and product(a.shape[-2:]) == 0:
- raise LinAlgError("Arrays cannot be empty")
- # Linear equations
- def tensorsolve(a, b, axes=None):
- """
- Solve the tensor equation ``a x = b`` for x.
- It is assumed that all indices of `x` are summed over in the product,
- together with the rightmost indices of `a`, as is done in, for example,
- ``tensordot(a, x, axes=len(b.shape))``.
- Parameters
- ----------
- a : array_like
- Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
- the shape of that sub-tensor of `a` consisting of the appropriate
- number of its rightmost indices, and must be such that
- ``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
- 'square').
- b : array_like
- Right-hand tensor, which can be of any shape.
- axes : tuple of ints, optional
- Axes in `a` to reorder to the right, before inversion.
- If None (default), no reordering is done.
- Returns
- -------
- x : ndarray, shape Q
- Raises
- ------
- LinAlgError
- If `a` is singular or not 'square' (in the above sense).
- See Also
- --------
- tensordot, tensorinv, einsum
- Examples
- --------
- >>> a = np.eye(2*3*4)
- >>> a.shape = (2*3, 4, 2, 3, 4)
- >>> b = np.random.randn(2*3, 4)
- >>> x = np.linalg.tensorsolve(a, b)
- >>> x.shape
- (2, 3, 4)
- >>> np.allclose(np.tensordot(a, x, axes=3), b)
- True
- """
- a, wrap = _makearray(a)
- b = asarray(b)
- an = a.ndim
- if axes is not None:
- allaxes = list(range(0, an))
- for k in axes:
- allaxes.remove(k)
- allaxes.insert(an, k)
- a = a.transpose(allaxes)
- oldshape = a.shape[-(an-b.ndim):]
- prod = 1
- for k in oldshape:
- prod *= k
- a = a.reshape(-1, prod)
- b = b.ravel()
- res = wrap(solve(a, b))
- res.shape = oldshape
- return res
- def solve(a, b):
- """
- Solve a linear matrix equation, or system of linear scalar equations.
- Computes the "exact" solution, `x`, of the well-determined, i.e., full
- rank, linear matrix equation `ax = b`.
- Parameters
- ----------
- a : (..., M, M) array_like
- Coefficient matrix.
- b : {(..., M,), (..., M, K)}, array_like
- Ordinate or "dependent variable" values.
- Returns
- -------
- x : {(..., M,), (..., M, K)} ndarray
- Solution to the system a x = b. Returned shape is identical to `b`.
- Raises
- ------
- LinAlgError
- If `a` is singular or not square.
- Notes
- -----
- .. versionadded:: 1.8.0
- Broadcasting rules apply, see the `numpy.linalg` documentation for
- details.
- The solutions are computed using LAPACK routine _gesv.
- `a` must be square and of full-rank, i.e., all rows (or, equivalently,
- columns) must be linearly independent; if either is not true, use
- `lstsq` for the least-squares best "solution" of the
- system/equation.
- References
- ----------
- .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
- FL, Academic Press, Inc., 1980, pg. 22.
- Examples
- --------
- Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
- >>> a = np.array([[3,1], [1,2]])
- >>> b = np.array([9,8])
- >>> x = np.linalg.solve(a, b)
- >>> x
- array([ 2., 3.])
- Check that the solution is correct:
- >>> np.allclose(np.dot(a, x), b)
- True
- """
- a, _ = _makearray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
- b, wrap = _makearray(b)
- t, result_t = _commonType(a, b)
- # We use the b = (..., M,) logic only if the number of extra dimensions
- # matches exactly.
- if b.ndim == a.ndim - 1:
- if a.shape[-1] == 0 and b.shape[-1] == 0:
- # Legal, but the ufunc cannot handle the 0-sized inner dims
- # let the ufunc handle all wrong cases.
- a = a.reshape(a.shape[:-1])
- bc = broadcast(a, b)
- return wrap(empty(bc.shape, dtype=result_t))
- gufunc = _umath_linalg.solve1
- else:
- if b.size == 0:
- if (a.shape[-1] == 0 and b.shape[-2] == 0) or b.shape[-1] == 0:
- a = a[:,:1].reshape(a.shape[:-1] + (1,))
- bc = broadcast(a, b)
- return wrap(empty(bc.shape, dtype=result_t))
- gufunc = _umath_linalg.solve
- signature = 'DD->D' if isComplexType(t) else 'dd->d'
- extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
- r = gufunc(a, b, signature=signature, extobj=extobj)
- return wrap(r.astype(result_t, copy=False))
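- # A small broadcasting sketch (shapes chosen purely for illustration): a
- # stack of two 3x3 systems is solved in one call when b carries one fewer
- # dimension than a:
- #   >>> a = np.array([np.eye(3), 2 * np.eye(3)])   # shape (2, 3, 3)
- #   >>> b = np.ones((2, 3))
- #   >>> x = np.linalg.solve(a, b)
- #   >>> x.shape
- #   (2, 3)
- #   >>> np.allclose(a[1].dot(x[1]), b[1])
- #   True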
- def tensorinv(a, ind=2):
- """
- Compute the 'inverse' of an N-dimensional array.
- The result is an inverse for `a` relative to the tensordot operation
- ``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
- ``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
- tensordot operation.
- Parameters
- ----------
- a : array_like
- Tensor to 'invert'. Its shape must be 'square', i. e.,
- ``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
- ind : int, optional
- Number of first indices that are involved in the inverse sum.
- Must be a positive integer, default is 2.
- Returns
- -------
- b : ndarray
- `a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
- Raises
- ------
- LinAlgError
- If `a` is singular or not 'square' (in the above sense).
- See Also
- --------
- tensordot, tensorsolve
- Examples
- --------
- >>> a = np.eye(4*6)
- >>> a.shape = (4, 6, 8, 3)
- >>> ainv = np.linalg.tensorinv(a, ind=2)
- >>> ainv.shape
- (8, 3, 4, 6)
- >>> b = np.random.randn(4, 6)
- >>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
- True
- >>> a = np.eye(4*6)
- >>> a.shape = (24, 8, 3)
- >>> ainv = np.linalg.tensorinv(a, ind=1)
- >>> ainv.shape
- (8, 3, 24)
- >>> b = np.random.randn(24)
- >>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
- True
- """
- a = asarray(a)
- oldshape = a.shape
- prod = 1
- if ind > 0:
- invshape = oldshape[ind:] + oldshape[:ind]
- for k in oldshape[ind:]:
- prod *= k
- else:
- raise ValueError("Invalid ind argument.")
- a = a.reshape(prod, -1)
- ia = inv(a)
- return ia.reshape(*invshape)
- # Matrix inversion
- def inv(a):
- """
- Compute the (multiplicative) inverse of a matrix.
- Given a square matrix `a`, return the matrix `ainv` satisfying
- ``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
- Parameters
- ----------
- a : (..., M, M) array_like
- Matrix to be inverted.
- Returns
- -------
- ainv : (..., M, M) ndarray or matrix
- (Multiplicative) inverse of the matrix `a`.
- Raises
- ------
- LinAlgError
- If `a` is not square or inversion fails.
- Notes
- -----
- .. versionadded:: 1.8.0
- Broadcasting rules apply, see the `numpy.linalg` documentation for
- details.
- Examples
- --------
- >>> from numpy.linalg import inv
- >>> a = np.array([[1., 2.], [3., 4.]])
- >>> ainv = inv(a)
- >>> np.allclose(np.dot(a, ainv), np.eye(2))
- True
- >>> np.allclose(np.dot(ainv, a), np.eye(2))
- True
- If a is a matrix object, then the return value is a matrix as well:
- >>> ainv = inv(np.matrix(a))
- >>> ainv
- matrix([[-2. , 1. ],
- [ 1.5, -0.5]])
- Inverses of several matrices can be computed at once:
- >>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
- >>> inv(a)
- array([[[-2. , 1. ],
- [ 1.5, -0.5]],
- [[-5. , 2. ],
- [ 3. , -1. ]]])
- """
- a, wrap = _makearray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
- t, result_t = _commonType(a)
- if a.shape[-1] == 0:
- # The inner array is 0x0, the ufunc cannot handle this case
- return wrap(empty_like(a, dtype=result_t))
- signature = 'D->D' if isComplexType(t) else 'd->d'
- extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
- ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
- return wrap(ainv.astype(result_t, copy=False))
- # Cholesky decomposition
- def cholesky(a):
- """
- Cholesky decomposition.
- Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
- where `L` is lower-triangular and .H is the conjugate transpose operator
- (which is the ordinary transpose if `a` is real-valued). `a` must be
- Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
- actually returned.
- Parameters
- ----------
- a : (..., M, M) array_like
- Hermitian (symmetric if all elements are real), positive-definite
- input matrix.
- Returns
- -------
- L : (..., M, M) array_like
- Lower-triangular Cholesky factor of `a`. Returns a
- matrix object if `a` is a matrix object.
- Raises
- ------
- LinAlgError
- If the decomposition fails, for example, if `a` is not
- positive-definite.
- Notes
- -----
- .. versionadded:: 1.8.0
- Broadcasting rules apply, see the `numpy.linalg` documentation for
- details.
- The Cholesky decomposition is often used as a fast way of solving
- .. math:: A \\mathbf{x} = \\mathbf{b}
- (when `A` is both Hermitian/symmetric and positive-definite).
- First, we solve for :math:`\\mathbf{y}` in
- .. math:: L \\mathbf{y} = \\mathbf{b},
- and then for :math:`\\mathbf{x}` in
- .. math:: L.H \\mathbf{x} = \\mathbf{y}.
- Examples
- --------
- >>> A = np.array([[1,-2j],[2j,5]])
- >>> A
- array([[ 1.+0.j, 0.-2.j],
- [ 0.+2.j, 5.+0.j]])
- >>> L = np.linalg.cholesky(A)
- >>> L
- array([[ 1.+0.j, 0.+0.j],
- [ 0.+2.j, 1.+0.j]])
- >>> np.dot(L, L.T.conj()) # verify that L * L.H = A
- array([[ 1.+0.j, 0.-2.j],
- [ 0.+2.j, 5.+0.j]])
- >>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
- >>> np.linalg.cholesky(A) # an ndarray object is returned
- array([[ 1.+0.j, 0.+0.j],
- [ 0.+2.j, 1.+0.j]])
- >>> # But a matrix object is returned if A is a matrix object
- >>> LA.cholesky(np.matrix(A))
- matrix([[ 1.+0.j, 0.+0.j],
- [ 0.+2.j, 1.+0.j]])
- """
- extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
- gufunc = _umath_linalg.cholesky_lo
- a, wrap = _makearray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
- t, result_t = _commonType(a)
- signature = 'D->D' if isComplexType(t) else 'd->d'
- r = gufunc(a, signature=signature, extobj=extobj)
- return wrap(r.astype(result_t, copy=False))
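- # A minimal sketch of the solve pattern described in the Notes above
- # (np.linalg.solve is used here for brevity; a dedicated triangular solver
- # such as scipy.linalg.solve_triangular would exploit the structure of L):
- #   >>> A = np.array([[4., 2.], [2., 3.]])
- #   >>> b = np.array([1., 2.])
- #   >>> L = np.linalg.cholesky(A)
- #   >>> y = np.linalg.solve(L, b)            # L y = b
- #   >>> x = np.linalg.solve(L.T.conj(), y)   # L.H x = y
- #   >>> np.allclose(A.dot(x), b)
- #   True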
- # QR decomposition
- def qr(a, mode='reduced'):
- """
- Compute the qr factorization of a matrix.
- Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
- upper-triangular.
- Parameters
- ----------
- a : array_like, shape (M, N)
- Matrix to be factored.
- mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
- If K = min(M, N), then
- 'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
- 'complete' : returns q, r with dimensions (M, M), (M, N)
- 'r' : returns r only with dimensions (K, N)
- 'raw' : returns h, tau with dimensions (N, M), (K,)
- 'full' : alias of 'reduced', deprecated
- 'economic' : returns h from 'raw', deprecated.
- The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
- see the notes for more information. The default is 'reduced' and to
- maintain backward compatibility with earlier versions of numpy both
- it and the old default 'full' can be omitted. Note that array h
- returned in 'raw' mode is transposed for calling Fortran. The
- 'economic' mode is deprecated. The modes 'full' and 'economic' may
- be passed using only the first letter for backwards compatibility,
- but all others must be spelled out. See the Notes for more
- explanation.
- Returns
- -------
- q : ndarray of float or complex, optional
- A matrix with orthonormal columns. When mode = 'complete' the
- result is an orthogonal/unitary matrix depending on whether or not
- a is real/complex. The determinant may be either +/- 1 in that
- case.
- r : ndarray of float or complex, optional
- The upper-triangular matrix.
- (h, tau) : ndarrays of np.double or np.cdouble, optional
- The array h contains the Householder reflectors that generate q
- along with r. The tau array contains scaling factors for the
- reflectors. In the deprecated 'economic' mode only h is returned.
- Raises
- ------
- LinAlgError
- If factoring fails.
- Notes
- -----
- This is an interface to the LAPACK routines dgeqrf, zgeqrf,
- dorgqr, and zungqr.
- For more information on the qr factorization, see for example:
- http://en.wikipedia.org/wiki/QR_factorization
- Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
- `a` is of type `matrix`, all the return values will be matrices too.
- New 'reduced', 'complete', and 'raw' options for mode were added in
- Numpy 1.8 and the old option 'full' was made an alias of 'reduced'. In
- addition the options 'full' and 'economic' were deprecated. Because
- 'full' was the previous default and 'reduced' is the new default,
- backward compatibility can be maintained by letting `mode` default.
- The 'raw' option was added so that LAPACK routines that can multiply
- arrays by q using the Householder reflectors can be used. Note that in
- this case the returned arrays are of type np.double or np.cdouble and
- the h array is transposed to be FORTRAN compatible. No routines using
- the 'raw' return are currently exposed by numpy, but some are available
- in lapack_lite and just await the necessary work.
- Examples
- --------
- >>> a = np.random.randn(9, 6)
- >>> q, r = np.linalg.qr(a)
- >>> np.allclose(a, np.dot(q, r)) # a does equal qr
- True
- >>> r2 = np.linalg.qr(a, mode='r')
- >>> r3 = np.linalg.qr(a, mode='economic')
- >>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
- True
- >>> # But only triu parts are guaranteed equal when mode='economic'
- >>> np.allclose(r, np.triu(r3[:6,:6], k=0))
- True
- Example illustrating a common use of `qr`: solving of least squares
- problems
- What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
- the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
- and you'll see that it should be y0 = 0, m = 1.) The answer is provided
- by solving the over-determined matrix equation ``Ax = b``, where::
- A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
- x = array([[y0], [m]])
- b = array([[1], [0], [2], [1]])
- If A = qr such that q is orthonormal (which is always possible via
- Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
- however, we simply use `lstsq`.)
- >>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
- >>> A
- array([[0, 1],
- [1, 1],
- [1, 1],
- [2, 1]])
- >>> b = np.array([1, 0, 2, 1])
- >>> q, r = LA.qr(A)
- >>> p = np.dot(q.T, b)
- >>> np.dot(LA.inv(r), p)
- array([ 1.1e-16, 1.0e+00])
- """
- if mode not in ('reduced', 'complete', 'r', 'raw'):
- if mode in ('f', 'full'):
- # 2013-04-01, 1.8
- msg = "".join((
- "The 'full' option is deprecated in favor of 'reduced'.\n",
- "For backward compatibility let mode default."))
- warnings.warn(msg, DeprecationWarning)
- mode = 'reduced'
- elif mode in ('e', 'economic'):
- # 2013-04-01, 1.8
- msg = "The 'economic' option is deprecated.",
- warnings.warn(msg, DeprecationWarning)
- mode = 'economic'
- else:
- raise ValueError("Unrecognized mode '%s'" % mode)
- a, wrap = _makearray(a)
- _assertRank2(a)
- _assertNoEmpty2d(a)
- m, n = a.shape
- t, result_t = _commonType(a)
- a = _fastCopyAndTranspose(t, a)
- a = _to_native_byte_order(a)
- mn = min(m, n)
- tau = zeros((mn,), t)
- if isComplexType(t):
- lapack_routine = lapack_lite.zgeqrf
- routine_name = 'zgeqrf'
- else:
- lapack_routine = lapack_lite.dgeqrf
- routine_name = 'dgeqrf'
- # calculate optimal size of work data 'work'
- lwork = 1
- work = zeros((lwork,), t)
- results = lapack_routine(m, n, a, m, tau, work, -1, 0)
- if results['info'] != 0:
- raise LinAlgError('%s returns %d' % (routine_name, results['info']))
- # do qr decomposition
- lwork = int(abs(work[0]))
- work = zeros((lwork,), t)
- results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
- if results['info'] != 0:
- raise LinAlgError('%s returns %d' % (routine_name, results['info']))
- # handle modes that don't return q
- if mode == 'r':
- r = _fastCopyAndTranspose(result_t, a[:, :mn])
- return wrap(triu(r))
- if mode == 'raw':
- return a, tau
- if mode == 'economic':
- if t != result_t :
- a = a.astype(result_t, copy=False)
- return wrap(a.T)
- # generate q from a
- if mode == 'complete' and m > n:
- mc = m
- q = empty((m, m), t)
- else:
- mc = mn
- q = empty((n, m), t)
- q[:n] = a
- if isComplexType(t):
- lapack_routine = lapack_lite.zungqr
- routine_name = 'zungqr'
- else:
- lapack_routine = lapack_lite.dorgqr
- routine_name = 'dorgqr'
- # determine optimal lwork
- lwork = 1
- work = zeros((lwork,), t)
- results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
- if results['info'] != 0:
- raise LinAlgError('%s returns %d' % (routine_name, results['info']))
- # compute q
- lwork = int(abs(work[0]))
- work = zeros((lwork,), t)
- results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
- if results['info'] != 0:
- raise LinAlgError('%s returns %d' % (routine_name, results['info']))
- q = _fastCopyAndTranspose(result_t, q[:mc])
- r = _fastCopyAndTranspose(result_t, a[:, :mc])
- return wrap(q), wrap(triu(r))
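- # A short sketch of the mode-dependent shapes documented above, for a tall
- # 9x6 input (the values are random; only the shapes matter here):
- #   >>> a = np.random.randn(9, 6)
- #   >>> q, r = np.linalg.qr(a)                   # 'reduced' (default)
- #   >>> q.shape, r.shape
- #   ((9, 6), (6, 6))
- #   >>> q, r = np.linalg.qr(a, mode='complete')
- #   >>> q.shape, r.shape
- #   ((9, 9), (9, 6))
- #   >>> np.linalg.qr(a, mode='r').shape
- #   (6, 6)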
- # Eigenvalues
- def eigvals(a):
- """
- Compute the eigenvalues of a general matrix.
- Main difference between `eigvals` and `eig`: the eigenvectors aren't
- returned.
- Parameters
- ----------
- a : (..., M, M) array_like
- A complex- or real-valued matrix whose eigenvalues will be computed.
- Returns
- -------
- w : (..., M,) ndarray
- The eigenvalues, each repeated according to its multiplicity.
- They are not necessarily ordered, nor are they necessarily
- real for real matrices.
- Raises
- ------
- LinAlgError
- If the eigenvalue computation does not converge.
- See Also
- --------
- eig : eigenvalues and right eigenvectors of general arrays
- eigvalsh : eigenvalues of symmetric or Hermitian arrays.
- eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
- Notes
- -----
- .. versionadded:: 1.8.0
- Broadcasting rules apply, see the `numpy.linalg` documentation for
- details.
- This is implemented using the _geev LAPACK routines which compute
- the eigenvalues and eigenvectors of general square arrays.
- Examples
- --------
- Illustration, using the fact that the eigenvalues of a diagonal matrix
- are its diagonal elements, that multiplying a matrix on the left
- by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
- of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
- if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
- ``A``:
- >>> from numpy import linalg as LA
- >>> x = np.random.random()
- >>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
- >>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
- (1.0, 1.0, 0.0)
- Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
- >>> D = np.diag((-1,1))
- >>> LA.eigvals(D)
- array([-1., 1.])
- >>> A = np.dot(Q, D)
- >>> A = np.dot(A, Q.T)
- >>> LA.eigvals(A)
- array([ 1., -1.])
- """
- a, wrap = _makearray(a)
- _assertNoEmpty2d(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
- _assertFinite(a)
- t, result_t = _commonType(a)
- extobj = get_linalg_error_extobj(
- _raise_linalgerror_eigenvalues_nonconvergence)
- signature = 'D->D' if isComplexType(t) else 'd->D'
- w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
- if not isComplexType(t):
- if all(w.imag == 0):
- w = w.real
- result_t = _realType(result_t)
- else:
- result_t = _complexType(result_t)
- return w.astype(result_t, copy=False)
- def eigvalsh(a, UPLO='L'):
- """
- Compute the eigenvalues of a Hermitian or real symmetric matrix.
- Main difference from eigh: the eigenvectors are not computed.
- Parameters
- ----------
- a : (..., M, M) array_like
- A complex- or real-valued matrix whose eigenvalues are to be
- computed.
- UPLO : {'L', 'U'}, optional
- Same as `lower`, with 'L' for lower and 'U' for upper triangular.
- Deprecated.
- Returns
- -------
- w : (..., M,) ndarray
- The eigenvalues in ascending order, each repeated according to
- its multiplicity.
- Raises
- ------
- LinAlgError
- If the eigenvalue computation does not converge.
- See Also
- --------
- eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
- eigvals : eigenvalues of general real or complex arrays.
- eig : eigenvalues and right eigenvectors of general real or complex
- arrays.
- Notes
- -----
- .. versionadded:: 1.8.0
- Broadcasting rules apply, see the `numpy.linalg` documentation for
- details.
- The eigenvalues are computed using LAPACK routines _syevd, _heevd.
- Examples
- --------
- >>> from numpy import linalg as LA
- >>> a = np.array([[1, -2j], [2j, 5]])
- >>> LA.eigvalsh(a)
- array([ 0.17157288, 5.82842712])
- """
- UPLO = UPLO.upper()
- if UPLO not in ('L', 'U'):
- raise ValueError("UPLO argument must be 'L' or 'U'")
- extobj = get_linalg_error_extobj(
- _raise_linalgerror_eigenvalues_nonconvergence)
- if UPLO == 'L':
- gufunc = _umath_linalg.eigvalsh_lo
- else:
- gufunc = _umath_linalg.eigvalsh_up
- a, wrap = _makearray(a)
- _assertNoEmpty2d(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
- t, result_t = _commonType(a)
- signature = 'D->d' if isComplexType(t) else 'd->d'
- w = gufunc(a, signature=signature, extobj=extobj)
- return w.astype(_realType(result_t), copy=False)
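- # A small sketch of the UPLO behaviour: only the selected triangle of the
- # input is referenced, so a matrix whose triangles disagree gives different
- # results. Here UPLO='L' sees the symmetric matrix [[1, 2], [2, 5]], while
- # UPLO='U' sees diag(1, 5):
- #   >>> a = np.array([[1., 0.], [2., 5.]])   # upper triangle left at zero
- #   >>> np.linalg.eigvalsh(a, UPLO='L')
- #   array([ 0.17157288,  5.82842712])
- #   >>> np.linalg.eigvalsh(a, UPLO='U')
- #   array([ 1.,  5.])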
- def _convertarray(a):
- t, result_t = _commonType(a)
- a = _fastCT(a.astype(t))
- return a, t, result_t
- # Eigenvectors
- def eig(a):
- """
- Compute the eigenvalues and right eigenvectors of a square array.
- Parameters
- ----------
- a : (..., M, M) array
- Matrices for which the eigenvalues and right eigenvectors will
- be computed
- Returns
- -------
- w : (..., M) array
- The eigenvalues, each repeated according to its multiplicity.
- The eigenvalues are not necessarily ordered. The resulting
- array will be of complex type, unless the imaginary part is
- zero in which case it will be cast to a real type. When `a`
- is real the resulting eigenvalues will be real (0 imaginary
- part) or occur in conjugate pairs
- v : (..., M, M) array
- The normalized (unit "length") eigenvectors, such that the
- column ``v[:,i]`` is the eigenvector corresponding to the
- eigenvalue ``w[i]``.
- Raises
- ------
- LinAlgError
- If the eigenvalue computation does not converge.
- See Also
- --------
- eigvals : eigenvalues of a non-symmetric array.
- eigh : eigenvalues and eigenvectors of a symmetric or Hermitian
- (conjugate symmetric) array.
- eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
- array.
- Notes
- -----
- .. versionadded:: 1.8.0
- Broadcasting rules apply, see the `numpy.linalg` documentation for
- details.
- This is implemented using the _geev LAPACK routines which compute
- the eigenvalues and eigenvectors of general square arrays.
- The number `w` is an eigenvalue of `a` if there exists a vector
- `v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
- `v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
- for :math:`i \\in \\{0,...,M-1\\}`.
- The array `v` of eigenvectors may not be of maximum rank, that is, some
- of the columns may be linearly dependent, although round-off error may
- obscure that fact. If the eigenvalues are all different, then theoretically
- the eigenvectors are linearly independent. Likewise, the (complex-valued)
- matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
- if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
- transpose of `a`.
- Finally, it is emphasized that `v` consists of the *right* (as in
- right-hand side) eigenvectors of `a`. A vector `y` satisfying
- ``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
- eigenvector of `a`, and, in general, the left and right eigenvectors
- of a matrix are not necessarily the (perhaps conjugate) transposes
- of each other.
- References
- ----------
- G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
- Academic Press, Inc., 1980, Various pp.
- Examples
- --------
- >>> from numpy import linalg as LA
- (Almost) trivial example with real e-values and e-vectors.
- >>> w, v = LA.eig(np.diag((1, 2, 3)))
- >>> w; v
- array([ 1., 2., 3.])
- array([[ 1., 0., 0.],
- [ 0., 1., 0.],
- [ 0., 0., 1.]])
- Real matrix possessing complex e-values and e-vectors; note that the
- e-values are complex conjugates of each other.
- >>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
- >>> w; v
- array([ 1. + 1.j, 1. - 1.j])
- array([[ 0.70710678+0.j , 0.70710678+0.j ],
- [ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
- Complex-valued matrix with real e-values (but complex-valued e-vectors);
- note that a.conj().T = a, i.e., a is Hermitian.
- >>> a = np.array([[1, 1j], [-1j, 1]])
- >>> w, v = LA.eig(a)
- >>> w; v
- array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
- array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
- [ 0.70710678+0.j , 0.00000000+0.70710678j]])
- Be careful about round-off error!
- >>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
- >>> # Theor. e-values are 1 +/- 1e-9
- >>> w, v = LA.eig(a)
- >>> w; v
- array([ 1., 1.])
- array([[ 1., 0.],
- [ 0., 1.]])
- """
- a, wrap = _makearray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
- _assertFinite(a)
- t, result_t = _commonType(a)
- extobj = get_linalg_error_extobj(
- _raise_linalgerror_eigenvalues_nonconvergence)
- signature = 'D->DD' if isComplexType(t) else 'd->DD'
- w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
- if not isComplexType(t) and all(w.imag == 0.0):
- w = w.real
- vt = vt.real
- result_t = _realType(result_t)
- else:
- result_t = _complexType(result_t)
- vt = vt.astype(result_t, copy=False)
- return w.astype(result_t, copy=False), wrap(vt)
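- # A quick numerical check of the defining relation quoted in the Notes
- # above, dot(a, v[:, i]) == w[i] * v[:, i], done for all columns at once:
- #   >>> a = np.array([[1., 2.], [3., 4.]])
- #   >>> w, v = np.linalg.eig(a)
- #   >>> np.allclose(np.dot(a, v), v * w)
- #   True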
- def eigh(a, UPLO='L'):
- """
- Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
- Returns two objects, a 1-D array containing the eigenvalues of `a`, and
- a 2-D square array or matrix (depending on the input type) of the
- corresponding eigenvectors (in columns).
- Parameters
- ----------
- a : (..., M, M) array
- Hermitian/Symmetric matrices whose eigenvalues and
- eigenvectors are to be computed.
- UPLO : {'L', 'U'}, optional
- Specifies whether the calculation is done with the lower triangular
- part of `a` ('L', default) or the upper triangular part ('U').
- Returns
- -------
- w : (..., M) ndarray
- The eigenvalues in ascending order, each repeated according to
- its multiplicity.
- v : {(..., M, M) ndarray, (..., M, M) matrix}
- The column ``v[:, i]`` is the normalized eigenvector corresponding
- to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
- a matrix object.
- Raises
- ------
- LinAlgError
- If the eigenvalue computation does not converge.
- See Also
- --------
- eigvalsh : eigenvalues of symmetric or Hermitian arrays.
- eig : eigenvalues and right eigenvectors for non-symmetric arrays.
- eigvals : eigenvalues of non-symmetric arrays.
- Notes
- -----
- .. versionadded:: 1.8.0
- Broadcasting rules apply, see the `numpy.linalg` documentation for
- details.
- The eigenvalues/eigenvectors are computed using LAPACK routines _syevd,
- _heevd.
- The eigenvalues of real symmetric or complex Hermitian matrices are
- always real. [1]_ The array `v` of (column) eigenvectors is unitary
- and `a`, `w`, and `v` satisfy the equations
- ``dot(a, v[:, i]) = w[i] * v[:, i]``.
- References
- ----------
- .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
- FL, Academic Press, Inc., 1980, pg. 222.
- Examples
- --------
- >>> from numpy import linalg as LA
- >>> a = np.array([[1, -2j], [2j, 5]])
- >>> a
- array([[ 1.+0.j, 0.-2.j],
- [ 0.+2.j, 5.+0.j]])
- >>> w, v = LA.eigh(a)
- >>> w; v
- array([ 0.17157288, 5.82842712])
- array([[-0.92387953+0.j , -0.38268343+0.j ],
- [ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
- >>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
- array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
- >>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
- array([ 0.+0.j, 0.+0.j])
- >>> A = np.matrix(a) # what happens if input is a matrix object
- >>> A
- matrix([[ 1.+0.j, 0.-2.j],
- [ 0.+2.j, 5.+0.j]])
- >>> w, v = LA.eigh(A)
- >>> w; v
- array([ 0.17157288, 5.82842712])
- matrix([[-0.92387953+0.j , -0.38268343+0.j ],
- [ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
- """
- UPLO = UPLO.upper()
- if UPLO not in ('L', 'U'):
- raise ValueError("UPLO argument must be 'L' or 'U'")
- a, wrap = _makearray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
- t, result_t = _commonType(a)
- extobj = get_linalg_error_extobj(
- _raise_linalgerror_eigenvalues_nonconvergence)
- if UPLO == 'L':
- gufunc = _umath_linalg.eigh_lo
- else:
- gufunc = _umath_linalg.eigh_up
- signature = 'D->dD' if isComplexType(t) else 'd->dd'
- w, vt = gufunc(a, signature=signature, extobj=extobj)
- w = w.astype(_realType(result_t), copy=False)
- vt = vt.astype(result_t, copy=False)
- return w, wrap(vt)
- # Singular value decomposition
- def svd(a, full_matrices=1, compute_uv=1):
- """
- Singular Value Decomposition.
- Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
- are unitary and `s` is a 1-d array of `a`'s singular values.
- Parameters
- ----------
- a : (..., M, N) array_like
- A real or complex matrix of shape (`M`, `N`) .
- full_matrices : bool, optional
- If True (default), `u` and `v` have the shapes (`M`, `M`) and
- (`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
- and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
- compute_uv : bool, optional
- Whether or not to compute `u` and `v` in addition to `s`. True
- by default.
- Returns
- -------
- u : { (..., M, M), (..., M, K) } array
- Unitary matrices. The actual shape depends on the value of
- ``full_matrices``. Only returned when ``compute_uv`` is True.
- s : (..., K) array
- The singular values for every matrix, sorted in descending order.
- v : { (..., N, N), (..., K, N) } array
- Unitary matrices. The actual shape depends on the value of
- ``full_matrices``. Only returned when ``compute_uv`` is True.
- Raises
- ------
- LinAlgError
- If SVD computation does not converge.
- Notes
- -----
- .. versionadded:: 1.8.0
- Broadcasting rules apply, see the `numpy.linalg` documentation for
- details.
- The decomposition is performed using LAPACK routine _gesdd.
- The SVD is commonly written as ``a = U S V.H``. The `v` returned
- by this function is ``V.H`` and ``u = U``.
- If ``U`` is a unitary matrix, it means that it
- satisfies ``U.H = inv(U)``.
- The rows of `v` are the eigenvectors of ``a.H a``. The columns
- of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
- `v` and column ``i`` in `u`, the corresponding eigenvalue is
- ``s[i]**2``.
- If `a` is a `matrix` object (as opposed to an `ndarray`), then so
- are all the return values.
- Examples
- --------
- >>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
- Reconstruction based on full SVD:
- >>> U, s, V = np.linalg.svd(a, full_matrices=True)
- >>> U.shape, V.shape, s.shape
- ((9, 9), (6, 6), (6,))
- >>> S = np.zeros((9, 6), dtype=complex)
- >>> S[:6, :6] = np.diag(s)
- >>> np.allclose(a, np.dot(U, np.dot(S, V)))
- True
- Reconstruction based on reduced SVD:
- >>> U, s, V = np.linalg.svd(a, full_matrices=False)
- >>> U.shape, V.shape, s.shape
- ((9, 6), (6, 6), (6,))
- >>> S = np.diag(s)
- >>> np.allclose(a, np.dot(U, np.dot(S, V)))
- True
- """
- a, wrap = _makearray(a)
- _assertNoEmpty2d(a)
- _assertRankAtLeast2(a)
- t, result_t = _commonType(a)
- extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
- m = a.shape[-2]
- n = a.shape[-1]
- if compute_uv:
- if full_matrices:
- if m < n:
- gufunc = _umath_linalg.svd_m_f
- else:
- gufunc = _umath_linalg.svd_n_f
- else:
- if m < n:
- gufunc = _umath_linalg.svd_m_s
- else:
- gufunc = _umath_linalg.svd_n_s
- signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
- u, s, vt = gufunc(a, signature=signature, extobj=extobj)
- u = u.astype(result_t, copy=False)
- s = s.astype(_realType(result_t), copy=False)
- vt = vt.astype(result_t, copy=False)
- return wrap(u), s, wrap(vt)
- else:
- if m < n:
- gufunc = _umath_linalg.svd_m
- else:
- gufunc = _umath_linalg.svd_n
- signature = 'D->d' if isComplexType(t) else 'd->d'
- s = gufunc(a, signature=signature, extobj=extobj)
- s = s.astype(_realType(result_t), copy=False)
- return s
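- # A short numerical check of the Notes above: the columns of u are
- # eigenvectors of ``a a.H`` with eigenvalues ``s**2`` (and, likewise, the
- # rows of v are eigenvectors of ``a.H a``):
- #   >>> a = np.random.randn(5, 3)
- #   >>> u, s, v = np.linalg.svd(a, full_matrices=False)
- #   >>> np.allclose(np.dot(a, a.conj().T).dot(u), u * s**2)
- #   True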
- def cond(x, p=None):
- """
- Compute the condition number of a matrix.
- This function is capable of returning the condition number using
- one of seven different norms, depending on the value of `p` (see
- Parameters below).
- Parameters
- ----------
- x : (..., M, N) array_like
- The matrix whose condition number is sought.
- p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
- Order of the norm:
- ===== ============================
- p norm for matrices
- ===== ============================
- None 2-norm, computed directly using the ``SVD``
- 'fro' Frobenius norm
- inf max(sum(abs(x), axis=1))
- -inf min(sum(abs(x), axis=1))
- 1 max(sum(abs(x), axis=0))
- -1 min(sum(abs(x), axis=0))
- 2 2-norm (largest sing. value)
- -2 smallest singular value
- ===== ============================
- inf means the numpy.inf object, and the Frobenius norm is
- the root-of-sum-of-squares norm.
- Returns
- -------
- c : {float, inf}
- The condition number of the matrix. May be infinite.
- See Also
- --------
- numpy.linalg.norm
- Notes
- -----
- The condition number of `x` is defined as the norm of `x` times the
- norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
- (root-of-sum-of-squares) or one of a number of other matrix norms.
- References
- ----------
- .. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
- Academic Press, Inc., 1980, pg. 285.
- Examples
- --------
- >>> from numpy import linalg as LA
- >>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
- >>> a
- array([[ 1, 0, -1],
- [ 0, 1, 0],
- [ 1, 0, 1]])
- >>> LA.cond(a)
- 1.4142135623730951
- >>> LA.cond(a, 'fro')
- 3.1622776601683795
- >>> LA.cond(a, np.inf)
- 2.0
- >>> LA.cond(a, -np.inf)
- 1.0
- >>> LA.cond(a, 1)
- 2.0
- >>> LA.cond(a, -1)
- 1.0
- >>> LA.cond(a, 2)
- 1.4142135623730951
- >>> LA.cond(a, -2)
- 0.70710678118654746
- >>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
- 0.70710678118654746
- """
- x = asarray(x) # in case we have a matrix
- if p is None:
- s = svd(x, compute_uv=False)
- return s[..., 0]/s[..., -1]
- else:
- return norm(x, p, axis=(-2, -1)) * norm(inv(x), p, axis=(-2, -1))
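- # A small sketch of the definition given in the Notes: for an invertible
- # matrix, cond equals norm(x) * norm(inv(x)) for the chosen norm:
- #   >>> x = np.array([[1., 2.], [3., 4.]])
- #   >>> lhs = np.linalg.cond(x, 'fro')
- #   >>> rhs = np.linalg.norm(x, 'fro') * np.linalg.norm(np.linalg.inv(x), 'fro')
- #   >>> np.allclose(lhs, rhs)
- #   True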
- def matrix_rank(M, tol=None):
- """
- Return matrix rank of array using SVD method
- Rank of the array is the number of SVD singular values of the array that are
- greater than `tol`.
- Parameters
- ----------
- M : {(M,), (M, N)} array_like
- array of <=2 dimensions
- tol : {None, float}, optional
- threshold below which SVD values are considered zero. If `tol` is
- None, and ``S`` is an array with singular values for `M`, and
- ``eps`` is the epsilon value for datatype of ``S``, then `tol` is
- set to ``S.max() * max(M.shape) * eps``.
- Notes
- -----
- The default threshold to detect rank deficiency is a test on the magnitude
- of the singular values of `M`. By default, we identify singular values less
- than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
- the symbols defined above). This is the algorithm MATLAB uses [1]. It also
- appears in *Numerical recipes* in the discussion of SVD solutions for linear
- least squares [2].
- This default threshold is designed to detect rank deficiency accounting for
- the numerical errors of the SVD computation. Imagine that there is a column
- in `M` that is an exact (in floating point) linear combination of other
- columns in `M`. Computing the SVD on `M` will not produce a singular value
- exactly equal to 0 in general: any difference of the smallest SVD value from
- 0 will be caused by numerical imprecision in the calculation of the SVD.
- Our threshold for small SVD values takes this numerical imprecision into
- account, and the default threshold will detect such numerical rank
- deficiency. The threshold may declare a matrix `M` rank deficient even if
- the linear combination of some columns of `M` is not exactly equal to
- another column of `M` but only numerically very close to another column of
- `M`.
- We chose our default threshold because it is in wide use. Other thresholds
- are possible. For example, elsewhere in the 2007 edition of *Numerical
- recipes* there is an alternative threshold of ``S.max() *
- np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
- this threshold as being based on "expected roundoff error" (p 71).
- The thresholds above deal with floating point roundoff error in the
- calculation of the SVD. However, you may have more information about the
- sources of error in `M` that would make you consider other tolerance values
- to detect *effective* rank deficiency. The most useful measure of the
- tolerance depends on the operations you intend to use on your matrix. For
- example, if your data come from uncertain measurements with uncertainties
- greater than floating point epsilon, choosing a tolerance near that
- uncertainty may be preferable. The tolerance may be absolute if the
- uncertainties are absolute rather than relative.
- References
- ----------
- .. [1] MATLAB reference documentation, "Rank"
- http://www.mathworks.com/help/techdoc/ref/rank.html
- .. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
- "Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
- page 795.
- Examples
- --------
- >>> from numpy.linalg import matrix_rank
- >>> matrix_rank(np.eye(4)) # Full rank matrix
- 4
- >>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
- >>> matrix_rank(I)
- 3
- >>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
- 1
- >>> matrix_rank(np.zeros((4,)))
- 0
- """
- M = asarray(M)
- if M.ndim > 2:
- raise TypeError('array should have 2 or fewer dimensions')
- if M.ndim < 2:
- return int(not all(M==0))
- S = svd(M, compute_uv=False)
- if tol is None:
- tol = S.max() * max(M.shape) * finfo(S.dtype).eps
- return sum(S > tol)
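- # A brief sketch of the tol parameter discussed above: with one tiny
- # singular value, the default threshold treats the matrix as rank deficient,
- # while an explicit, smaller tol counts it as full rank:
- #   >>> M = np.diag([1., 1e-20])
- #   >>> np.linalg.matrix_rank(M)
- #   1
- #   >>> np.linalg.matrix_rank(M, tol=1e-30)
- #   2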
- # Generalized inverse
- def pinv(a, rcond=1e-15 ):
- """
- Compute the (Moore-Penrose) pseudo-inverse of a matrix.
- Calculate the generalized inverse of a matrix using its
- singular-value decomposition (SVD) and including all
- *large* singular values.
- Parameters
- ----------
- a : (M, N) array_like
- Matrix to be pseudo-inverted.
- rcond : float
- Cutoff for small singular values.
- Singular values smaller (in modulus) than
- `rcond` * largest_singular_value (again, in modulus)
- are set to zero.
- Returns
- -------
- B : (N, M) ndarray
- The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
- is `B`.
- Raises
- ------
- LinAlgError
- If the SVD computation does not converge.
- Notes
- -----
- The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
- defined as: "the matrix that 'solves' [the least-squares problem]
- :math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
- :math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
- It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
- value decomposition of A, then
- :math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
- orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
- of A's so-called singular values, (followed, typically, by
- zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
- consisting of the reciprocals of A's singular values
- (again, followed by zeros). [1]_
- References
- ----------
- .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
- FL, Academic Press, Inc., 1980, pp. 139-142.
- Examples
- --------
- The following example checks that ``a * a+ * a == a`` and
- ``a+ * a * a+ == a+``:
- >>> a = np.random.randn(9, 6)
- >>> B = np.linalg.pinv(a)
- >>> np.allclose(a, np.dot(a, np.dot(B, a)))
- True
- >>> np.allclose(B, np.dot(B, np.dot(a, B)))
- True
- """
- a, wrap = _makearray(a)
- _assertNoEmpty2d(a)
- a = a.conjugate()
- u, s, vt = svd(a, 0)
- m = u.shape[0]
- n = vt.shape[1]
- cutoff = rcond*maximum.reduce(s)
- for i in range(min(n, m)):
- if s[i] > cutoff:
- s[i] = 1./s[i]
- else:
- s[i] = 0.
- res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
- return wrap(res)
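- # A compact numerical check of the SVD formula in the Notes above,
- # ``A+ = V diag(1/s) U.H`` (a full-rank example, so no singular values are
- # cut off):
- #   >>> a = np.random.randn(6, 4)
- #   >>> u, s, vt = np.linalg.svd(a, full_matrices=False)
- #   >>> B = np.dot(vt.T.conj() / s, u.T.conj())
- #   >>> np.allclose(B, np.linalg.pinv(a))
- #   True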
- # Determinant
- def slogdet(a):
- """
- Compute the sign and (natural) logarithm of the determinant of an array.
- If an array has a very small or very large determinant, then a call to
- `det` may overflow or underflow. This routine is more robust against such
- issues, because it computes the logarithm of the determinant rather than
- the determinant itself.
- Parameters
- ----------
- a : (..., M, M) array_like
- Input array, has to be a square 2-D array.
- Returns
- -------
- sign : (...) array_like
- A number representing the sign of the determinant. For a real matrix,
- this is 1, 0, or -1. For a complex matrix, this is a complex number
- with absolute value 1 (i.e., it is on the unit circle), or else 0.
- logdet : (...) array_like
- The natural log of the absolute value of the determinant.
- If the determinant is zero, then `sign` will be 0 and `logdet` will be
- -Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
- See Also
- --------
- det
- Notes
- -----
- .. versionadded:: 1.8.0
- Broadcasting rules apply, see the `numpy.linalg` documentation for
- details.
- .. versionadded:: 1.6.0.
- The determinant is computed via LU factorization using the LAPACK
- routine z/dgetrf.
- Examples
- --------
- The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
- >>> a = np.array([[1, 2], [3, 4]])
- >>> (sign, logdet) = np.linalg.slogdet(a)
- >>> (sign, logdet)
- (-1, 0.69314718055994529)
- >>> sign * np.exp(logdet)
- -2.0
- Computing log-determinants for a stack of matrices:
- >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
- >>> a.shape
- (3, 2, 2)
- >>> sign, logdet = np.linalg.slogdet(a)
- >>> (sign, logdet)
- (array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
- >>> sign * np.exp(logdet)
- array([-2., -3., -8.])
- This routine succeeds where ordinary `det` does not:
- >>> np.linalg.det(np.eye(500) * 0.1)
- 0.0
- >>> np.linalg.slogdet(np.eye(500) * 0.1)
- (1, -1151.2925464970228)
- """
- a = asarray(a)
- _assertNoEmpty2d(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
- t, result_t = _commonType(a)
- real_t = _realType(result_t)
- signature = 'D->Dd' if isComplexType(t) else 'd->dd'
- sign, logdet = _umath_linalg.slogdet(a, signature=signature)
- if isscalar(sign):
- sign = sign.astype(result_t)
- else:
- sign = sign.astype(result_t, copy=False)
- if isscalar(logdet):
- logdet = logdet.astype(real_t)
- else:
- logdet = logdet.astype(real_t, copy=False)
- return sign, logdet
- def det(a):
- """
- Compute the determinant of an array.
- Parameters
- ----------
- a : (..., M, M) array_like
- Input array to compute determinants for.
- Returns
- -------
- det : (...) array_like
- Determinant of `a`.
- See Also
- --------
- slogdet : Another way of representing the determinant, more suitable
- for large matrices where underflow/overflow may occur.
- Notes
- -----
- .. versionadded:: 1.8.0
- Broadcasting rules apply, see the `numpy.linalg` documentation for
- details.
- The determinant is computed via LU factorization using the LAPACK
- routine z/dgetrf.
- Examples
- --------
- The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
- >>> a = np.array([[1, 2], [3, 4]])
- >>> np.linalg.det(a)
- -2.0
- Computing determinants for a stack of matrices:
- >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
- >>> a.shape
- (3, 2, 2)
- >>> np.linalg.det(a)
- array([-2., -3., -8.])
- """
- a = asarray(a)
- _assertNoEmpty2d(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
- t, result_t = _commonType(a)
- signature = 'D->D' if isComplexType(t) else 'd->d'
- r = _umath_linalg.det(a, signature=signature)
- if isscalar(r):
- r = r.astype(result_t)
- else:
- r = r.astype(result_t, copy=False)
- return r
- # Linear Least Squares
- def lstsq(a, b, rcond=-1):
- """
- Return the least-squares solution to a linear matrix equation.
- Solves the equation `a x = b` by computing a vector `x` that
- minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
- be under-, well-, or over- determined (i.e., the number of
- linearly independent rows of `a` can be less than, equal to, or
- greater than its number of linearly independent columns). If `a`
- is square and of full rank, then `x` (but for round-off error) is
- the "exact" solution of the equation.
- Parameters
- ----------
- a : (M, N) array_like
- "Coefficient" matrix.
- b : {(M,), (M, K)} array_like
- Ordinate or "dependent variable" values. If `b` is two-dimensional,
- the least-squares solution is calculated for each of the `K` columns
- of `b`.
- rcond : float, optional
- Cut-off ratio for small singular values of `a`.
- Singular values are set to zero if they are smaller than `rcond`
- times the largest singular value of `a`.
- Returns
- -------
- x : {(N,), (N, K)} ndarray
- Least-squares solution. If `b` is two-dimensional,
- the solutions are in the `K` columns of `x`.
- residuals : {(), (1,), (K,)} ndarray
- Sums of residuals; squared Euclidean 2-norm for each column in
- ``b - a*x``.
- If the rank of `a` is < N or M <= N, this is an empty array.
- If `b` is 1-dimensional, this is a (1,) shape array.
- Otherwise the shape is (K,).
- rank : int
- Rank of matrix `a`.
- s : (min(M, N),) ndarray
- Singular values of `a`.
- Raises
- ------
- LinAlgError
- If computation does not converge.
- Notes
- -----
- If `b` is a matrix, then all array results are returned as matrices.
- Examples
- --------
- Fit a line, ``y = mx + c``, through some noisy data-points:
- >>> x = np.array([0, 1, 2, 3])
- >>> y = np.array([-1, 0.2, 0.9, 2.1])
- By examining the coefficients, we see that the line should have a
- gradient of roughly 1 and cut the y-axis at, more or less, -1.
- We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
- and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
- >>> A = np.vstack([x, np.ones(len(x))]).T
- >>> A
- array([[ 0., 1.],
- [ 1., 1.],
- [ 2., 1.],
- [ 3., 1.]])
- >>> m, c = np.linalg.lstsq(A, y)[0]
- >>> print(m, c)
- 1.0 -0.95
- Plot the data along with the fitted line:
- >>> import matplotlib.pyplot as plt
- >>> plt.plot(x, y, 'o', label='Original data', markersize=10)
- >>> plt.plot(x, m*x + c, 'r', label='Fitted line')
- >>> plt.legend()
- >>> plt.show()
- """
- import math
- a, _ = _makearray(a)
- b, wrap = _makearray(b)
- is_1d = len(b.shape) == 1
- if is_1d:
- b = b[:, newaxis]
- _assertRank2(a, b)
- m = a.shape[0]
- n = a.shape[1]
- n_rhs = b.shape[1]
- ldb = max(n, m)
- if m != b.shape[0]:
- raise LinAlgError('Incompatible dimensions')
- t, result_t = _commonType(a, b)
- result_real_t = _realType(result_t)
- real_t = _linalgRealType(t)
- bstar = zeros((ldb, n_rhs), t)
- bstar[:b.shape[0], :n_rhs] = b.copy()
- a, bstar = _fastCopyAndTranspose(t, a, bstar)
- a, bstar = _to_native_byte_order(a, bstar)
- s = zeros((min(m, n),), real_t)
- nlvl = max(0, int(math.log(float(min(m, n)) / 2.)) + 1)
- iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
- if isComplexType(t):
- lapack_routine = lapack_lite.zgelsd
- lwork = 1
- rwork = zeros((lwork,), real_t)
- work = zeros((lwork,), t)
- results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
- 0, work, -1, rwork, iwork, 0)
- lwork = int(abs(work[0]))
- rwork = zeros((lwork,), real_t)
- a_real = zeros((m, n), real_t)
- bstar_real = zeros((ldb, n_rhs,), real_t)
- results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
- bstar_real, ldb, s, rcond,
- 0, rwork, -1, iwork, 0)
- lrwork = int(rwork[0])
- work = zeros((lwork,), t)
- rwork = zeros((lrwork,), real_t)
- results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
- 0, work, lwork, rwork, iwork, 0)
- else:
- lapack_routine = lapack_lite.dgelsd
- lwork = 1
- work = zeros((lwork,), t)
- results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
- 0, work, -1, iwork, 0)
- lwork = int(work[0])
- work = zeros((lwork,), t)
- results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
- 0, work, lwork, iwork, 0)
- if results['info'] > 0:
- raise LinAlgError('SVD did not converge in Linear Least Squares')
- resids = array([], result_real_t)
- if is_1d:
- x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
- if results['rank'] == n and m > n:
- if isComplexType(t):
- resids = array([sum(abs(ravel(bstar)[n:])**2)],
- dtype=result_real_t)
- else:
- resids = array([sum((ravel(bstar)[n:])**2)],
- dtype=result_real_t)
- else:
- x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
- if results['rank'] == n and m > n:
- if isComplexType(t):
- resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
- result_real_t, copy=False)
- else:
- resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
- result_real_t, copy=False)
- st = s[:min(n, m)].astype(result_real_t, copy=True)
- return wrap(x), wrap(resids), results['rank'], st
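- # Editor-added illustration (hypothetical demo helper): for the overdetermined,
- # full-rank line fit used in the docstring above, the least-squares solution
- # agrees with the pseudoinverse solution up to round-off.
- def _lstsq_vs_pinv_demo():
-     import numpy as np
-     A = np.vstack([np.arange(4.), np.ones(4)]).T    # the docstring's design matrix
-     y = np.array([-1., 0.2, 0.9, 2.1])
-     x_lstsq = np.linalg.lstsq(A, y)[0]
-     x_pinv = np.dot(np.linalg.pinv(A), y)
-     return np.allclose(x_lstsq, x_pinv)             # expected: True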
- def _multi_svd_norm(x, row_axis, col_axis, op):
- """Compute a function of the singular values of the 2-D matrices in `x`.
- This is a private utility function used by numpy.linalg.norm().
- Parameters
- ----------
- x : ndarray
- row_axis, col_axis : int
- The axes of `x` that hold the 2-D matrices.
- op : callable
- This should be `numpy.amin`, `numpy.amax`, or `numpy.sum`.
- Returns
- -------
- result : float or ndarray
- If `x` is 2-D, the return value is a float.
- Otherwise, it is an array with ``x.ndim - 2`` dimensions.
- The return values are the minimum, maximum, or sum of the
- singular values of the matrices, depending on whether `op`
- is `numpy.amin`, `numpy.amax`, or `numpy.sum`.
- """
- if row_axis > col_axis:
- row_axis -= 1
- y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)
- result = op(svd(y, compute_uv=0), axis=-1)
- return result
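- # Editor-added sketch (hypothetical demo helper): with ``op=numpy.amax`` the
- # helper above reduces to the spectral (2-)norm of each matrix in a stack;
- # the same values come from taking the largest singular value directly.
- def _spectral_norm_demo():
-     import numpy as np
-     x = np.random.randn(4, 3, 3)                    # stack of four 3x3 matrices
-     direct = np.amax(np.linalg.svd(x, compute_uv=False), axis=-1)
-     via_norm = np.array([np.linalg.norm(mat, ord=2) for mat in x])
-     return np.allclose(direct, via_norm)            # expected: True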
- def norm(x, ord=None, axis=None, keepdims=False):
- """
- Matrix or vector norm.
- This function is able to return one of eight different matrix norms,
- or one of an infinite number of vector norms (described below), depending
- on the value of the ``ord`` parameter.
- Parameters
- ----------
- x : array_like
- Input array. If `axis` is None, `x` must be 1-D or 2-D.
- ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
- Order of the norm (see table under ``Notes``). inf means numpy's
- `inf` object.
- axis : {int, 2-tuple of ints, None}, optional
- If `axis` is an integer, it specifies the axis of `x` along which to
- compute the vector norms. If `axis` is a 2-tuple, it specifies the
- axes that hold 2-D matrices, and the matrix norms of these matrices
- are computed. If `axis` is None then either a vector norm (when `x`
- is 1-D) or a matrix norm (when `x` is 2-D) is returned.
- keepdims : bool, optional
- If this is set to True, the axes which are normed over are left in the
- result as dimensions with size one. With this option the result will
- broadcast correctly against the original `x`.
- .. versionadded:: 1.10.0
- Returns
- -------
- n : float or ndarray
- Norm of the matrix or vector(s).
- Notes
- -----
- For values of ``ord <= 0``, the result is, strictly speaking, not a
- mathematical 'norm', but it may still be useful for various numerical
- purposes.
- The following norms can be calculated:
- ===== ============================ ==========================
- ord norm for matrices norm for vectors
- ===== ============================ ==========================
- None Frobenius norm 2-norm
- 'fro' Frobenius norm --
- 'nuc' nuclear norm --
- inf max(sum(abs(x), axis=1)) max(abs(x))
- -inf min(sum(abs(x), axis=1)) min(abs(x))
- 0 -- sum(x != 0)
- 1 max(sum(abs(x), axis=0)) as below
- -1 min(sum(abs(x), axis=0)) as below
- 2 2-norm (largest sing. value) as below
- -2 smallest singular value as below
- other -- sum(abs(x)**ord)**(1./ord)
- ===== ============================ ==========================
- The Frobenius norm is given by [1]_:
- :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
- The nuclear norm is the sum of the singular values.
- References
- ----------
- .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
- Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
- Examples
- --------
- >>> from numpy import linalg as LA
- >>> a = np.arange(9) - 4
- >>> a
- array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
- >>> b = a.reshape((3, 3))
- >>> b
- array([[-4, -3, -2],
- [-1, 0, 1],
- [ 2, 3, 4]])
- >>> LA.norm(a)
- 7.745966692414834
- >>> LA.norm(b)
- 7.745966692414834
- >>> LA.norm(b, 'fro')
- 7.745966692414834
- >>> LA.norm(a, np.inf)
- 4.0
- >>> LA.norm(b, np.inf)
- 9.0
- >>> LA.norm(a, -np.inf)
- 0.0
- >>> LA.norm(b, -np.inf)
- 2.0
- >>> LA.norm(a, 1)
- 20.0
- >>> LA.norm(b, 1)
- 7.0
- >>> LA.norm(a, -1)
- -4.6566128774142013e-010
- >>> LA.norm(b, -1)
- 6.0
- >>> LA.norm(a, 2)
- 7.745966692414834
- >>> LA.norm(b, 2)
- 7.3484692283495345
- >>> LA.norm(a, -2)
- nan
- >>> LA.norm(b, -2)
- 1.8570331885190563e-016
- >>> LA.norm(a, 3)
- 5.8480354764257312
- >>> LA.norm(a, -3)
- nan
- Using the `axis` argument to compute vector norms:
- >>> c = np.array([[ 1, 2, 3],
- ... [-1, 1, 4]])
- >>> LA.norm(c, axis=0)
- array([ 1.41421356, 2.23606798, 5. ])
- >>> LA.norm(c, axis=1)
- array([ 3.74165739, 4.24264069])
- >>> LA.norm(c, ord=1, axis=1)
- array([ 6., 6.])
- Using the `axis` argument to compute matrix norms:
- >>> m = np.arange(8).reshape(2,2,2)
- >>> LA.norm(m, axis=(1,2))
- array([ 3.74165739, 11.22497216])
- >>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
- (3.7416573867739413, 11.224972160321824)
- """
- x = asarray(x)
- if not issubclass(x.dtype.type, (inexact, object_)):
- x = x.astype(float)
- # Immediately handle some default, simple, fast, and common cases.
- if axis is None:
- ndim = x.ndim
- if ((ord is None) or
- (ord in ('f', 'fro') and ndim == 2) or
- (ord == 2 and ndim == 1)):
- x = x.ravel(order='K')
- if isComplexType(x.dtype.type):
- sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
- else:
- sqnorm = dot(x, x)
- ret = sqrt(sqnorm)
- if keepdims:
- ret = ret.reshape(ndim*[1])
- return ret
- # Normalize the `axis` argument to a tuple.
- nd = x.ndim
- if axis is None:
- axis = tuple(range(nd))
- elif not isinstance(axis, tuple):
- try:
- axis = int(axis)
- except:
- raise TypeError("'axis' must be None, an integer or a tuple of integers")
- axis = (axis,)
- if len(axis) == 1:
- if ord == Inf:
- return abs(x).max(axis=axis, keepdims=keepdims)
- elif ord == -Inf:
- return abs(x).min(axis=axis, keepdims=keepdims)
- elif ord == 0:
- # Zero norm
- return (x != 0).astype(float).sum(axis=axis, keepdims=keepdims)
- elif ord == 1:
- # special case for speedup
- return add.reduce(abs(x), axis=axis, keepdims=keepdims)
- elif ord is None or ord == 2:
- # special case for speedup
- s = (x.conj() * x).real
- return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
- else:
- try:
- ord + 1
- except TypeError:
- raise ValueError("Invalid norm order for vectors.")
- if x.dtype.type is longdouble:
- # Convert to a float type, so integer arrays give
- # float results. Don't apply asfarray to longdouble arrays,
- # because it will downcast to float64.
- absx = abs(x)
- else:
- absx = x if isComplexType(x.dtype.type) else asfarray(x)
- if absx.dtype is x.dtype:
- absx = abs(absx)
- else:
- # if the type changed, we can safely overwrite absx
- abs(absx, out=absx)
- absx **= ord
- return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord)
- elif len(axis) == 2:
- row_axis, col_axis = axis
- if row_axis < 0:
- row_axis += nd
- if col_axis < 0:
- col_axis += nd
- if not (0 <= row_axis < nd and 0 <= col_axis < nd):
- raise ValueError('Invalid axis %r for an array with shape %r' %
- (axis, x.shape))
- if row_axis == col_axis:
- raise ValueError('Duplicate axes given.')
- if ord == 2:
- ret = _multi_svd_norm(x, row_axis, col_axis, amax)
- elif ord == -2:
- ret = _multi_svd_norm(x, row_axis, col_axis, amin)
- elif ord == 1:
- if col_axis > row_axis:
- col_axis -= 1
- ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
- elif ord == Inf:
- if row_axis > col_axis:
- row_axis -= 1
- ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
- elif ord == -1:
- if col_axis > row_axis:
- col_axis -= 1
- ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
- elif ord == -Inf:
- if row_axis > col_axis:
- row_axis -= 1
- ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
- elif ord in [None, 'fro', 'f']:
- ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
- elif ord == 'nuc':
- ret = _multi_svd_norm(x, row_axis, col_axis, sum)
- else:
- raise ValueError("Invalid norm order for matrices.")
- if keepdims:
- ret_shape = list(x.shape)
- ret_shape[axis[0]] = 1
- ret_shape[axis[1]] = 1
- ret = ret.reshape(ret_shape)
- return ret
- else:
- raise ValueError("Improper number of dimensions to norm.")
- # multi_dot
- def multi_dot(arrays):
- """
- Compute the dot product of two or more arrays in a single function call,
- while automatically selecting the fastest evaluation order.
- `multi_dot` chains `numpy.dot` and uses optimal parenthesization
- of the matrices [1]_ [2]_. Depending on the shapes of the matrices,
- this can speed up the multiplication a lot.
- If the first argument is 1-D it is treated as a row vector.
- If the last argument is 1-D it is treated as a column vector.
- The other arguments must be 2-D.
- Think of `multi_dot` as::
- def multi_dot(arrays): return functools.reduce(np.dot, arrays)
- Parameters
- ----------
- arrays : sequence of array_like
- If the first argument is 1-D it is treated as row vector.
- If the last argument is 1-D it is treated as column vector.
- The other arguments must be 2-D.
- Returns
- -------
- output : ndarray
- Returns the dot product of the supplied arrays.
- See Also
- --------
- dot : dot multiplication with two arguments.
- References
- ----------
- .. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
- .. [2] http://en.wikipedia.org/wiki/Matrix_chain_multiplication
- Examples
- --------
- `multi_dot` allows you to write::
- >>> from numpy.linalg import multi_dot
- >>> # Prepare some data
- >>> A = np.random.random((10000, 100))
- >>> B = np.random.random((100, 1000))
- >>> C = np.random.random((1000, 5))
- >>> D = np.random.random((5, 333))
- >>> # the actual dot multiplication
- >>> multi_dot([A, B, C, D])
- instead of::
- >>> np.dot(np.dot(np.dot(A, B), C), D)
- >>> # or
- >>> A.dot(B).dot(C).dot(D)
- Example: multiplication costs of different parenthesizations
- ------------------------------------------------------------
- The cost for a matrix multiplication can be calculated with the
- following function::
- def cost(A, B): return A.shape[0] * A.shape[1] * B.shape[1]
- Let's assume we have three matrices
- :math:`A_{10x100}, B_{100x5}, C_{5x50}`.
- The costs for the two different parenthesizations are as follows::
- cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500
- cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
- """
- n = len(arrays)
- # optimization only makes sense for len(arrays) > 2
- if n < 2:
- raise ValueError("Expecting at least two arrays.")
- elif n == 2:
- return dot(arrays[0], arrays[1])
- arrays = [asanyarray(a) for a in arrays]
- # save original ndim to reshape the result array into the proper form later
- ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
- # Explicitly convert vectors to 2D arrays to keep the logic of the internal
- # _multi_dot_* functions as simple as possible.
- if arrays[0].ndim == 1:
- arrays[0] = atleast_2d(arrays[0])
- if arrays[-1].ndim == 1:
- arrays[-1] = atleast_2d(arrays[-1]).T
- _assertRank2(*arrays)
- # _multi_dot_three is much faster than _multi_dot_matrix_chain_order
- if n == 3:
- result = _multi_dot_three(arrays[0], arrays[1], arrays[2])
- else:
- order = _multi_dot_matrix_chain_order(arrays)
- result = _multi_dot(arrays, order, 0, n - 1)
- # return proper shape
- if ndim_first == 1 and ndim_last == 1:
- return result[0, 0] # scalar
- elif ndim_first == 1 or ndim_last == 1:
- return result.ravel() # 1-D
- else:
- return result
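- # Editor-added illustration of the cost comparison in the docstring above,
- # using the same hypothetical ``cost`` helper (demo function only).
- def _multi_dot_cost_demo():
-     import numpy as np
-     def cost(X, Y):
-         return X.shape[0] * X.shape[1] * Y.shape[1]
-     A, B, C = np.empty((10, 100)), np.empty((100, 5)), np.empty((5, 50))
-     AB = np.empty((10, 5))                          # shape of the product A B
-     BC = np.empty((100, 50))                        # shape of the product B C
-     cost_left = cost(A, B) + cost(AB, C)            # (AB)C: 5000 + 2500 = 7500
-     cost_right = cost(B, C) + cost(A, BC)           # A(BC): 25000 + 50000 = 75000
-     return cost_left, cost_right                    # multi_dot picks the cheaper split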
- def _multi_dot_three(A, B, C):
- """
- Find the best order for three arrays and do the multiplication.
- For three arguments `_multi_dot_three` is approximately 15 times faster
- than `_multi_dot_matrix_chain_order`
- """
- # cost1 = cost((AB)C)
- cost1 = (A.shape[0] * A.shape[1] * B.shape[1] + # (AB)
- A.shape[0] * B.shape[1] * C.shape[1]) # (--)C
- # cost2 = cost(A(BC))
- cost2 = (B.shape[0] * B.shape[1] * C.shape[1] + # (BC)
- A.shape[0] * A.shape[1] * C.shape[1]) # A(--)
- if cost1 < cost2:
- return dot(dot(A, B), C)
- else:
- return dot(A, dot(B, C))
- def _multi_dot_matrix_chain_order(arrays, return_costs=False):
- """
- Return a np.array that encodes the optimal order of multiplications.
- The optimal order array is then used by `_multi_dot()` to do the
- multiplication.
- Also return the cost matrix if `return_costs` is `True`
- The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
- Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
- cost[i, j] = min([
- cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
- for k in range(i, j)])
- """
- n = len(arrays)
- # p stores the dimensions of the matrices
- # Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
- p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
- # m is a matrix of costs of the subproblems
- # m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
- m = zeros((n, n), dtype=double)
- # s is the actual ordering
- # s[i, j] is the value of k at which we split the product A_i..A_j
- s = empty((n, n), dtype=intp)
- for l in range(1, n):
- for i in range(n - l):
- j = i + l
- m[i, j] = Inf
- for k in range(i, j):
- q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
- if q < m[i, j]:
- m[i, j] = q
- s[i, j] = k # Note that Cormen uses 1-based index
- return (s, m) if return_costs else s
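- # Editor-added sketch (assumes the definitions above are in scope): for the
- # A_{10x100}, B_{100x5}, C_{5x50} example the split table chooses k = 1, i.e.
- # multiply (A B) first, at a total cost of 7500 scalar multiplications.
- def _chain_order_demo():
-     import numpy as np
-     mats = [np.empty((10, 100)), np.empty((100, 5)), np.empty((5, 50))]
-     s, m = _multi_dot_matrix_chain_order(mats, return_costs=True)
-     return s[0, 2], m[0, 2]                         # expected: (1, 7500.0)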
- def _multi_dot(arrays, order, i, j):
- """Actually do the multiplication with the given order."""
- if i == j:
- return arrays[i]
- else:
- return dot(_multi_dot(arrays, order, i, order[i, j]),
- _multi_dot(arrays, order, order[i, j] + 1, j))
|