Mirror of https://github.com/ml-explore/mlx.git (synced 2025-10-18 15:28:16 +08:00)
QR factorization (#310)
* add qr factorization

---------

Co-authored-by: Awni Hannun <awni@apple.com>
@@ -177,4 +177,37 @@ void init_linalg(py::module_& parent_module) {
        >>> la.norm(m[0, :, :]), LA.norm(m[1, :, :])
        (array(3.74166, dtype=float32), array(11.225, dtype=float32))
      )pbdoc");
  m.def(
      "qr",
      &qr,
      "a"_a,
      py::kw_only(),
      "stream"_a = none,
      R"pbdoc(
        qr(a: array, *, stream: Union[None, Stream, Device] = None) -> (array, array)

        The QR factorization of the input matrix.

        This function supports arrays with at least 2 dimensions. The matrices
        which are factorized are assumed to be in the last two dimensions of
        the input.

        Args:
            a (array): Input array.
            stream (Stream, optional): Stream or device. Defaults to ``None``
              in which case the default stream of the default device is used.

        Returns:
            tuple(array, array): The ``Q`` and ``R`` matrices.

        Example:
            >>> A = mx.array([[2., 3.], [1., 2.]])
            >>> Q, R = mx.linalg.qr(A, stream=mx.cpu)
            >>> Q
            array([[-0.894427, -0.447214],
                   [-0.447214, 0.894427]], dtype=float32)
            >>> R
            array([[-2.23607, -3.57771],
                   [0, 0.447214]], dtype=float32)
      )pbdoc");
}
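For orientation, here is a minimal usage sketch (not part of the diff) of the mx.linalg.qr binding added above, assuming an environment with mlx installed. It mirrors the docstring example, requesting the CPU stream explicitly just as the docstring and tests in this commit do.

    import mlx.core as mx

    # Factorize a small matrix; the CPU stream is requested explicitly,
    # matching the examples in this commit.
    A = mx.array([[2.0, 3.0], [1.0, 2.0]])
    Q, R = mx.linalg.qr(A, stream=mx.cpu)

    # Q has orthonormal columns and R is upper triangular;
    # their product reconstructs the input.
    print(mx.allclose(Q @ R, A))                        # array(True, dtype=bool)
    print(mx.allclose(mx.transpose(Q) @ Q, mx.eye(2)))  # array(True, dtype=bool)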
@@ -55,7 +55,7 @@ void init_ops(py::module_& m) {
        Args:
            a (array): Input array.
            shape (tuple(int)): New shape.
-            stream (Stream, optional): Stream or device. Defaults to ```None```
+            stream (Stream, optional): Stream or device. Defaults to ``None``
              in which case the default stream of the default device is used.

        Returns:
@@ -112,7 +112,7 @@ void init_ops(py::module_& m) {
        Args:
            a (array): Input array.
            axis (int or tuple(int), optional): Axes to remove. Defaults
-              to ```None``` in which case all size one axes are removed.
+              to ``None`` in which case all size one axes are removed.

        Returns:
            array: The output array with size one axes removed.
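Both ops.cpp hunks make the same docstring fix: in the reStructuredText these docstrings are rendered with, ``None`` (double backticks) is the inline-literal markup, whereas ```None``` (triple backticks) leaves stray backticks in the generated documentation.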
@@ -89,6 +89,37 @@ class TestLinalg(mlx_tests.MLXTestCase):
        out_mx = mx.linalg.norm(x_mx, ord="fro")
        self.assertTrue(np.allclose(out_np, out_mx, atol=1e-5, rtol=1e-6))

    def test_qr_factorization(self):
        with self.assertRaises(ValueError):
            mx.linalg.qr(mx.array(0.0))

        with self.assertRaises(ValueError):
            mx.linalg.qr(mx.array([0.0, 1.0]))

        with self.assertRaises(ValueError):
            mx.linalg.qr(mx.array([[0, 1], [1, 0]]))

        A = mx.array([[2.0, 3.0], [1.0, 2.0]])
        Q, R = mx.linalg.qr(A, stream=mx.cpu)
        out = Q @ R
        self.assertTrue(mx.allclose(out, A))
        out = Q @ Q
        self.assertTrue(mx.allclose(out, mx.eye(2), rtol=1e-5, atol=1e-7))
        self.assertTrue(mx.allclose(mx.tril(R, -1), mx.zeros_like(R)))
        self.assertEqual(Q.dtype, mx.float32)
        self.assertEqual(R.dtype, mx.float32)

        # Multiple matrices
        B = mx.array([[-1.0, 2.0], [-4.0, 1.0]])
        A = mx.stack([A, B])
        Q, R = mx.linalg.qr(A, stream=mx.cpu)
        for a, q, r in zip(A, Q, R):
            out = q @ r
            self.assertTrue(mx.allclose(out, a))
            out = q @ q
            self.assertTrue(mx.allclose(out, mx.eye(2), rtol=1e-5, atol=1e-7))
            self.assertTrue(mx.allclose(mx.tril(r, -1), mx.zeros_like(r)))


if __name__ == "__main__":
    unittest.main()
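One hedged note on the new test (not a change to the commit): the orthogonality assertions compare Q @ Q to the identity, which passes here because the Q returned for these 2x2 inputs happens to be symmetric (a single Householder reflector), so Q @ Q equals Q @ Q^T. A shape-agnostic check would multiply Q by its transpose, sketched below.

    import mlx.core as mx

    A = mx.array([[2.0, 3.0], [1.0, 2.0]])
    Q, R = mx.linalg.qr(A, stream=mx.cpu)
    # Columns of Q are orthonormal, so Q^T Q should equal the identity
    # regardless of whether Q itself is symmetric.
    assert mx.allclose(mx.transpose(Q) @ Q, mx.eye(2), rtol=1e-5, atol=1e-7)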