Ecosyste.ms: Awesome
An open API service indexing awesome lists of open source software.
https://github.com/tjkessler/mojoml
Linear algebra and machine learning in Mojo 🔥
https://github.com/tjkessler/mojoml
linear-algebra machine-learning modular mojo
Last synced: about 2 months ago
JSON representation
Linear algebra and machine learning in Mojo 🔥
- Host: GitHub
- URL: https://github.com/tjkessler/mojoml
- Owner: tjkessler
- License: apache-2.0
- Created: 2023-12-31T19:22:53.000Z (11 months ago)
- Default Branch: main
- Last Pushed: 2024-01-07T03:28:42.000Z (11 months ago)
- Last Synced: 2024-01-07T04:25:08.701Z (11 months ago)
- Topics: linear-algebra, machine-learning, modular, mojo
- Homepage:
- Size: 1.63 MB
- Stars: 1
- Watchers: 1
- Forks: 0
- Open Issues: 0
-
Metadata Files:
- Readme: README.md
- License: LICENSE
Awesome Lists containing this project
- awesome-max-mojo - tjkessler/mojoml
- awesome-max-mojo - tjkessler/mojoml
README
# mojoml 🔥
Linear algebra and machine learning in Mojo 🔥
(Heavily inspired by the [official Mojo documentation](https://docs.modular.com/mojo/))
## Usage
Move the `mojoml.mojopkg` file to your current working directory, or build from source with:
```
$ git clone https://github.com/tjkessler/mojoml
$ cd mojoml
$ mojo package mojoml -o mojoml.mojopkg
```
Matrix operations:
```python
from mojoml.structs import Matrix
from mojoml.structs.generators import random_matrix
from mojoml.linalg import matmul, norm, transpose

fn main() -> None:
    let m1: Matrix = random_matrix(16, 32)
    let m2: Matrix = random_matrix(32, 48)

    # m_matmul <- m1 @ m2
    let m_matmul: Matrix = Matrix(16, 48)
    matmul(m_matmul, m1, m2)

    let m3: Matrix = random_matrix(32, 32)
    let m3_norm: Float32 = norm(m3)

    # m1_T <- m1.T
    let m1_T: Matrix = Matrix(32, 16)
    transpose(m1_T, m1)
```
Feed-forward, activation, loss (more to come!):
```python
from mojoml.structs import Matrix
from mojoml.structs.generators import random_matrix
from mojoml.nn import Linear
from mojoml.nn.functional import mse_loss, relu, sigmoid

fn main() -> None:
    # define a layer with 16 inputs, 32 outputs
    let layer: Linear = Linear(16, 32)

    # data to feed forward; 8 samples, 16 features per sample
    let inputs: Matrix = random_matrix(8, 16)

    # output shape is 8 samples, 32 outputs per sample
    let outputs: Matrix = Matrix(8, 32)

    # Y <- X @ W.T + B
    layer.forward(outputs, inputs)

    # apply ReLU activation
    let out_relu: Matrix = Matrix(8, 32)
    relu(out_relu, outputs)

    # apply sigmoid activation
    let out_sigmoid: Matrix = Matrix(8, 32)
    sigmoid(out_sigmoid, outputs)

    # calculate MSE loss w/ dummy target values
    let targets: Matrix = random_matrix(8, 32)
    let loss_mat: Matrix = Matrix(1, 1)
    mse_loss(loss_mat, out_sigmoid, targets)
    let mse: Float32 = loss_mat[0, 0]

    # TODO: gradient descent!
```