Performance Analysis - Jax
Number of effective sequences implemented in Jax
In the previous post I compared various languages and libraries in terms of their speed. This notebook contains the code used in that comparison, as well as some details about the choices made to improve the performance of the Jax implementation.
# ! pip install numpy
# ! pip install pandas
# ! pip install --upgrade jax jaxlib
# ! pip install --upgrade jax jaxlib==0.1.66+cuda111 -f https://storage.googleapis.com/jax-releases/jax_releases.html
import pandas as pd
import numpy as np
def get_data(path):
    # Treating '>' as the line terminator makes every FASTA record a single row;
    # splitting each record on '\n' then gives the header (id) and the sequence line.
    fasta_df = pd.read_csv(path, lineterminator=">", header=None)
    fasta_df[['id', 'seq']] = fasta_df[0].str.split('\n', expand=True)[[0, 1]]
    return fasta_df.seq.to_numpy(dtype=str)
seqs = get_data('../data/picked_msa.fasta')
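The parsing above relies on the sequences not being wrapped: lineterminator=">" turns every FASTA record into one row, and the split on '\n' expects the whole sequence to sit on the single line after the header. A tiny made-up example (the file name toy_msa.fasta is only for illustration):
with open('toy_msa.fasta', 'w') as f:
    f.write('>seq1\nMKTAYIAK\n>seq2\nMKTAYIAR\n')
get_data('toy_msa.fasta')
# -> roughly array(['MKTAYIAK', 'MKTAYIAR'], dtype='<U8')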
Just to remind, the pseudocode for the number of effective sequences looks like this:
meff = 0
for seq1 in seqs:
    weight = 0
    for seq2 in seqs:
        if count_matches(seq1, seq2) > threshold:
            weight += 1
    meff += 1/weight
meff = meff/(len(seq1)**0.5)
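For reference, here is a sketch of a direct (and deliberately slow) NumPy translation of that pseudocode; it assumes the alignment is already a 2D integer array of shape (n_seqs, seq_len) and is only meant to make the definition concrete.
def get_nf_numpy(seqs_int, threshold=0.8):
    # O(n_seqs^2) reference implementation of the number of effective sequences.
    seq_len = seqs_int.shape[1]
    meff = 0.0
    for seq1 in seqs_int:
        # weight = how many sequences share more than `threshold` of positions with seq1
        weight = np.sum((seqs_int == seq1).mean(axis=1) > threshold)
        meff += 1.0 / weight
    return meff / seq_len ** 0.5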
import jax.numpy as jnp
from jax import jit as jax_jit
from jax import vmap
import jax
cpu_device = jax.devices('cpu')[0]
@jax_jit
def get_nf_jax_pair(a, b, threshold=0.8, batch_size=1):
    # Fraction of identical positions between `a` and each row of `b`; thanks to
    # broadcasting, `a` can be a single sequence and `b` the whole alignment.
    # (`batch_size` is unused here.)
    return jnp.equal(a, b).mean(-1) > threshold
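Despite its name, the function is not limited to a single pair: if b is the whole (n_seqs, seq_len) alignment, broadcasting compares a against every row at once, which is exactly how it is used under vmap below. A toy example with made-up integer sequences:
a = jnp.array([1, 2, 3, 4])
b = jnp.array([[1, 2, 3, 4],   # identical     -> fraction 1.00
               [1, 2, 3, 9],   # 3/4 identical -> fraction 0.75
               [9, 9, 9, 9]])  # no matches    -> fraction 0.00
get_nf_jax_pair(a, b)  # -> roughly [True, False, False] with the default 0.8 threshold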
@jax_jit
def get_nf_jax(seqs):
    n_seqs, seq_len = seqs.shape
    # vmap maps over the rows of the first argument only, so out[i, j] is True
    # when sequences i and j agree on more than `threshold` of their positions.
    out = vmap(get_nf_jax_pair, (0, None))(seqs, seqs)
    # Meff: sum of 1 / (number of similar sequences), divided by sqrt(seq_len).
    return (1 / jnp.sum(out, axis=1)).sum() / (seq_len ** 0.5)
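A quick end-to-end check on a made-up 3x4 integer alignment: the first two sequences are identical (two neighbours above the threshold each) and the third only matches itself, so the expected value is (1/2 + 1/2 + 1) / sqrt(4) = 1.0.
toy = jnp.array([[1, 2, 3, 4],
                 [1, 2, 3, 4],
                 [9, 9, 9, 9]])
float(get_nf_jax(toy))  # -> 1.0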
seqs_ = seqs[:2500]
# Warm-up call: triggers jit compilation so the timings below exclude compile time.
get_nf_jax(seqs_.view(np.uint32).reshape(seqs_.shape[0], -1))
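The .view(np.uint32) call is what turns the string array into something JAX can compare: NumPy stores unicode strings as fixed-width UCS-4, i.e. four bytes per character, so viewing the buffer as uint32 yields one code point per residue, and the reshape restores the (n_seqs, seq_len) layout. A small illustration on made-up sequences:
toy_seqs = np.array(['ACDE', 'ACDF'])
toy_seqs.view(np.uint32).reshape(toy_seqs.shape[0], -1)
# -> array([[65, 67, 68, 69],
#           [65, 67, 68, 70]], dtype=uint32)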
%%timeit -n 3 -r 3
nf_jax_gpu = get_nf_jax(seqs_.view(np.uint32).reshape(seqs_.shape[0], -1))
nf_jax_gpu.block_until_ready()
seqs_ = seqs[:1000]
%%timeit -n 3 -r 3
with jax.default_device(cpu_device):
    nf_jax_cpu = get_nf_jax(seqs_.view(np.uint32).reshape(seqs_.shape[0], -1))
    nf_jax_cpu.block_until_ready()
A couple of points:
- Jax does not support Windows.
- It is significantly faster than the PyTorch and TensorFlow implementations, and I am not entirely sure why.