Chapter 6: 유전체학

이 장에서는 RNA 간섭(RNAi)을 일으키는 siRNA 서열의 효과 예측, 전사 인자(Transcription Factor)의 결합 부위 예측 등 유전체학 문제에 딥러닝을 적용하는 방법을 살펴봅니다.

RNA 간섭(RNAi) 예측

siRNA 서열의 효과를 예측하기 위해 합성곱 신경망(CNN)을 구축합니다.

import deepchem as dc
import tensorflow as tf
import tensorflow.keras.layers as layers

# Train a model to predict how well siRNA sequences will work for RNA
# interference.

# Build the model: two 1D convolutions over one-hot encoded 21-mer
# sequences, then a single sigmoid output in [0, 1].
sequence_input = tf.keras.Input(shape=(21, 4))
hidden = sequence_input
for _ in range(2):
    conv = layers.Conv1D(
        filters=10, kernel_size=10, activation=tf.nn.relu, padding="same"
    )
    hidden = layers.Dropout(rate=0.3)(conv(hidden))
flattened = layers.Flatten()(hidden)
output = layers.Dense(units=1, activation=tf.math.sigmoid)(flattened)
keras_model = tf.keras.Model(inputs=sequence_input, outputs=output)
model = dc.models.KerasModel(
    keras_model, loss=dc.models.losses.L2Loss(), batch_size=1000, model_dir="rnai"
)

# Load the training and validation datasets from disk.
train = dc.data.DiskDataset("train_siRNA")
valid = dc.data.DiskDataset("valid_siRNA")

# Train in rounds of 10 epochs, printing the Pearson correlation on the
# training and validation sets after each round.
metric = dc.metrics.Metric(dc.metrics.pearsonr, mode="regression")
for _ in range(20):
    model.fit(train, nb_epoch=10)
    print(model.evaluate(train, [metric])["pearsonr"])
    print(model.evaluate(valid, [metric])["pearsonr"])

전사 인자 결합(Transcription Factor Binding)

DNA에 결합하는 전사 인자의 위치를 예측하는 것은 유전체학의 필수적인 작업입니다.

단순 DNA 결합 (서열 정보만 사용)

import deepchem as dc
import tensorflow as tf
import tensorflow.keras.layers as layers

# Train a model to predict binding sites for the transcription factor JUND.

# Build the model: three 1D convolutions over one-hot encoded 101-base
# windows, followed by a single logistic output unit.
features = tf.keras.Input(shape=(101, 4))
x = features
for _ in range(3):
    conv = layers.Conv1D(
        filters=15, kernel_size=10, activation=tf.nn.relu, padding="same"
    )
    x = layers.Dropout(rate=0.5)(conv(x))
flat = layers.Flatten()(x)
logits = layers.Dense(units=1)(flat)
output = layers.Activation(tf.math.sigmoid)(logits)
# Expose both the sigmoid prediction and the raw logits: per output_types
# below, the prediction is what the model returns and the logits feed the
# sigmoid cross-entropy loss.
keras_model = tf.keras.Model(inputs=features, outputs=[output, logits])
model = dc.models.KerasModel(
    keras_model,
    loss=dc.models.losses.SigmoidCrossEntropy(),
    output_types=["prediction", "loss"],
    batch_size=1000,
    model_dir="tf",
)

# Load the training and validation datasets from disk.
train = dc.data.DiskDataset("train_dataset")
valid = dc.data.DiskDataset("valid_dataset")

# Train in rounds of 10 epochs, printing ROC AUC on the training and
# validation sets after each round.
metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
for _ in range(20):
    model.fit(train, nb_epoch=10)
    print(model.evaluate(train, [metric]))
    print(model.evaluate(valid, [metric]))

크로마틴 접근성 데이터와 결합(Binding with Chromatin Accessibility)

크로마틴 접근성(chromatin accessibility) 데이터를 모델에 포함하여 결합 예측 성능을 향상시키는 예제입니다.

import deepchem as dc
import tensorflow as tf
import tensorflow.keras.layers as layers
import numpy as np

# Train a model to predict transcription factor binding, based on both
# sequence and chromatin accessibility.

# Build the model: the same three-convolution stack over one-hot encoded
# 101-base windows, but with the per-span accessibility scalar concatenated
# onto the flattened convolutional features before the output layer.
features = tf.keras.Input(shape=(101, 4))
accessibility = tf.keras.Input(shape=(1,))
prev = features
for i in range(3):
    prev = layers.Conv1D(
        filters=15, kernel_size=10, activation=tf.nn.relu, padding="same"
    )(prev)
    prev = layers.Dropout(rate=0.5)(prev)
prev = layers.Concatenate()([layers.Flatten()(prev), accessibility])
logits = layers.Dense(units=1)(prev)
output = layers.Activation(tf.math.sigmoid)(logits)
# Both the sigmoid prediction and the raw logits are exposed: per
# output_types below, the logits feed the sigmoid cross-entropy loss.
keras_model = tf.keras.Model(inputs=[features, accessibility], outputs=[output, logits])
model = dc.models.KerasModel(
    keras_model,
    loss=dc.models.losses.SigmoidCrossEntropy(),
    output_types=["prediction", "loss"],
    batch_size=1000,
    model_dir="chromatin",
)

# Load the data.
train = dc.data.DiskDataset("train_dataset")
valid = dc.data.DiskDataset("valid_dataset")

# Map each span ID (first column) to its accessibility value (second
# column). Use a context manager so the file handle is closed
# deterministically; the original `for line in open(...)` leaked it.
span_accessibility = {}
with open("accessibility.txt") as accessibility_file:
    for line in accessibility_file:
        fields = line.split()
        span_accessibility[fields[0]] = float(fields[1])


# Define a generator function to produce batches.
def generate_batches(dataset, epochs):
    """Yield (inputs, labels, weights) batches for fit_generator.

    For each batch, the inputs list pairs the feature array X with a
    NumPy array of chromatin accessibility values looked up from the
    module-level ``span_accessibility`` dict by sample ID. Batches are
    padded so every batch holds exactly 1000 samples.
    """
    for epoch in range(epochs):
        for X, y, w, ids in dataset.iterbatches(batch_size=1000, pad_batches=True):
            # `sid`, not `id`, to avoid shadowing the builtin.
            accessibilities = np.array([span_accessibility[sid] for sid in ids])
            yield ([X, accessibilities], [y], [w])


# Train in 20 rounds of 10 epochs each, printing ROC AUC on the training
# and validation datasets after every round.
metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
for _ in range(20):
    model.fit_generator(generate_batches(train, epochs=10))
    for dataset in (train, valid):
        print(model.evaluate_generator(generate_batches(dataset, 1), [metric]))