NNCとNNabla

SONYのNeural Network Consoleは、ディープラーニングのモデルをGUIベースで作成することが出来て便利なのですが、ウェブサービスに組み込みたいといった場合に情報が乏しい気がしました。

というわけでメモとして作成してみました。

ここにはコードの抜粋だけですが、GitHubにすべてのコードをアップロードしています。

https://github.com/MizunagiKB/vividface

NNCで作成したモデルをPythonコード化

まずは以下のようなモデルをNNCで作成・学習を行いました。このモデルはVivid Strike!に登場する7キャラクターを分類する為に作成したものです。

学習が完了したら、Export > NNP (Neural Network Libraries file format) を実行すると、nnpファイルが生成出来ます。

具体的には以下の様にして使用します。

# Initialization: clear any parameters left from a previous graph.
nn.clear_parameters()
# Point MODEL_PATHNAME at the generated nnp file.
nnc_nnp = U.nnp_graph.NnpLoader(MODEL_PATHNAME)
# Select the network name (usually "MainRuntime") and the batch size (1 here).
nnc_net = nnc_nnp.get_network("MainRuntime", 1)

# Fetch the input and output variables by their NNC layer names.
x = nnc_net.inputs["Input"]
f = nnc_net.outputs["Softmax"]

# Batch size is 1, so feed a single image and run inference.
x.d = [nnl_image]
f.forward()

# The softmax scores are now available in f.d.
return f.d[0].tolist()

学習したモデルをPythonから利用する例

# ------------------------------------------------------------------ import(s)
import sys
import os
import random
import csv

import numpy as np

# -------------------------------------------------------------------- nnabla
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.solvers as S
import nnabla.logger as L
import nnabla.utils as U
import nnabla.utils.image_utils
import nnabla.utils.nnp_graph

# Use the CUDA/cuDNN backend when nnabla-ext-cuda is installed; otherwise
# fall back silently to the default (CPU) context.
try:
    import nnabla.ext_utils
    ctx = nnabla.ext_utils.get_extension_context("cudnn")
    nn.set_default_context(ctx)
except Exception:
    # except Exception (not bare except) so KeyboardInterrupt/SystemExit
    # still propagate; missing extension or no GPU just means CPU mode.
    pass

# Class-index -> character-name mapping; the classifier emits one softmax
# score per entry (7 classes).
TARGET_CHARA = {
    0: "corona",
    1: "einhald",
    2: "fuka",
    3: "miura",
    4: "rinne",
    5: "rio",
    6: "vivio"
}
# NNP file exported from Neural Network Console (Export > NNP).
MODEL_PATHNAME = "vividface_nnc_model/model.nnp"
# Root of the training images, one sub-directory per character.
DATASET_DIR = "./dataset"
# Input image geometry: 48x48, 3 channels (RGB).
IMAGE_W = 48
IMAGE_H = 48
IMAGE_D = 3
# Maximum number of images loaded per character.
LOAD_SIZE = 100
# 20 samples per character per mini-batch.
BATCH_SIZE = len(TARGET_CHARA) * 20


# ------------------------------------------------------------------- class(s)
class CDataset(object):
    """A single labelled sample: image tensor plus its one-hot label."""

    def __init__(self, chara_name, filename, nnl_image, nnl_onehot):
        # Keep provenance (character / file name) next to the tensors so
        # logging can report per-character statistics.
        self.nnl_image = nnl_image
        self.nnl_onehot = nnl_onehot
        self.chara_name = chara_name
        self.filename = filename


# ---------------------------------------------------------------- function(s)
def valid(list_valid):
    """Run the exported NNC model over list_valid and log per-class stats.

    Returns a list with one accuracy value (correct / total) per class,
    in TARGET_CHARA index order.
    """

    # Rebuild the graph from the NNP file on each call; the batch size is
    # set to the validation-set size so a single forward pass covers it all.
    nn.clear_parameters()
    nnc_nnp = U.nnp_graph.NnpLoader(MODEL_PATHNAME)
    nnc_net = nnc_nnp.get_network("MainRuntime", len(list_valid))

    # "Input" / "Softmax" are the layer names from the NNC project.
    x = nnc_net.inputs["Input"]
    f = nnc_net.outputs["Softmax"]

    list_size = [0] * len(TARGET_CHARA)  # ground-truth count per class
    list_true = [0] * len(TARGET_CHARA)  # correct predictions per class
    list_ans = [0] * len(TARGET_CHARA)  # prediction count per class

    list_x = [o.nnl_image for o in list_valid]
    list_y = [o.nnl_onehot for o in list_valid]

    x.d = list_x

    f.forward(clear_buffer=True)

    for y, result in zip(list_y, f.d):
        # argmax of the softmax row, expressed as max + index.
        v_max = max(result)
        n_idx = result.tolist().index(v_max)
        list_onehot = [0] * len(TARGET_CHARA)
        list_onehot[n_idx] = 1

        list_size[y.index(1)] += 1
        list_ans[n_idx] += 1
        # One-hot equality means the prediction matches the label exactly.
        if list_onehot == y:
            list_true[n_idx] += 1

    list_result = []
    for size, true in zip(list_size, list_true):
        if size > 0:
            list_result.append(true / size)
        else:
            # Avoid division by zero for classes absent from the set.
            list_result.append(0)

    L.info("size     " + " ".join(["%6.2f" % v for v in list_size]))
    L.info("true     " + " ".join(["%6.2f" % v for v in list_true]))
    L.info("ans      " + " ".join(["%6.2f" % v for v in list_ans]))
    L.info("result   " + " ".join(["%6.2f" % v for v in list_result]))

    return list_result


def inference(nnl_image):
    """Classify one preprocessed image and return its softmax scores."""

    # Fresh graph per call: reload the NNP export with batch size 1.
    nn.clear_parameters()
    loader = U.nnp_graph.NnpLoader(MODEL_PATHNAME)
    net = loader.get_network("MainRuntime", 1)

    in_var = net.inputs["Input"]
    out_var = net.outputs["Softmax"]

    in_var.d = [nnl_image]
    out_var.forward()

    return out_var.d[0].tolist()


def main():
    """Load images from DATASET_DIR, then validate and run one inference."""

    list_train = []
    list_valid = []

    # name -> class-index lookup; built once instead of once per image file.
    dict_chara = {v: k for k, v in TARGET_CHARA.items()}

    for dir_name, _, list_filename in os.walk(DATASET_DIR):
        _, chara_name = os.path.split(dir_name)
        if chara_name in TARGET_CHARA.values():
            list_image = []
            for filename in list_filename:
                if os.path.splitext(filename)[1].lower() in (".png", ".jpg",
                                                             ".jpeg"):

                    # Load as HWC, then transpose to CHW for the network.
                    nnl_image = U.image_utils.imread(os.path.join(
                        dir_name, filename),
                                                     size=(IMAGE_W, IMAGE_H),
                                                     channel_first=False)

                    nnl_image = nnl_image.transpose(2, 0, 1)

                    list_onehot = [0] * len(TARGET_CHARA)
                    list_onehot[dict_chara[chara_name]] = 1

                    # Pixel values are normalised to [0, 1].
                    list_image.append(
                        CDataset(chara_name, filename, nnl_image / 255.0,
                                 list_onehot))

                if len(list_image) == LOAD_SIZE:
                    break

            # The last 20 images per character are held out for validation.
            list_train += list_image[0:-20]
            list_valid += list_image[-20:]

            L.info("%s %4d %4d" %
                   (chara_name, len(list_train), len(list_valid)))

    valid(list_valid)
    L.info(inference(list_valid[0].nnl_image))


# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()

# [EOF]

Webサービスに組み込んでみた例(NNC版)

# ------------------------------------------------------------------ import(s)
import sys
import os
import hashlib
import io
import time

# -------------------------------------------------------------------- bottle
import bottle
import numpy as np
import PIL.Image
import cv2

# -------------------------------------------------------------------- nnabla
import nnabla as nn
import nnabla.logger as L
import nnabla.utils as U
import nnabla.utils.image_utils
import nnabla.utils.nnp_graph

import vividface_nnl

# Re-use geometry and label constants from the training module so the
# web app stays in sync with the model.
IMAGE_W = vividface_nnl.IMAGE_W
IMAGE_H = vividface_nnl.IMAGE_H
IMAGE_D = vividface_nnl.IMAGE_D
TARGET_CHARA = vividface_nnl.TARGET_CHARA

# NNP file exported from Neural Network Console.
MODEL_PATHNAME = "vividface_nnc_model/model.nnp"

# Rectangle/label colours in BGR order (OpenCV convention).
EDGE_COLOR = (255, 255, 255)
LINE_COLOR = (0, 0, 255)
# Generated images older than one hour are purged from IMWORK_DIR.
IMWORK_EXPIRE_SEC = 1 * 60 * 60
IMWORK_DIR = "./imwork"


# ------------------------------------------------------------------- class(s)
# ---------------------------------------------------------------- function(s)
def imwork_clean(expire_sec):
    """Delete cached images under IMWORK_DIR older than expire_sec seconds."""

    current_time = time.time()

    for dir_name, _, list_filename in os.walk(IMWORK_DIR):
        for filename in list_filename:
            # .lower() so .PNG / .JPG match too — consistent with every
            # other extension check in this project.
            if os.path.splitext(filename)[1].lower() in (".png", ".jpg",
                                                         ".jpeg"):
                pathname = os.path.join(dir_name, filename)
                oss = os.stat(pathname)
                if (current_time - oss.st_mtime) > expire_sec:
                    os.remove(pathname)


@bottle.route("/")
def html_index():
    # Top page: render the "index" template with no extra context.
    page = bottle.template("index")
    return page


@bottle.route("/imwork/<img_filepath:path>", name="imwork")
def res_image(img_filepath):
    # Serve generated face/result images straight from the work directory.
    response = bottle.static_file(img_filepath, root=IMWORK_DIR)
    return response


@bottle.route("/decide")
def html_decide():
    # GET on /decide is not meaningful (uploads are POST) — send users home.
    bottle.redirect("/")


@bottle.route("/decide", method="POST")
def do_upload():
    """Handle an image upload: detect faces, classify each, render results."""

    try:
        upload = bottle.request.files.get("upload", "")
        if os.path.splitext(upload.filename)[1].lower() not in (".png", ".jpg",
                                                                ".jpeg"):
            bottle.redirect("/")
    except AttributeError:
        # No file field -> upload is "" and has no .filename attribute.
        bottle.redirect("/")

    data_raw = upload.file.read()

    # Content hash doubles as a collision-safe base name in the work dir.
    image_hash = hashlib.sha1(data_raw).hexdigest()

    data_pil = PIL.Image.open(io.BytesIO(data_raw))
    if data_pil.mode != "RGB":
        data_pil = data_pil.convert("RGB")

    # Two copies: clip_cv2 stays pristine for cropping faces; data_cv2 is
    # the one drawn on for the annotated result image.
    clip_cv2 = cv2.cvtColor(np.asarray(data_pil), cv2.COLOR_RGB2BGR)
    data_cv2 = cv2.cvtColor(np.asarray(data_pil), cv2.COLOR_RGB2BGR)

    cv2_cascade = cv2.CascadeClassifier("lbpcascade_animeface.xml")

    # The anime-face cascade is run on an equalised greyscale image.
    gry = cv2.cvtColor(data_cv2, cv2.COLOR_BGR2GRAY)
    gry = cv2.equalizeHist(gry)

    list_face = cv2_cascade.detectMultiScale(gry,
                                             scaleFactor=1.1,
                                             minNeighbors=5,
                                             minSize=(IMAGE_W, IMAGE_H))

    # Load the exported NNC model with batch size 1 (one face at a time).
    nn.clear_parameters()
    nnc_nnp = U.nnp_graph.NnpLoader(MODEL_PATHNAME)
    nnc_net = nnc_nnp.get_network("MainRuntime", 1)
    net_x = nnc_net.inputs["Input"]
    net_y = nnc_net.outputs["Softmax"]

    list_result = []
    for idx, tpl_region in enumerate(list_face):
        ix, iy, iw, ih = tpl_region

        # Crop the face from the unmodified image and convert back to RGB.
        o_ref = cv2.cvtColor(clip_cv2[iy:iy + ih, ix:ix + iw],
                             cv2.COLOR_BGR2RGB)
        o_ref = U.image_utils.imresize(o_ref, (IMAGE_W, IMAGE_H))

        face_pathname = os.path.join(IMWORK_DIR,
                                     "%s_%02d.png" % (image_hash, idx))
        U.image_utils.imsave(face_pathname, o_ref)

        # HWC -> CHW, scaled to [0, 1] like the training data.
        net_x.d = [(o_ref.transpose(2, 0, 1) / 255)]
        net_y.forward()
        list_detect_rate = net_y.d[0].tolist()

        rate_max = max(list_detect_rate)
        n_idx = list_detect_rate.index(rate_max)
        character_name = TARGET_CHARA[n_idx]

        cv2.rectangle(data_cv2, (ix, iy), (ix + iw, iy + ih), EDGE_COLOR, 3)
        cv2.rectangle(data_cv2, (ix, iy), (ix + iw, iy + ih), LINE_COLOR, 1)

        # White 5x5 "outline" passes first, coloured text on top; the label
        # string is built once instead of on every putText call.
        label = "%d) %s" % (idx, character_name)
        for txt_y in (-2, -1, 0, 1, 2):
            for txt_x in (-2, -1, 0, 1, 2):
                cv2.putText(data_cv2, label,
                            (ix + 3 + txt_x, iy + ih + 16 + txt_y),
                            cv2.FONT_HERSHEY_DUPLEX, 0.5, EDGE_COLOR)
        cv2.putText(data_cv2, label, (ix + 3, iy + ih + 16),
                    cv2.FONT_HERSHEY_DUPLEX, 0.5, LINE_COLOR)

        list_result.append([face_pathname, character_name, list_detect_rate])

    # Opportunistic cleanup of expired work files on every request.
    imwork_clean(IMWORK_EXPIRE_SEC)

    pathname = os.path.join(IMWORK_DIR, "%s.jpg" % (image_hash, ))
    cv2.imwrite(pathname, data_cv2)

    return bottle.template("index", pathname=pathname, list_result=list_result)


# Development server: debug + auto-reload enabled. Not for production use.
if __name__ == "__main__":
    bottle.run(host="localhost", port=8001, debug=True, reloader=True)

# [EOF]

NNCを介さず、NNLだけで実現する例

NNCを使用せずにNNLだけで記述すると、以下の様になります。

# ------------------------------------------------------------------ import(s)
import sys
import os
import random
import csv

import numpy as np

# -------------------------------------------------------------------- nnabla
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.solvers as S
import nnabla.logger as L
import nnabla.utils as U
import nnabla.utils.image_utils

# Use the CUDA/cuDNN backend when nnabla-ext-cuda is installed; otherwise
# fall back silently to the default (CPU) context.
try:
    import nnabla.ext_utils
    ctx = nnabla.ext_utils.get_extension_context("cudnn")
    nn.set_default_context(ctx)
except Exception:
    # except Exception (not bare except) so KeyboardInterrupt/SystemExit
    # still propagate; missing extension or no GPU just means CPU mode.
    pass

# Class-index -> character-name mapping (7 classes).
TARGET_CHARA = {
    0: "corona",
    1: "einhald",
    2: "fuka",
    3: "miura",
    4: "rinne",
    5: "rio",
    6: "vivio"
}

# Root of the training images, one sub-directory per character.
DATASET_DIR = "./dataset"
# Input image geometry: 48x48, 3 channels (RGB).
IMAGE_W = 48
IMAGE_H = 48
IMAGE_D = 3
# Maximum number of images loaded per character.
LOAD_SIZE = 100
# 20 samples per character per mini-batch.
BATCH_SIZE = len(TARGET_CHARA) * 20

# Base names of the CSV dataset files written by dataset_save().
VIVIDFACE_TRAIN = "vividface_train_%dx%d" % (IMAGE_W, IMAGE_H)
VIVIDFACE_VALID = "vividface_valid_%dx%d" % (IMAGE_W, IMAGE_H)


# ------------------------------------------------------------------- class(s)
class CDataset(object):
    """One labelled training/validation sample (image + one-hot label)."""

    def __init__(self, chara_name, filename, nnl_image, nnl_onehot):
        # Character and file name are retained for CSV export / logging.
        self.chara_name = chara_name
        self.nnl_onehot = nnl_onehot
        self.filename = filename
        self.nnl_image = nnl_image


# ---------------------------------------------------------------- function(s)
def build(in_x, in_y, train=True):
    """Build the classifier graph: 3 conv blocks, 4 affine layers, softmax.

    in_x  : input image Variable, shape (batch, IMAGE_D, IMAGE_W, IMAGE_H).
    in_y  : unused in this function — presumably a leftover from a variant
            that consumed labels here (TODO confirm).
    train : True enables image augmentation and batch statistics in the
            batch-normalization layers.

    Returns the softmax output Variable with len(TARGET_CHARA) classes.
    """

    if train is True:
        # Augmentation parameters are passed positionally; refer to the
        # F.image_augmentation signature for each argument's meaning.
        h = F.image_augmentation(in_x, (IMAGE_D, IMAGE_W, IMAGE_H), (0, 0),
                                 1.0, 1.0, 0.0, 1.0, 0.0, False, False, 0.0,
                                 False, 1.1, 0.5, False, 0.0, 0)
    else:
        # NOTE(review): called with defaults at inference time — appears
        # to act as a pass-through node; confirm against nnabla docs.
        h = F.image_augmentation(in_x)

    # conv1: 8 filters, 2x2 kernel, stride 2 (halves the spatial size).
    with nn.parameter_scope("conv1"):
        h = PF.convolution(h, 8, (2, 2), stride=(2, 2), pad=(0, 0))
        h = PF.batch_normalization(h,
                                   axes=(1, ),
                                   decay_rate=0.9,
                                   eps=0.0001,
                                   batch_stat=train)
        h = F.relu(h, True)

    # conv2: 16 filters, 2x2 kernel, stride 2.
    with nn.parameter_scope("conv2"):
        h = PF.convolution(h, 16, (2, 2), stride=(2, 2), pad=(0, 0))
        h = PF.batch_normalization(h,
                                   axes=(1, ),
                                   decay_rate=0.9,
                                   eps=0.0001,
                                   batch_stat=train)
        h = F.relu(h, True)

    # conv3: 64 filters, 3x3 kernel, stride 2, padding 1.
    with nn.parameter_scope("conv3"):
        h = PF.convolution(h, 64, (3, 3), stride=(2, 2), pad=(1, 1))
        h = PF.batch_normalization(h,
                                   axes=(1, ),
                                   decay_rate=0.9,
                                   eps=0.0001,
                                   batch_stat=train)
        h = F.relu(h, True)

    # Fully-connected head, progressively narrowing (140 -> 70 -> 21 -> 7).
    with nn.parameter_scope("affine4"):
        h = PF.affine(h, len(TARGET_CHARA) * 20)
        h = F.relu(h, True)

    with nn.parameter_scope("affine5"):
        h = PF.affine(h, len(TARGET_CHARA) * 10)
        h = F.relu(h, True)

    with nn.parameter_scope("affine6"):
        h = PF.affine(h, len(TARGET_CHARA) * 3)
        h = F.relu(h, True)

    # Final layer: one logit per class, then softmax.
    with nn.parameter_scope("affine7"):
        h = PF.affine(h, len(TARGET_CHARA))
        h = F.softmax(h)

    return h


def train(list_train, list_valid, epoch_limit=10000):
    """Train the network using squared error against one-hot labels.

    A checkpoint is saved whenever the worst per-class validation accuracy
    exceeds 0.65. An epoch_limit <= 0 means train indefinitely.
    """

    x = nn.Variable(shape=(BATCH_SIZE, IMAGE_D, IMAGE_W, IMAGE_H))
    y = nn.Variable(shape=(BATCH_SIZE, len(TARGET_CHARA)))
    f = build(x, None)

    # Squared error per element, averaged over the batch.
    h = F.squared_error(f, y)
    loss = F.mean(h)

    solver = S.Adam()
    solver.set_parameters(nn.get_parameters())

    # One shuffle yields a uniformly random permutation; the original
    # shuffled 10 times, which adds nothing statistically.
    random.shuffle(list_train)

    epoch = 1
    while True:

        # Iterate full batches only: x/y have a fixed BATCH_SIZE shape, so
        # feeding a short final slice would raise a shape-mismatch error.
        for n in range(0, len(list_train) - BATCH_SIZE + 1, BATCH_SIZE):

            x.d = [o.nnl_image for o in list_train[n:n + BATCH_SIZE]]
            y.d = [o.nnl_onehot for o in list_train[n:n + BATCH_SIZE]]

            loss.forward()
            solver.zero_grad()
            loss.backward()
            solver.update()

        # Validate (and possibly checkpoint) every 10 epochs.
        if (epoch % 10) == 0:
            list_result = valid(list_valid)
            min_score = min(list_result)
            L.info("epoch(s): [%6d]  score: [%.3f] loss: [%.12f]" %
                   (epoch, min_score, loss.d))
            if min_score > 0.65:
                model_filename = "vividface_nnl_model/train_model_%06d_%03d_%6f.h5" % (
                    epoch, int(min_score * 100), loss.d)
                nn.save_parameters(model_filename)

        epoch += 1
        if epoch_limit > 0:
            if epoch > epoch_limit:
                break


def valid(list_valid):
    """Forward the validation set once; log and return per-class accuracy."""

    x = nn.Variable(shape=(len(list_valid), IMAGE_D, IMAGE_W, IMAGE_H))
    f = build(x, None, False)

    n_class = len(TARGET_CHARA)
    list_size = [0] * n_class  # ground-truth count per class
    list_true = [0] * n_class  # correct predictions per class
    list_ans = [0] * n_class   # prediction count per class

    x.d = [o.nnl_image for o in list_valid]
    list_y = [o.nnl_onehot for o in list_valid]

    f.forward()

    for y, result in zip(list_y, f.d):
        # Predicted class = argmax over the softmax row.
        scores = result.tolist()
        n_idx = scores.index(max(scores))

        list_size[y.index(1)] += 1
        list_ans[n_idx] += 1

        prediction = [0] * n_class
        prediction[n_idx] = 1
        if prediction == y:
            list_true[n_idx] += 1

    # Per-class accuracy; 0 for classes absent from the validation set.
    list_result = [(true / size) if size > 0 else 0
                   for size, true in zip(list_size, list_true)]

    for tag, values in (("size     ", list_size), ("true     ", list_true),
                        ("ans      ", list_ans), ("result   ", list_result)):
        L.info(tag + " ".join(["%6.2f" % v for v in values]))

    return list_result


def inference(nnl_image):
    """Classify one preprocessed image; return its softmax scores."""

    # Batch of one: build an evaluation graph (no augmentation, fixed
    # batch statistics) on top of the single-image input.
    in_var = nn.Variable(shape=(1, IMAGE_D, IMAGE_W, IMAGE_H))
    in_var.d = [nnl_image]

    out = build(in_var, None, False)
    out.forward()

    return out.d[0].tolist()


def dataset_save(dir_name, basename, list_data):
    """Write an NNC-style dataset CSV: an "x" image-path column followed by
    one one-hot "y__<idx>:<name>" column per class.

    The file goes to <dir_name>/<basename>.csv; image paths are recorded
    relative to dir_name as ./<character>/<filename>.
    """

    csv_file = os.path.join(dir_name, basename) + ".csv"
    print(csv_file)

    # newline="" is required by the csv module — without it every row gains
    # an extra blank line on Windows. utf-8 keeps non-ASCII names portable.
    with open(csv_file, "w", newline="", encoding="utf-8") as hw:
        csv_w = csv.writer(hw)
        csv_w.writerow(["x"] + [
            "y__%d:%s" % (n, TARGET_CHARA[n]) for n in range(len(TARGET_CHARA))
        ])
        for o in list_data:
            csv_w.writerow(["./%s/%s" % (o.chara_name, o.filename)] +
                           o.nnl_onehot)


def main():
    """Load the dataset, export the CSV lists, then start training."""

    list_train = []
    list_valid = []

    # name -> class-index lookup; built once instead of once per image file.
    dict_chara = {v: k for k, v in TARGET_CHARA.items()}

    for dir_name, _, list_filename in os.walk(DATASET_DIR):
        _, chara_name = os.path.split(dir_name)
        if chara_name in TARGET_CHARA.values():
            list_image = []
            for filename in list_filename:
                if os.path.splitext(filename)[1].lower() in (".png", ".jpg",
                                                             ".jpeg"):

                    # Load as HWC, then transpose to CHW for the network.
                    nnl_image = U.image_utils.imread(os.path.join(
                        dir_name, filename),
                                                     size=(IMAGE_W, IMAGE_H),
                                                     channel_first=False)

                    nnl_image = nnl_image.transpose(2, 0, 1)

                    list_onehot = [0] * len(TARGET_CHARA)
                    list_onehot[dict_chara[chara_name]] = 1

                    # Pixel values are normalised to [0, 1].
                    list_image.append(
                        CDataset(chara_name, filename, nnl_image / 255.0,
                                 list_onehot))

                if len(list_image) == LOAD_SIZE:
                    break

            # The last 20 images per character are held out for validation.
            list_train += list_image[0:-20]
            list_valid += list_image[-20:]

            L.info("%s %4d %4d" %
                   (chara_name, len(list_train), len(list_valid)))

    dataset_save(DATASET_DIR, VIVIDFACE_TRAIN, list_train)
    dataset_save(DATASET_DIR, VIVIDFACE_VALID, list_valid)

    train(list_train, list_valid)


# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()

# [EOF]

Webサービスに組み込んでみた例(NNL版)

# ------------------------------------------------------------------ import(s)
import sys
import os
import hashlib
import io
import time

# -------------------------------------------------------------------- bottle
import bottle
import numpy as np
import PIL.Image
import cv2

# -------------------------------------------------------------------- nnabla
import nnabla as nn
import nnabla.logger as L
import nnabla.utils as U
import nnabla.utils.image_utils
import nnabla.utils.nnp_graph

import vividface_nnl

# Re-use geometry and label constants from the training module so the
# web app stays in sync with the model.
IMAGE_W = vividface_nnl.IMAGE_W
IMAGE_H = vividface_nnl.IMAGE_H
IMAGE_D = vividface_nnl.IMAGE_D
TARGET_CHARA = vividface_nnl.TARGET_CHARA

# NNP file exported from Neural Network Console.
MODEL_PATHNAME = "vividface_nnc_model/model.nnp"

# Rectangle/label colours in BGR order (OpenCV convention).
EDGE_COLOR = (255, 255, 255)
LINE_COLOR = (0, 0, 255)
# Generated images older than one hour are purged from IMWORK_DIR.
IMWORK_EXPIRE_SEC = 1 * 60 * 60
IMWORK_DIR = "./imwork"


# ------------------------------------------------------------------- class(s)
# ---------------------------------------------------------------- function(s)
def imwork_clean(expire_sec):
    """Delete cached images under IMWORK_DIR older than expire_sec seconds."""

    current_time = time.time()

    for dir_name, _, list_filename in os.walk(IMWORK_DIR):
        for filename in list_filename:
            # .lower() so .PNG / .JPG match too — consistent with every
            # other extension check in this project.
            if os.path.splitext(filename)[1].lower() in (".png", ".jpg",
                                                         ".jpeg"):
                pathname = os.path.join(dir_name, filename)
                oss = os.stat(pathname)
                if (current_time - oss.st_mtime) > expire_sec:
                    os.remove(pathname)


@bottle.route("/")
def html_index():
    # Top page: render the "index" template with no extra context.
    page = bottle.template("index")
    return page


@bottle.route("/imwork/<img_filepath:path>", name="imwork")
def res_image(img_filepath):
    # Serve generated face/result images straight from the work directory.
    response = bottle.static_file(img_filepath, root=IMWORK_DIR)
    return response


@bottle.route("/decide")
def html_decide():
    # GET on /decide is not meaningful (uploads are POST) — send users home.
    bottle.redirect("/")


@bottle.route("/decide", method="POST")
def do_upload():
    """Handle an image upload: detect faces, classify each, render results."""

    try:
        upload = bottle.request.files.get("upload", "")
        if os.path.splitext(upload.filename)[1].lower() not in (".png", ".jpg",
                                                                ".jpeg"):
            bottle.redirect("/")
    except AttributeError:
        # No file field -> upload is "" and has no .filename attribute.
        bottle.redirect("/")

    data_raw = upload.file.read()

    # Content hash doubles as a collision-safe base name in the work dir.
    image_hash = hashlib.sha1(data_raw).hexdigest()

    data_pil = PIL.Image.open(io.BytesIO(data_raw))
    if data_pil.mode != "RGB":
        data_pil = data_pil.convert("RGB")

    # Two copies: clip_cv2 stays pristine for cropping faces; data_cv2 is
    # the one drawn on for the annotated result image.
    clip_cv2 = cv2.cvtColor(np.asarray(data_pil), cv2.COLOR_RGB2BGR)
    data_cv2 = cv2.cvtColor(np.asarray(data_pil), cv2.COLOR_RGB2BGR)

    cv2_cascade = cv2.CascadeClassifier("lbpcascade_animeface.xml")

    # The anime-face cascade is run on an equalised greyscale image.
    gry = cv2.cvtColor(data_cv2, cv2.COLOR_BGR2GRAY)
    gry = cv2.equalizeHist(gry)

    list_face = cv2_cascade.detectMultiScale(gry,
                                             scaleFactor=1.1,
                                             minNeighbors=5,
                                             minSize=(IMAGE_W, IMAGE_H))

    # Load the exported NNC model with batch size 1 (one face at a time).
    nn.clear_parameters()
    nnc_nnp = U.nnp_graph.NnpLoader(MODEL_PATHNAME)
    nnc_net = nnc_nnp.get_network("MainRuntime", 1)
    net_x = nnc_net.inputs["Input"]
    net_y = nnc_net.outputs["Softmax"]

    list_result = []
    for idx, tpl_region in enumerate(list_face):
        ix, iy, iw, ih = tpl_region

        # Crop the face from the unmodified image and convert back to RGB.
        o_ref = cv2.cvtColor(clip_cv2[iy:iy + ih, ix:ix + iw],
                             cv2.COLOR_BGR2RGB)
        o_ref = U.image_utils.imresize(o_ref, (IMAGE_W, IMAGE_H))

        face_pathname = os.path.join(IMWORK_DIR,
                                     "%s_%02d.png" % (image_hash, idx))
        U.image_utils.imsave(face_pathname, o_ref)

        # HWC -> CHW, scaled to [0, 1] like the training data.
        net_x.d = [(o_ref.transpose(2, 0, 1) / 255)]
        net_y.forward()
        list_detect_rate = net_y.d[0].tolist()

        rate_max = max(list_detect_rate)
        n_idx = list_detect_rate.index(rate_max)
        character_name = TARGET_CHARA[n_idx]

        cv2.rectangle(data_cv2, (ix, iy), (ix + iw, iy + ih), EDGE_COLOR, 3)
        cv2.rectangle(data_cv2, (ix, iy), (ix + iw, iy + ih), LINE_COLOR, 1)

        # White 5x5 "outline" passes first, coloured text on top; the label
        # string is built once instead of on every putText call.
        label = "%d) %s" % (idx, character_name)
        for txt_y in (-2, -1, 0, 1, 2):
            for txt_x in (-2, -1, 0, 1, 2):
                cv2.putText(data_cv2, label,
                            (ix + 3 + txt_x, iy + ih + 16 + txt_y),
                            cv2.FONT_HERSHEY_DUPLEX, 0.5, EDGE_COLOR)
        cv2.putText(data_cv2, label, (ix + 3, iy + ih + 16),
                    cv2.FONT_HERSHEY_DUPLEX, 0.5, LINE_COLOR)

        list_result.append([face_pathname, character_name, list_detect_rate])

    # Opportunistic cleanup of expired work files on every request.
    imwork_clean(IMWORK_EXPIRE_SEC)

    pathname = os.path.join(IMWORK_DIR, "%s.jpg" % (image_hash, ))
    cv2.imwrite(pathname, data_cv2)

    return bottle.template("index", pathname=pathname, list_result=list_result)


# Development server: debug + auto-reload enabled. Not for production use.
if __name__ == "__main__":
    bottle.run(host="localhost", port=8001, debug=True, reloader=True)

# [EOF]