Caffe operator tests

%cd ../..
import set_env
/media/pc/data/lxw/ai/tvm-book/doc/topics
import os

# GLOG_minloglevel must be set before `import caffe`, otherwise glog ignores it
# and still prints INFO logs (as the captured output below shows).
os.environ["GLOG_minloglevel"] = "2"

import logging
import numpy as np

from google.protobuf import text_format
import caffe
from caffe import layers as L, params as P
from caffe.proto import caffe_pb2 as pb

import tvm
import tvm.testing
from tvm import relay
from tvm.contrib import graph_executor
from tvm.contrib.download import download_testdata

logging.basicConfig(level=logging.ERROR)

def save_prototxt(n_netspec, f_path):
    """Generate .prototxt file according to caffe.NetSpec"""
    s = n_netspec.to_proto()
    with open(f_path, "w") as f:
        f.write(str(s))


def save_solver(solver_file, proto_file, blob_file):
    """Define a solver proto, you can change the configs."""
    blob_file_prefix = blob_file.split(".caffemodel")[0]
    s = pb.SolverParameter()
    s.train_net = proto_file
    s.base_lr = 0.01
    s.momentum = 0.9
    s.weight_decay = 0.0005
    s.lr_policy = "inv"
    s.gamma = 0.0001
    s.power = 0.75
    s.display = 1
    s.max_iter = 100000
    s.snapshot = 100000
    s.snapshot_prefix = blob_file_prefix

    with open(solver_file, "w") as f:
        f.write(str(s))


def save_caffemodel(solver_file, blob_file):
    """Generate .caffemodel file."""
    solver = caffe.SGDSolver(solver_file)
    solver.net.save(blob_file)

def gen_model_files(n_netspec, proto_file, blob_file, solver_file):
    save_prototxt(n_netspec, proto_file)
    save_solver(solver_file, proto_file, blob_file)
    save_caffemodel(solver_file, blob_file)

def run_caffe(data, proto_file, blob_file):
    """Run a model in Caffe, given its .prototxt and .caffemodel files."""
    net = caffe.Net(proto_file, blob_file, caffe.TEST)
    if isinstance(data, (list, tuple)):
        for idx, d in enumerate(data):
            net.blobs["data" + str(idx)].data[...] = d
    else:
        net.blobs["data"].data[...] = data
    out = net.forward()

    # Multi-output nets name their tops "output0", "output1", ...; collect them
    # in that order. If the naming convention does not hold, fall back to
    # whatever tops the net produced.
    caffe_output = []
    for i in range(len(out.keys())):
        if "output" + str(i) not in out.keys():
            caffe_output.clear()
            return list(out.values())
        caffe_output.append(out["output" + str(i)])
    return caffe_output


def siso_op(shape, func, *args, **kwargs):
    """Create a single-input, single-output Caffe net."""
    n = caffe.NetSpec()
    n.data = L.Input(input_param={"shape": {"dim": list(shape)}})
    n.output = func(n.data, *args, **kwargs)
    return n

def miso_op(shapes, func, *args, **kwargs):
    """Create multi input and single output Caffe op"""
    n = caffe.NetSpec()
    if not isinstance(shapes, (tuple, list)):
        raise TypeError(f"Need tuple or list but get {type(shapes)}")
    input_list = []
    for idx, shape in enumerate(shapes):
        n["data" + str(idx)] = L.Input(input_param={"shape": {"dim": list(shape)}})
        input_list.append(n["data" + str(idx)])
    n.output = func(*input_list, *args, **kwargs)
    return n


def simo_op(shape, func, *args, **kwargs):
    """Create single input and multi output Caffe op"""
    n = caffe.NetSpec()
    n.data = L.Input(input_param={"shape": {"dim": list(shape)}})
    output_list = func(n.data, *args, **kwargs)
    for idx, out in enumerate(output_list):
        n["output" + str(idx)] = out
    return n


def create_op(shapes, func_op, **kwargs):
    """Dispatch to siso_op / miso_op / simo_op based on `shapes` and `ntop`."""
    shape_list = []
    if isinstance(shapes, (list, tuple)):
        n = miso_op(shapes, func_op, **kwargs)
        for shape in shapes:
            shape_list.extend(list(shape))
    else:
        output_num = 1
        if "ntop" in kwargs:
            output_num = kwargs["ntop"]
        if output_num == 1:
            n = siso_op(shapes, func_op, **kwargs)
        else:
            n = simo_op(shapes, func_op, **kwargs)
        shape_list = list(shapes)
    return n, shape_list
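
For reference, a quick sketch of how create_op dispatches (these exact calls are illustrative and not part of the tests below): a plain shape tuple builds a single-input net, a list of shapes a multi-input net, and ntop > 1 a multi-output one.

# Sketch: a single-input ReLU net; shape_list is the flattened input shape.
n, shape_list = create_op((1, 3, 10, 10), L.ReLU)
# Sketch: a two-input Eltwise net (a list of shapes routes to miso_op).
n2, shapes2 = create_op([(1, 3, 8, 8), (1, 3, 8, 8)], L.Eltwise)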

Caffe BatchNorm

op_name = "BatchNorm"
root_dir = "./.temp"
os.makedirs(root_dir, exist_ok=True)  # make sure the scratch directory exists
proto_file = f"{root_dir}/{op_name}.prototxt"
blob_file = f"{root_dir}/{op_name}.caffemodel"
solver_file = f"{root_dir}/{op_name}_solver.prototxt"
shape = (1, 3, 10, 10)
n_netspec = siso_op(shape, L.BatchNorm, moving_average_fraction=0.999, eps=1e-5)
# obtain the .caffemodel file and .prototxt file
gen_model_files(n_netspec, proto_file, blob_file, solver_file)
# run model in Caffe
data = np.random.rand(*shape).astype(np.float32)
caffe_out = run_caffe(data, proto_file, blob_file)
WARNING: Logging before InitGoogleLogging() is written to STDERR
I0914 09:12:10.797545 1956768 solver.cpp:45] Initializing solver from parameters: 
train_net: "./.temp/BatchNorm.prototxt"
base_lr: 0.01
display: 1
max_iter: 100000
lr_policy: "inv"
gamma: 0.0001
power: 0.75
momentum: 0.9
weight_decay: 0.0005
snapshot: 100000
snapshot_prefix: "./.temp/BatchNorm"
I0914 09:12:10.797695 1956768 solver.cpp:92] Creating training net from train_net file: ./.temp/BatchNorm.prototxt
I0914 09:12:10.798242 1956768 net.cpp:53] Initializing net from parameters: 
state {
  phase: TRAIN
}
layer {
  name: "data"
  type: "Input"
  top: "data"
  input_param {
    shape {
      dim: 1
      dim: 3
      dim: 10
      dim: 10
    }
  }
}
layer {
  name: "output"
  type: "BatchNorm"
  bottom: "data"
  top: "output"
  batch_norm_param {
    moving_average_fraction: 0.999
    eps: 1e-05
  }
}
I0914 09:12:10.798317 1956768 layer_factory.hpp:77] Creating layer data
I0914 09:12:10.798336 1956768 net.cpp:86] Creating Layer data
I0914 09:12:10.798341 1956768 net.cpp:382] data -> data
I0914 09:12:10.798357 1956768 net.cpp:124] Setting up data
I0914 09:12:10.798362 1956768 net.cpp:131] Top shape: 1 3 10 10 (300)
I0914 09:12:10.798367 1956768 net.cpp:139] Memory required for data: 1200
I0914 09:12:10.798370 1956768 layer_factory.hpp:77] Creating layer output
I0914 09:12:10.798384 1956768 net.cpp:86] Creating Layer output
I0914 09:12:10.798388 1956768 net.cpp:408] output <- data
I0914 09:12:10.798391 1956768 net.cpp:382] output -> output
I0914 09:12:10.798404 1956768 net.cpp:124] Setting up output
I0914 09:12:10.798408 1956768 net.cpp:131] Top shape: 1 3 10 10 (300)
I0914 09:12:10.798411 1956768 net.cpp:139] Memory required for data: 2400
I0914 09:12:10.798420 1956768 net.cpp:202] output does not need backward computation.
I0914 09:12:10.798424 1956768 net.cpp:202] data does not need backward computation.
I0914 09:12:10.798426 1956768 net.cpp:244] This network produces output output
I0914 09:12:10.798430 1956768 net.cpp:257] Network initialization done.
I0914 09:12:10.798439 1956768 solver.cpp:57] Solver scaffolding done.
W0914 09:12:10.799118 1956768 _caffe.cpp:139] DEPRECATION WARNING - deprecated use of Python interface
W0914 09:12:10.799129 1956768 _caffe.cpp:140] Use this instead (with the named "weights" parameter):
W0914 09:12:10.799131 1956768 _caffe.cpp:142] Net('./.temp/BatchNorm.prototxt', 1, weights='./.temp/BatchNorm.caffemodel')
I0914 09:12:10.799191 1956768 net.cpp:53] Initializing net from parameters: 
state {
  phase: TEST
  level: 0
}
layer {
  name: "data"
  type: "Input"
  top: "data"
  input_param {
    shape {
      dim: 1
      dim: 3
      dim: 10
      dim: 10
    }
  }
}
layer {
  name: "output"
  type: "BatchNorm"
  bottom: "data"
  top: "output"
  batch_norm_param {
    moving_average_fraction: 0.999
    eps: 1e-05
  }
}
I0914 09:12:10.799225 1956768 layer_factory.hpp:77] Creating layer data
I0914 09:12:10.799233 1956768 net.cpp:86] Creating Layer data
I0914 09:12:10.799237 1956768 net.cpp:382] data -> data
I0914 09:12:10.799245 1956768 net.cpp:124] Setting up data
I0914 09:12:10.799249 1956768 net.cpp:131] Top shape: 1 3 10 10 (300)
I0914 09:12:10.799254 1956768 net.cpp:139] Memory required for data: 1200
I0914 09:12:10.799257 1956768 layer_factory.hpp:77] Creating layer output
I0914 09:12:10.799263 1956768 net.cpp:86] Creating Layer output
I0914 09:12:10.799266 1956768 net.cpp:408] output <- data
I0914 09:12:10.799270 1956768 net.cpp:382] output -> output
I0914 09:12:10.799283 1956768 net.cpp:124] Setting up output
I0914 09:12:10.799285 1956768 net.cpp:131] Top shape: 1 3 10 10 (300)
I0914 09:12:10.799289 1956768 net.cpp:139] Memory required for data: 2400
I0914 09:12:10.799297 1956768 net.cpp:202] output does not need backward computation.
I0914 09:12:10.799301 1956768 net.cpp:202] data does not need backward computation.
I0914 09:12:10.799304 1956768 net.cpp:244] This network produces output output
I0914 09:12:10.799309 1956768 net.cpp:257] Network initialization done.
I0914 09:12:10.800050 1956768 upgrade_proto.cpp:79] Attempting to upgrade batch norm layers using deprecated params: ./.temp/BatchNorm.caffemodel
I0914 09:12:10.800057 1956768 upgrade_proto.cpp:82] Successfully upgraded batch norm layers using deprecated params.
init_net = pb.NetParameter()
predict_net = pb.NetParameter()
# load model
with open(proto_file, "r") as f:
    text_format.Merge(f.read(), predict_net)
# load blob
with open(blob_file, "rb") as f:
    init_net.ParseFromString(f.read())
shape_dict = {"data": shape}
dtype_dict = {"data": "float32"}
mod, params = relay.frontend.from_caffe(init_net, predict_net, shape_dict, dtype_dict)
with tvm.transform.PassContext(opt_level=3):
    mod = relay.quantize.prerequisite_optimize(mod, params)
mod.show()
def @main(%data: Tensor[(1, 3, 10, 10), float32] /* ty=Tensor[(1, 3, 10, 10), float32] */) -> Tensor[(1, 3, 10, 10), float32] {
  %0 = multiply(%data, meta[relay.Constant][0] /* ty=Tensor[(3, 1, 1), float32] */) /* ty=Tensor[(1, 3, 10, 10), float32] */;
  add(%0, meta[relay.Constant][1] /* ty=Tensor[(3, 1, 1), float32] */) /* ty=Tensor[(1, 3, 10, 10), float32] */
}
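
As a numerical sanity check, the converted module can be built and executed, then compared against the Caffe result (a minimal sketch, assuming an LLVM-enabled TVM build):

with tvm.transform.PassContext(opt_level=3):
    lib = relay.build(mod, target="llvm", params=params)
dev = tvm.cpu(0)
m = graph_executor.GraphModule(lib["default"](dev))
m.set_input("data", tvm.nd.array(data))
m.run()
tvm_out = m.get_output(0).numpy()
tvm.testing.assert_allclose(caffe_out[0], tvm_out, rtol=1e-5, atol=1e-5)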

Caffe Concat

def _test_concat(shape_list, axis=1, op_name="concat"):
    proto_file = f"{root_dir}/{op_name}.prototxt"
    blob_file = f"{root_dir}/{op_name}.caffemodel"
    solver_file = f"{root_dir}/{op_name}_solver.prototxt"
    n_netspec = miso_op(shape_list, L.Concat, axis=axis)
    # obtain the .caffemodel file and .prototxt file
    gen_model_files(n_netspec, proto_file, blob_file, solver_file)
    # run model in Caffe
    data = [np.random.rand(*shape).astype(np.float32) for shape in shape_list]
    caffe_out = run_caffe(data, proto_file, blob_file)
    init_net = pb.NetParameter()
    predict_net = pb.NetParameter()
    # load model
    with open(proto_file, "r") as f:
        text_format.Merge(f.read(), predict_net)
    # load blob
    with open(blob_file, "rb") as f:
        init_net.ParseFromString(f.read())
    shape_dict = [{f"data_{k}": shape} for k, shape in enumerate(shape_list)]
    dtype_dict = [{f"data_{k}": "float32"} for k, shape in enumerate(shape_list)]
    mod, params = relay.frontend.from_caffe(init_net, predict_net, shape_dict, dtype_dict)
    return mod, params
mod, params = _test_concat([(1, 3, 10, 10), (1, 2, 10, 10)], axis=1)
I0914 09:12:11.276784 1956768 solver.cpp:45] Initializing solver from parameters: 
train_net: "./.temp/concat.prototxt"
base_lr: 0.01
display: 1
max_iter: 100000
lr_policy: "inv"
gamma: 0.0001
power: 0.75
momentum: 0.9
weight_decay: 0.0005
snapshot: 100000
snapshot_prefix: "./.temp/concat"
I0914 09:12:11.276893 1956768 solver.cpp:92] Creating training net from train_net file: ./.temp/concat.prototxt
I0914 09:12:11.276965 1956768 net.cpp:53] Initializing net from parameters: 
state {
  phase: TRAIN
}
layer {
  name: "data0"
  type: "Input"
  top: "data0"
  input_param {
    shape {
      dim: 1
      dim: 3
      dim: 10
      dim: 10
    }
  }
}
layer {
  name: "data1"
  type: "Input"
  top: "data1"
  input_param {
    shape {
      dim: 1
      dim: 2
      dim: 10
      dim: 10
    }
  }
}
layer {
  name: "output"
  type: "Concat"
  bottom: "data0"
  bottom: "data1"
  top: "output"
  concat_param {
    axis: 1
  }
}
I0914 09:12:11.277004 1956768 layer_factory.hpp:77] Creating layer data0
I0914 09:12:11.277014 1956768 net.cpp:86] Creating Layer data0
I0914 09:12:11.277019 1956768 net.cpp:382] data0 -> data0
I0914 09:12:11.277031 1956768 net.cpp:124] Setting up data0
I0914 09:12:11.277035 1956768 net.cpp:131] Top shape: 1 3 10 10 (300)
I0914 09:12:11.277040 1956768 net.cpp:139] Memory required for data: 1200
I0914 09:12:11.277045 1956768 layer_factory.hpp:77] Creating layer data1
I0914 09:12:11.277050 1956768 net.cpp:86] Creating Layer data1
I0914 09:12:11.277055 1956768 net.cpp:382] data1 -> data1
I0914 09:12:11.277061 1956768 net.cpp:124] Setting up data1
I0914 09:12:11.277065 1956768 net.cpp:131] Top shape: 1 2 10 10 (200)
I0914 09:12:11.277068 1956768 net.cpp:139] Memory required for data: 2000
I0914 09:12:11.277072 1956768 layer_factory.hpp:77] Creating layer output
I0914 09:12:11.277088 1956768 net.cpp:86] Creating Layer output
I0914 09:12:11.277092 1956768 net.cpp:408] output <- data0
I0914 09:12:11.277097 1956768 net.cpp:408] output <- data1
I0914 09:12:11.277102 1956768 net.cpp:382] output -> output
I0914 09:12:11.277109 1956768 net.cpp:124] Setting up output
I0914 09:12:11.277113 1956768 net.cpp:131] Top shape: 1 5 10 10 (500)
I0914 09:12:11.277117 1956768 net.cpp:139] Memory required for data: 4000
I0914 09:12:11.277120 1956768 net.cpp:202] output does not need backward computation.
I0914 09:12:11.277124 1956768 net.cpp:202] data1 does not need backward computation.
I0914 09:12:11.277127 1956768 net.cpp:202] data0 does not need backward computation.
I0914 09:12:11.277130 1956768 net.cpp:244] This network produces output output
I0914 09:12:11.277135 1956768 net.cpp:257] Network initialization done.
I0914 09:12:11.277148 1956768 solver.cpp:57] Solver scaffolding done.
W0914 09:12:11.277402 1956768 _caffe.cpp:139] DEPRECATION WARNING - deprecated use of Python interface
W0914 09:12:11.277412 1956768 _caffe.cpp:140] Use this instead (with the named "weights" parameter):
W0914 09:12:11.277415 1956768 _caffe.cpp:142] Net('./.temp/concat.prototxt', 1, weights='./.temp/concat.caffemodel')
I0914 09:12:11.277477 1956768 net.cpp:53] Initializing net from parameters: 
state {
  phase: TEST
  level: 0
}
layer {
  name: "data0"
  type: "Input"
  top: "data0"
  input_param {
    shape {
      dim: 1
      dim: 3
      dim: 10
      dim: 10
    }
  }
}
layer {
  name: "data1"
  type: "Input"
  top: "data1"
  input_param {
    shape {
      dim: 1
      dim: 2
      dim: 10
      dim: 10
    }
  }
}
layer {
  name: "output"
  type: "Concat"
  bottom: "data0"
  bottom: "data1"
  top: "output"
  concat_param {
    axis: 1
  }
}
I0914 09:12:11.277510 1956768 layer_factory.hpp:77] Creating layer data0
I0914 09:12:11.277516 1956768 net.cpp:86] Creating Layer data0
I0914 09:12:11.277521 1956768 net.cpp:382] data0 -> data0
I0914 09:12:11.277529 1956768 net.cpp:124] Setting up data0
I0914 09:12:11.277534 1956768 net.cpp:131] Top shape: 1 3 10 10 (300)
I0914 09:12:11.277537 1956768 net.cpp:139] Memory required for data: 1200
I0914 09:12:11.277541 1956768 layer_factory.hpp:77] Creating layer data1
I0914 09:12:11.277546 1956768 net.cpp:86] Creating Layer data1
I0914 09:12:11.277550 1956768 net.cpp:382] data1 -> data1
I0914 09:12:11.277557 1956768 net.cpp:124] Setting up data1
I0914 09:12:11.277560 1956768 net.cpp:131] Top shape: 1 2 10 10 (200)
I0914 09:12:11.277565 1956768 net.cpp:139] Memory required for data: 2000
I0914 09:12:11.277567 1956768 layer_factory.hpp:77] Creating layer output
I0914 09:12:11.277575 1956768 net.cpp:86] Creating Layer output
I0914 09:12:11.277577 1956768 net.cpp:408] output <- data0
I0914 09:12:11.277581 1956768 net.cpp:408] output <- data1
I0914 09:12:11.277586 1956768 net.cpp:382] output -> output
I0914 09:12:11.277592 1956768 net.cpp:124] Setting up output
I0914 09:12:11.277596 1956768 net.cpp:131] Top shape: 1 5 10 10 (500)
I0914 09:12:11.277599 1956768 net.cpp:139] Memory required for data: 4000
I0914 09:12:11.277603 1956768 net.cpp:202] output does not need backward computation.
I0914 09:12:11.277606 1956768 net.cpp:202] data1 does not need backward computation.
I0914 09:12:11.277609 1956768 net.cpp:202] data0 does not need backward computation.
I0914 09:12:11.277612 1956768 net.cpp:244] This network produces output output
I0914 09:12:11.277617 1956768 net.cpp:257] Network initialization done.
mod.show()
def @main(%data0, %data1) {
  %0 = (%data0, %data1);
  concatenate(%0, axis=1)
}
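
The Concat layer maps one-to-one onto Relay's concatenate with the same axis; the expected output shape can be double-checked with plain numpy:

a = np.random.rand(1, 3, 10, 10).astype(np.float32)
b = np.random.rand(1, 2, 10, 10).astype(np.float32)
np.concatenate([a, b], axis=1).shape  # (1, 5, 10, 10), matching the logged top shape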
mod, params = _test_concat([(3, 10, 10), (2, 10, 10)], axis=0)
I0914 09:12:11.297618 1956768 solver.cpp:45] Initializing solver from parameters: 
train_net: "./.temp/concat.prototxt"
base_lr: 0.01
display: 1
max_iter: 100000
lr_policy: "inv"
gamma: 0.0001
power: 0.75
momentum: 0.9
weight_decay: 0.0005
snapshot: 100000
snapshot_prefix: "./.temp/concat"
I0914 09:12:11.297690 1956768 solver.cpp:92] Creating training net from train_net file: ./.temp/concat.prototxt
I0914 09:12:11.297744 1956768 net.cpp:53] Initializing net from parameters: 
state {
  phase: TRAIN
}
layer {
  name: "data0"
  type: "Input"
  top: "data0"
  input_param {
    shape {
      dim: 3
      dim: 10
      dim: 10
    }
  }
}
layer {
  name: "data1"
  type: "Input"
  top: "data1"
  input_param {
    shape {
      dim: 2
      dim: 10
      dim: 10
    }
  }
}
layer {
  name: "output"
  type: "Concat"
  bottom: "data0"
  bottom: "data1"
  top: "output"
  concat_param {
    axis: 0
  }
}
I0914 09:12:11.297775 1956768 layer_factory.hpp:77] Creating layer data0
I0914 09:12:11.297782 1956768 net.cpp:86] Creating Layer data0
I0914 09:12:11.297786 1956768 net.cpp:382] data0 -> data0
I0914 09:12:11.297796 1956768 net.cpp:124] Setting up data0
I0914 09:12:11.297799 1956768 net.cpp:131] Top shape: 3 10 10 (300)
I0914 09:12:11.297804 1956768 net.cpp:139] Memory required for data: 1200
I0914 09:12:11.297807 1956768 layer_factory.hpp:77] Creating layer data1
I0914 09:12:11.297812 1956768 net.cpp:86] Creating Layer data1
I0914 09:12:11.297816 1956768 net.cpp:382] data1 -> data1
I0914 09:12:11.297822 1956768 net.cpp:124] Setting up data1
I0914 09:12:11.297825 1956768 net.cpp:131] Top shape: 2 10 10 (200)
I0914 09:12:11.297829 1956768 net.cpp:139] Memory required for data: 2000
I0914 09:12:11.297832 1956768 layer_factory.hpp:77] Creating layer output
I0914 09:12:11.297837 1956768 net.cpp:86] Creating Layer output
I0914 09:12:11.297842 1956768 net.cpp:408] output <- data0
I0914 09:12:11.297844 1956768 net.cpp:408] output <- data1
I0914 09:12:11.297849 1956768 net.cpp:382] output -> output
I0914 09:12:11.297855 1956768 net.cpp:124] Setting up output
I0914 09:12:11.297858 1956768 net.cpp:131] Top shape: 5 10 10 (500)
I0914 09:12:11.297863 1956768 net.cpp:139] Memory required for data: 4000
I0914 09:12:11.297865 1956768 net.cpp:202] output does not need backward computation.
I0914 09:12:11.297869 1956768 net.cpp:202] data1 does not need backward computation.
I0914 09:12:11.297873 1956768 net.cpp:202] data0 does not need backward computation.
I0914 09:12:11.297875 1956768 net.cpp:244] This network produces output output
I0914 09:12:11.297880 1956768 net.cpp:257] Network initialization done.
I0914 09:12:11.297889 1956768 solver.cpp:57] Solver scaffolding done.
W0914 09:12:11.298053 1956768 _caffe.cpp:139] DEPRECATION WARNING - deprecated use of Python interface
W0914 09:12:11.298061 1956768 _caffe.cpp:140] Use this instead (with the named "weights" parameter):
W0914 09:12:11.298064 1956768 _caffe.cpp:142] Net('./.temp/concat.prototxt', 1, weights='./.temp/concat.caffemodel')
I0914 09:12:11.298123 1956768 net.cpp:53] Initializing net from parameters: 
state {
  phase: TEST
  level: 0
}
layer {
  name: "data0"
  type: "Input"
  top: "data0"
  input_param {
    shape {
      dim: 3
      dim: 10
      dim: 10
    }
  }
}
layer {
  name: "data1"
  type: "Input"
  top: "data1"
  input_param {
    shape {
      dim: 2
      dim: 10
      dim: 10
    }
  }
}
layer {
  name: "output"
  type: "Concat"
  bottom: "data0"
  bottom: "data1"
  top: "output"
  concat_param {
    axis: 0
  }
}
I0914 09:12:11.298153 1956768 layer_factory.hpp:77] Creating layer data0
I0914 09:12:11.298161 1956768 net.cpp:86] Creating Layer data0
I0914 09:12:11.298164 1956768 net.cpp:382] data0 -> data0
I0914 09:12:11.298172 1956768 net.cpp:124] Setting up data0
I0914 09:12:11.298177 1956768 net.cpp:131] Top shape: 3 10 10 (300)
I0914 09:12:11.298180 1956768 net.cpp:139] Memory required for data: 1200
I0914 09:12:11.298184 1956768 layer_factory.hpp:77] Creating layer data1
I0914 09:12:11.298188 1956768 net.cpp:86] Creating Layer data1
I0914 09:12:11.298192 1956768 net.cpp:382] data1 -> data1
I0914 09:12:11.298198 1956768 net.cpp:124] Setting up data1
I0914 09:12:11.298202 1956768 net.cpp:131] Top shape: 2 10 10 (200)
I0914 09:12:11.298205 1956768 net.cpp:139] Memory required for data: 2000
I0914 09:12:11.298208 1956768 layer_factory.hpp:77] Creating layer output
I0914 09:12:11.298215 1956768 net.cpp:86] Creating Layer output
I0914 09:12:11.298218 1956768 net.cpp:408] output <- data0
I0914 09:12:11.298223 1956768 net.cpp:408] output <- data1
I0914 09:12:11.298228 1956768 net.cpp:382] output -> output
I0914 09:12:11.298233 1956768 net.cpp:124] Setting up output
I0914 09:12:11.298236 1956768 net.cpp:131] Top shape: 5 10 10 (500)
I0914 09:12:11.298240 1956768 net.cpp:139] Memory required for data: 4000
I0914 09:12:11.298243 1956768 net.cpp:202] output does not need backward computation.
I0914 09:12:11.298246 1956768 net.cpp:202] data1 does not need backward computation.
I0914 09:12:11.298249 1956768 net.cpp:202] data0 does not need backward computation.
I0914 09:12:11.298252 1956768 net.cpp:244] This network produces output output
I0914 09:12:11.298256 1956768 net.cpp:257] Network initialization done.
mod.show()
def @main(%data0, %data1) {
  %0 = (%data0, %data1);
  concatenate(%0)
}

Caffe Convolution

def _test_conv2d(shape_list, op_name="conv2d", **kwargs):
    proto_file = f"{root_dir}/{op_name}.prototxt"
    blob_file = f"{root_dir}/{op_name}.caffemodel"
    solver_file = f"{root_dir}/{op_name}_solver.prototxt"
    n_netspec = miso_op(shape_list, L.Convolution, **kwargs)
    # obtain the .caffemodel file and .prototxt file
    gen_model_files(n_netspec, proto_file, blob_file, solver_file)
    # run model in Caffe
    data = [np.random.rand(*shape).astype(np.float32) for shape in shape_list]
    caffe_out = run_caffe(data, proto_file, blob_file)
    init_net = pb.NetParameter()
    predict_net = pb.NetParameter()
    # load model
    with open(proto_file, "r") as f:
        text_format.Merge(f.read(), predict_net)
    # load blob
    with open(blob_file, "rb") as f:
        init_net.ParseFromString(f.read())
    shape_dict = [{f"data_{k}": shape} for k, shape in enumerate(shape_list)]
    dtype_dict = [{f"data_{k}": "float32"} for k, _ in enumerate(shape_list)]
    mod, params = relay.frontend.from_caffe(init_net, predict_net, shape_dict, dtype_dict)
    return mod, params
shape_list = [(1, 3, 10, 10)]
mod, params = _test_conv2d(
    shape_list,
    num_output=20,
    bias_term=True,
    pad=0,
    kernel_size=3,
    stride=2,
    dilation=1,
    weight_filler=dict(type="xavier"),
    bias_filler=dict(type="xavier"),
)
I0914 09:12:11.327899 1956768 solver.cpp:45] Initializing solver from parameters: 
train_net: "./.temp/conv2d.prototxt"
base_lr: 0.01
display: 1
max_iter: 100000
lr_policy: "inv"
gamma: 0.0001
power: 0.75
momentum: 0.9
weight_decay: 0.0005
snapshot: 100000
snapshot_prefix: "./.temp/conv2d"
I0914 09:12:11.327972 1956768 solver.cpp:92] Creating training net from train_net file: ./.temp/conv2d.prototxt
I0914 09:12:11.328034 1956768 net.cpp:53] Initializing net from parameters: 
state {
  phase: TRAIN
}
layer {
  name: "data0"
  type: "Input"
  top: "data0"
  input_param {
    shape {
      dim: 1
      dim: 3
      dim: 10
      dim: 10
    }
  }
}
layer {
  name: "output"
  type: "Convolution"
  bottom: "data0"
  top: "output"
  convolution_param {
    num_output: 20
    bias_term: true
    pad: 0
    kernel_size: 3
    stride: 2
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
I0914 09:12:11.328068 1956768 layer_factory.hpp:77] Creating layer data0
I0914 09:12:11.328075 1956768 net.cpp:86] Creating Layer data0
I0914 09:12:11.328079 1956768 net.cpp:382] data0 -> data0
I0914 09:12:11.328088 1956768 net.cpp:124] Setting up data0
I0914 09:12:11.328092 1956768 net.cpp:131] Top shape: 1 3 10 10 (300)
I0914 09:12:11.328097 1956768 net.cpp:139] Memory required for data: 1200
I0914 09:12:11.328100 1956768 layer_factory.hpp:77] Creating layer output
I0914 09:12:11.328121 1956768 net.cpp:86] Creating Layer output
I0914 09:12:11.328126 1956768 net.cpp:408] output <- data0
I0914 09:12:11.328131 1956768 net.cpp:382] output -> output
I0914 09:12:11.328189 1956768 net.cpp:124] Setting up output
I0914 09:12:11.328195 1956768 net.cpp:131] Top shape: 1 20 4 4 (320)
I0914 09:12:11.328200 1956768 net.cpp:139] Memory required for data: 2480
I0914 09:12:11.328207 1956768 net.cpp:202] output does not need backward computation.
I0914 09:12:11.328210 1956768 net.cpp:202] data0 does not need backward computation.
I0914 09:12:11.328213 1956768 net.cpp:244] This network produces output output
I0914 09:12:11.328218 1956768 net.cpp:257] Network initialization done.
I0914 09:12:11.328227 1956768 solver.cpp:57] Solver scaffolding done.
W0914 09:12:11.328648 1956768 _caffe.cpp:139] DEPRECATION WARNING - deprecated use of Python interface
W0914 09:12:11.328657 1956768 _caffe.cpp:140] Use this instead (with the named "weights" parameter):
W0914 09:12:11.328660 1956768 _caffe.cpp:142] Net('./.temp/conv2d.prototxt', 1, weights='./.temp/conv2d.caffemodel')
I0914 09:12:11.328723 1956768 net.cpp:53] Initializing net from parameters: 
state {
  phase: TEST
  level: 0
}
layer {
  name: "data0"
  type: "Input"
  top: "data0"
  input_param {
    shape {
      dim: 1
      dim: 3
      dim: 10
      dim: 10
    }
  }
}
layer {
  name: "output"
  type: "Convolution"
  bottom: "data0"
  top: "output"
  convolution_param {
    num_output: 20
    bias_term: true
    pad: 0
    kernel_size: 3
    stride: 2
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
I0914 09:12:11.328754 1956768 layer_factory.hpp:77] Creating layer data0
I0914 09:12:11.328761 1956768 net.cpp:86] Creating Layer data0
I0914 09:12:11.328765 1956768 net.cpp:382] data0 -> data0
I0914 09:12:11.328773 1956768 net.cpp:124] Setting up data0
I0914 09:12:11.328778 1956768 net.cpp:131] Top shape: 1 3 10 10 (300)
I0914 09:12:11.328781 1956768 net.cpp:139] Memory required for data: 1200
I0914 09:12:11.328785 1956768 layer_factory.hpp:77] Creating layer output
I0914 09:12:11.328792 1956768 net.cpp:86] Creating Layer output
I0914 09:12:11.328795 1956768 net.cpp:408] output <- data0
I0914 09:12:11.328800 1956768 net.cpp:382] output -> output
I0914 09:12:11.328820 1956768 net.cpp:124] Setting up output
I0914 09:12:11.328824 1956768 net.cpp:131] Top shape: 1 20 4 4 (320)
I0914 09:12:11.328828 1956768 net.cpp:139] Memory required for data: 2480
I0914 09:12:11.328835 1956768 net.cpp:202] output does not need backward computation.
I0914 09:12:11.328838 1956768 net.cpp:202] data0 does not need backward computation.
I0914 09:12:11.328841 1956768 net.cpp:244] This network produces output output
I0914 09:12:11.328845 1956768 net.cpp:257] Network initialization done.
mod.show()
def @main(%data0, %v_param_1: Tensor[(20, 3, 3, 3), float32], %v_param_2: Tensor[(20), float32]) {
  %0 = nn.conv2d(%data0, %v_param_1, strides=[2, 2], padding=[0, 0, 0, 0], channels=20, kernel_size=[3, 3]);
  nn.bias_add(%0, %v_param_2)
}
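
The logged top shape 1 20 4 4 follows from the usual convolution output-size formula, which Caffe evaluates in floor mode. A small helper sketch (conv_out_dim is illustrative, not part of the test code):

def conv_out_dim(size, pad, kernel, stride, dilation=1):
    """floor((size + 2*pad - dilation*(kernel-1) - 1) / stride) + 1"""
    return (size + 2 * pad - dilation * (kernel - 1) - 1) // stride + 1

conv_out_dim(10, 0, 3, 2)  # 4, so the 10x10 input yields a 4x4 output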
mod, params = _test_conv2d(
    shape_list,
    num_output=20,
    bias_term=False,
    pad=[1, 2],
    kernel_size=3,
    stride=2,
    dilation=1,
    weight_filler=dict(type="xavier"),
    bias_filler=dict(type="xavier"),
)
I0914 09:12:11.350931 1956768 solver.cpp:45] Initializing solver from parameters: 
train_net: "./.temp/conv2d.prototxt"
base_lr: 0.01
display: 1
max_iter: 100000
lr_policy: "inv"
gamma: 0.0001
power: 0.75
momentum: 0.9
weight_decay: 0.0005
snapshot: 100000
snapshot_prefix: "./.temp/conv2d"
I0914 09:12:11.351027 1956768 solver.cpp:92] Creating training net from train_net file: ./.temp/conv2d.prototxt
I0914 09:12:11.351083 1956768 net.cpp:53] Initializing net from parameters: 
state {
  phase: TRAIN
}
layer {
  name: "data0"
  type: "Input"
  top: "data0"
  input_param {
    shape {
      dim: 1
      dim: 3
      dim: 10
      dim: 10
    }
  }
}
layer {
  name: "output"
  type: "Convolution"
  bottom: "data0"
  top: "output"
  convolution_param {
    num_output: 20
    bias_term: false
    pad: 1
    pad: 2
    kernel_size: 3
    stride: 2
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
I0914 09:12:11.351116 1956768 layer_factory.hpp:77] Creating layer data0
I0914 09:12:11.351125 1956768 net.cpp:86] Creating Layer data0
I0914 09:12:11.351128 1956768 net.cpp:382] data0 -> data0
I0914 09:12:11.351137 1956768 net.cpp:124] Setting up data0
I0914 09:12:11.351141 1956768 net.cpp:131] Top shape: 1 3 10 10 (300)
I0914 09:12:11.351146 1956768 net.cpp:139] Memory required for data: 1200
I0914 09:12:11.351150 1956768 layer_factory.hpp:77] Creating layer output
I0914 09:12:11.351156 1956768 net.cpp:86] Creating Layer output
I0914 09:12:11.351161 1956768 net.cpp:408] output <- data0
I0914 09:12:11.351166 1956768 net.cpp:382] output -> output
I0914 09:12:11.351183 1956768 net.cpp:124] Setting up output
I0914 09:12:11.351187 1956768 net.cpp:131] Top shape: 1 20 5 6 (600)
I0914 09:12:11.351191 1956768 net.cpp:139] Memory required for data: 3600
I0914 09:12:11.351197 1956768 net.cpp:202] output does not need backward computation.
I0914 09:12:11.351200 1956768 net.cpp:202] data0 does not need backward computation.
I0914 09:12:11.351203 1956768 net.cpp:244] This network produces output output
I0914 09:12:11.351207 1956768 net.cpp:257] Network initialization done.
I0914 09:12:11.351217 1956768 solver.cpp:57] Solver scaffolding done.
W0914 09:12:11.351402 1956768 _caffe.cpp:139] DEPRECATION WARNING - deprecated use of Python interface
W0914 09:12:11.351411 1956768 _caffe.cpp:140] Use this instead (with the named "weights" parameter):
W0914 09:12:11.351414 1956768 _caffe.cpp:142] Net('./.temp/conv2d.prototxt', 1, weights='./.temp/conv2d.caffemodel')
I0914 09:12:11.351477 1956768 net.cpp:53] Initializing net from parameters: 
state {
  phase: TEST
  level: 0
}
layer {
  name: "data0"
  type: "Input"
  top: "data0"
  input_param {
    shape {
      dim: 1
      dim: 3
      dim: 10
      dim: 10
    }
  }
}
layer {
  name: "output"
  type: "Convolution"
  bottom: "data0"
  top: "output"
  convolution_param {
    num_output: 20
    bias_term: false
    pad: 1
    pad: 2
    kernel_size: 3
    stride: 2
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
I0914 09:12:11.351508 1956768 layer_factory.hpp:77] Creating layer data0
I0914 09:12:11.351516 1956768 net.cpp:86] Creating Layer data0
I0914 09:12:11.351519 1956768 net.cpp:382] data0 -> data0
I0914 09:12:11.351527 1956768 net.cpp:124] Setting up data0
I0914 09:12:11.351531 1956768 net.cpp:131] Top shape: 1 3 10 10 (300)
I0914 09:12:11.351536 1956768 net.cpp:139] Memory required for data: 1200
I0914 09:12:11.351538 1956768 layer_factory.hpp:77] Creating layer output
I0914 09:12:11.351545 1956768 net.cpp:86] Creating Layer output
I0914 09:12:11.351549 1956768 net.cpp:408] output <- data0
I0914 09:12:11.351553 1956768 net.cpp:382] output -> output
I0914 09:12:11.351572 1956768 net.cpp:124] Setting up output
I0914 09:12:11.351575 1956768 net.cpp:131] Top shape: 1 20 5 6 (600)
I0914 09:12:11.351579 1956768 net.cpp:139] Memory required for data: 3600
I0914 09:12:11.351584 1956768 net.cpp:202] output does not need backward computation.
I0914 09:12:11.351588 1956768 net.cpp:202] data0 does not need backward computation.
I0914 09:12:11.351590 1956768 net.cpp:244] This network produces output output
I0914 09:12:11.351595 1956768 net.cpp:257] Network initialization done.
mod.show()
def @main(%data0, %v_param_1: Tensor[(20, 3, 3, 3), float32]) {
  nn.conv2d(%data0, %v_param_1, strides=[2, 2], padding=[1, 2, 1, 2], channels=20, kernel_size=[3, 3])
}
mod, params = _test_conv2d(
    shape_list,
    num_output=20,
    bias_term=True,
    pad=[1, 2],
    kernel_size=[3, 5],
    stride=[2, 1],
    dilation=[1, 2],
    weight_filler=dict(type="xavier"),
    bias_filler=dict(type="xavier"),
)
I0914 09:12:11.373440 1956768 solver.cpp:45] Initializing solver from parameters: 
train_net: "./.temp/conv2d.prototxt"
base_lr: 0.01
display: 1
max_iter: 100000
lr_policy: "inv"
gamma: 0.0001
power: 0.75
momentum: 0.9
weight_decay: 0.0005
snapshot: 100000
snapshot_prefix: "./.temp/conv2d"
I0914 09:12:11.373510 1956768 solver.cpp:92] Creating training net from train_net file: ./.temp/conv2d.prototxt
I0914 09:12:11.373567 1956768 net.cpp:53] Initializing net from parameters: 
state {
  phase: TRAIN
}
layer {
  name: "data0"
  type: "Input"
  top: "data0"
  input_param {
    shape {
      dim: 1
      dim: 3
      dim: 10
      dim: 10
    }
  }
}
layer {
  name: "output"
  type: "Convolution"
  bottom: "data0"
  top: "output"
  convolution_param {
    num_output: 20
    bias_term: true
    pad: 1
    pad: 2
    kernel_size: 3
    kernel_size: 5
    stride: 2
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "xavier"
    }
    dilation: 1
    dilation: 2
  }
}
I0914 09:12:11.373600 1956768 layer_factory.hpp:77] Creating layer data0
I0914 09:12:11.373607 1956768 net.cpp:86] Creating Layer data0
I0914 09:12:11.373612 1956768 net.cpp:382] data0 -> data0
I0914 09:12:11.373620 1956768 net.cpp:124] Setting up data0
I0914 09:12:11.373625 1956768 net.cpp:131] Top shape: 1 3 10 10 (300)
I0914 09:12:11.373629 1956768 net.cpp:139] Memory required for data: 1200
I0914 09:12:11.373633 1956768 layer_factory.hpp:77] Creating layer output
I0914 09:12:11.373639 1956768 net.cpp:86] Creating Layer output
I0914 09:12:11.373643 1956768 net.cpp:408] output <- data0
I0914 09:12:11.373648 1956768 net.cpp:382] output -> output
I0914 09:12:11.373670 1956768 net.cpp:124] Setting up output
I0914 09:12:11.373674 1956768 net.cpp:131] Top shape: 1 20 5 6 (600)
I0914 09:12:11.373678 1956768 net.cpp:139] Memory required for data: 3600
I0914 09:12:11.373685 1956768 net.cpp:202] output does not need backward computation.
I0914 09:12:11.373688 1956768 net.cpp:202] data0 does not need backward computation.
I0914 09:12:11.373692 1956768 net.cpp:244] This network produces output output
I0914 09:12:11.373696 1956768 net.cpp:257] Network initialization done.
I0914 09:12:11.373704 1956768 solver.cpp:57] Solver scaffolding done.
W0914 09:12:11.373950 1956768 _caffe.cpp:139] DEPRECATION WARNING - deprecated use of Python interface
W0914 09:12:11.373958 1956768 _caffe.cpp:140] Use this instead (with the named "weights" parameter):
W0914 09:12:11.373962 1956768 _caffe.cpp:142] Net('./.temp/conv2d.prototxt', 1, weights='./.temp/conv2d.caffemodel')
I0914 09:12:11.374025 1956768 net.cpp:53] Initializing net from parameters: 
state {
  phase: TEST
  level: 0
}
layer {
  name: "data0"
  type: "Input"
  top: "data0"
  input_param {
    shape {
      dim: 1
      dim: 3
      dim: 10
      dim: 10
    }
  }
}
layer {
  name: "output"
  type: "Convolution"
  bottom: "data0"
  top: "output"
  convolution_param {
    num_output: 20
    bias_term: true
    pad: 1
    pad: 2
    kernel_size: 3
    kernel_size: 5
    stride: 2
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "xavier"
    }
    dilation: 1
    dilation: 2
  }
}
I0914 09:12:11.374056 1956768 layer_factory.hpp:77] Creating layer data0
I0914 09:12:11.374063 1956768 net.cpp:86] Creating Layer data0
I0914 09:12:11.374068 1956768 net.cpp:382] data0 -> data0
I0914 09:12:11.374075 1956768 net.cpp:124] Setting up data0
I0914 09:12:11.374079 1956768 net.cpp:131] Top shape: 1 3 10 10 (300)
I0914 09:12:11.374084 1956768 net.cpp:139] Memory required for data: 1200
I0914 09:12:11.374089 1956768 layer_factory.hpp:77] Creating layer output
I0914 09:12:11.374094 1956768 net.cpp:86] Creating Layer output
I0914 09:12:11.374099 1956768 net.cpp:408] output <- data0
I0914 09:12:11.374104 1956768 net.cpp:382] output -> output
I0914 09:12:11.374125 1956768 net.cpp:124] Setting up output
I0914 09:12:11.374128 1956768 net.cpp:131] Top shape: 1 20 5 6 (600)
I0914 09:12:11.374132 1956768 net.cpp:139] Memory required for data: 3600
I0914 09:12:11.374138 1956768 net.cpp:202] output does not need backward computation.
I0914 09:12:11.374142 1956768 net.cpp:202] data0 does not need backward computation.
I0914 09:12:11.374145 1956768 net.cpp:244] This network produces output output
I0914 09:12:11.374150 1956768 net.cpp:257] Network initialization done.
mod.show()
def @main(%data0, %v_param_1: Tensor[(20, 3, 3, 5), float32], %v_param_2: Tensor[(20), float32]) {
  %0 = nn.conv2d(%data0, %v_param_1, strides=[2, 1], padding=[1, 2, 1, 2], dilation=[1, 2], channels=20, kernel_size=[3, 5]);
  nn.bias_add(%0, %v_param_2)
}
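
With asymmetric kernel, stride, and dilation, the same conv_out_dim sketch from above reproduces the logged top shape 1 20 5 6:

conv_out_dim(10, 1, 3, 2, dilation=1), conv_out_dim(10, 2, 5, 1, dilation=2)  # (5, 6)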
mod, params = _test_conv2d(
    shape_list,
    num_output=20,
    bias_term=True,
    pad_h=1,
    pad_w=2,
    kernel_h=3,
    kernel_w=5,
    stride_h=2,
    stride_w=1,
    dilation=[1, 2],
    weight_filler=dict(type="xavier"),
    bias_filler=dict(type="xavier"),
)
I0914 09:12:11.396004 1956768 solver.cpp:45] Initializing solver from parameters: 
train_net: "./.temp/conv2d.prototxt"
base_lr: 0.01
display: 1
max_iter: 100000
lr_policy: "inv"
gamma: 0.0001
power: 0.75
momentum: 0.9
weight_decay: 0.0005
snapshot: 100000
snapshot_prefix: "./.temp/conv2d"
I0914 09:12:11.396075 1956768 solver.cpp:92] Creating training net from train_net file: ./.temp/conv2d.prototxt
I0914 09:12:11.396133 1956768 net.cpp:53] Initializing net from parameters: 
state {
  phase: TRAIN
}
layer {
  name: "data0"
  type: "Input"
  top: "data0"
  input_param {
    shape {
      dim: 1
      dim: 3
      dim: 10
      dim: 10
    }
  }
}
layer {
  name: "output"
  type: "Convolution"
  bottom: "data0"
  top: "output"
  convolution_param {
    num_output: 20
    bias_term: true
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "xavier"
    }
    pad_h: 1
    pad_w: 2
    kernel_h: 3
    kernel_w: 5
    stride_h: 2
    stride_w: 1
    dilation: 1
    dilation: 2
  }
}
I0914 09:12:11.396165 1956768 layer_factory.hpp:77] Creating layer data0
I0914 09:12:11.396173 1956768 net.cpp:86] Creating Layer data0
I0914 09:12:11.396178 1956768 net.cpp:382] data0 -> data0
I0914 09:12:11.396186 1956768 net.cpp:124] Setting up data0
I0914 09:12:11.396190 1956768 net.cpp:131] Top shape: 1 3 10 10 (300)
I0914 09:12:11.396195 1956768 net.cpp:139] Memory required for data: 1200
I0914 09:12:11.396198 1956768 layer_factory.hpp:77] Creating layer output
I0914 09:12:11.396205 1956768 net.cpp:86] Creating Layer output
I0914 09:12:11.396209 1956768 net.cpp:408] output <- data0
I0914 09:12:11.396214 1956768 net.cpp:382] output -> output
I0914 09:12:11.396234 1956768 net.cpp:124] Setting up output
I0914 09:12:11.396239 1956768 net.cpp:131] Top shape: 1 20 5 6 (600)
I0914 09:12:11.396243 1956768 net.cpp:139] Memory required for data: 3600
I0914 09:12:11.396250 1956768 net.cpp:202] output does not need backward computation.
I0914 09:12:11.396252 1956768 net.cpp:202] data0 does not need backward computation.
I0914 09:12:11.396255 1956768 net.cpp:244] This network produces output output
I0914 09:12:11.396260 1956768 net.cpp:257] Network initialization done.
I0914 09:12:11.396268 1956768 solver.cpp:57] Solver scaffolding done.
W0914 09:12:11.396432 1956768 _caffe.cpp:139] DEPRECATION WARNING - deprecated use of Python interface
W0914 09:12:11.396442 1956768 _caffe.cpp:140] Use this instead (with the named "weights" parameter):
W0914 09:12:11.396445 1956768 _caffe.cpp:142] Net('./.temp/conv2d.prototxt', 1, weights='./.temp/conv2d.caffemodel')
I0914 09:12:11.396507 1956768 net.cpp:53] Initializing net from parameters: 
state {
  phase: TEST
  level: 0
}
layer {
  name: "data0"
  type: "Input"
  top: "data0"
  input_param {
    shape {
      dim: 1
      dim: 3
      dim: 10
      dim: 10
    }
  }
}
layer {
  name: "output"
  type: "Convolution"
  bottom: "data0"
  top: "output"
  convolution_param {
    num_output: 20
    bias_term: true
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "xavier"
    }
    pad_h: 1
    pad_w: 2
    kernel_h: 3
    kernel_w: 5
    stride_h: 2
    stride_w: 1
    dilation: 1
    dilation: 2
  }
}
I0914 09:12:11.396538 1956768 layer_factory.hpp:77] Creating layer data0
I0914 09:12:11.396545 1956768 net.cpp:86] Creating Layer data0
I0914 09:12:11.396549 1956768 net.cpp:382] data0 -> data0
I0914 09:12:11.396557 1956768 net.cpp:124] Setting up data0
I0914 09:12:11.396561 1956768 net.cpp:131] Top shape: 1 3 10 10 (300)
I0914 09:12:11.396565 1956768 net.cpp:139] Memory required for data: 1200
I0914 09:12:11.396569 1956768 layer_factory.hpp:77] Creating layer output
I0914 09:12:11.396575 1956768 net.cpp:86] Creating Layer output
I0914 09:12:11.396579 1956768 net.cpp:408] output <- data0
I0914 09:12:11.396584 1956768 net.cpp:382] output -> output
I0914 09:12:11.396605 1956768 net.cpp:124] Setting up output
I0914 09:12:11.396608 1956768 net.cpp:131] Top shape: 1 20 5 6 (600)
I0914 09:12:11.396612 1956768 net.cpp:139] Memory required for data: 3600
I0914 09:12:11.396618 1956768 net.cpp:202] output does not need backward computation.
I0914 09:12:11.396621 1956768 net.cpp:202] data0 does not need backward computation.
I0914 09:12:11.396624 1956768 net.cpp:244] This network produces output output
I0914 09:12:11.396628 1956768 net.cpp:257] Network initialization done.
mod.show()
def @main(%data0, %v_param_1: Tensor[(20, 3, 3, 5), float32], %v_param_2: Tensor[(20), float32]) {
  %0 = nn.conv2d(%data0, %v_param_1, strides=[2, 1], padding=[1, 2, 1, 2], dilation=[1, 2], channels=20, kernel_size=[3, 5]);
  nn.bias_add(%0, %v_param_2)
}
mod, params = _test_conv2d(
    [(1, 2, 10, 10)],
    num_output=20,
    bias_term=True,
    pad=[1, 2],
    kernel_size=[3, 5],
    stride=[2, 1],
    dilation=[1, 2],
    weight_filler=dict(type="xavier"),
    bias_filler=dict(type="xavier"),
    group=2,
)
I0914 09:12:11.421512 1956768 solver.cpp:45] Initializing solver from parameters: 
train_net: "./.temp/conv2d.prototxt"
base_lr: 0.01
display: 1
max_iter: 100000
lr_policy: "inv"
gamma: 0.0001
power: 0.75
momentum: 0.9
weight_decay: 0.0005
snapshot: 100000
snapshot_prefix: "./.temp/conv2d"
I0914 09:12:11.421586 1956768 solver.cpp:92] Creating training net from train_net file: ./.temp/conv2d.prototxt
I0914 09:12:11.421649 1956768 net.cpp:53] Initializing net from parameters: 
state {
  phase: TRAIN
}
layer {
  name: "data0"
  type: "Input"
  top: "data0"
  input_param {
    shape {
      dim: 1
      dim: 2
      dim: 10
      dim: 10
    }
  }
}
layer {
  name: "output"
  type: "Convolution"
  bottom: "data0"
  top: "output"
  convolution_param {
    num_output: 20
    bias_term: true
    pad: 1
    pad: 2
    kernel_size: 3
    kernel_size: 5
    group: 2
    stride: 2
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "xavier"
    }
    dilation: 1
    dilation: 2
  }
}
I0914 09:12:11.421682 1956768 layer_factory.hpp:77] Creating layer data0
I0914 09:12:11.421690 1956768 net.cpp:86] Creating Layer data0
I0914 09:12:11.421695 1956768 net.cpp:382] data0 -> data0
I0914 09:12:11.421705 1956768 net.cpp:124] Setting up data0
I0914 09:12:11.421708 1956768 net.cpp:131] Top shape: 1 2 10 10 (200)
I0914 09:12:11.421712 1956768 net.cpp:139] Memory required for data: 800
I0914 09:12:11.421716 1956768 layer_factory.hpp:77] Creating layer output
I0914 09:12:11.421723 1956768 net.cpp:86] Creating Layer output
I0914 09:12:11.421726 1956768 net.cpp:408] output <- data0
I0914 09:12:11.421731 1956768 net.cpp:382] output -> output
I0914 09:12:11.421751 1956768 net.cpp:124] Setting up output
I0914 09:12:11.421754 1956768 net.cpp:131] Top shape: 1 20 5 6 (600)
I0914 09:12:11.421758 1956768 net.cpp:139] Memory required for data: 3200
I0914 09:12:11.421765 1956768 net.cpp:202] output does not need backward computation.
I0914 09:12:11.421769 1956768 net.cpp:202] data0 does not need backward computation.
I0914 09:12:11.421772 1956768 net.cpp:244] This network produces output output
I0914 09:12:11.421777 1956768 net.cpp:257] Network initialization done.
I0914 09:12:11.421784 1956768 solver.cpp:57] Solver scaffolding done.
W0914 09:12:11.421972 1956768 _caffe.cpp:139] DEPRECATION WARNING - deprecated use of Python interface
W0914 09:12:11.421981 1956768 _caffe.cpp:140] Use this instead (with the named "weights" parameter):
W0914 09:12:11.421985 1956768 _caffe.cpp:142] Net('./.temp/conv2d.prototxt', 1, weights='./.temp/conv2d.caffemodel')
I0914 09:12:11.422055 1956768 net.cpp:53] Initializing net from parameters: 
state {
  phase: TEST
  level: 0
}
layer {
  name: "data0"
  type: "Input"
  top: "data0"
  input_param {
    shape {
      dim: 1
      dim: 2
      dim: 10
      dim: 10
    }
  }
}
layer {
  name: "output"
  type: "Convolution"
  bottom: "data0"
  top: "output"
  convolution_param {
    num_output: 20
    bias_term: true
    pad: 1
    pad: 2
    kernel_size: 3
    kernel_size: 5
    group: 2
    stride: 2
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "xavier"
    }
    dilation: 1
    dilation: 2
  }
}
I0914 09:12:11.422086 1956768 layer_factory.hpp:77] Creating layer data0
I0914 09:12:11.422092 1956768 net.cpp:86] Creating Layer data0
I0914 09:12:11.422096 1956768 net.cpp:382] data0 -> data0
I0914 09:12:11.422104 1956768 net.cpp:124] Setting up data0
I0914 09:12:11.422108 1956768 net.cpp:131] Top shape: 1 2 10 10 (200)
I0914 09:12:11.422112 1956768 net.cpp:139] Memory required for data: 800
I0914 09:12:11.422116 1956768 layer_factory.hpp:77] Creating layer output
I0914 09:12:11.422122 1956768 net.cpp:86] Creating Layer output
I0914 09:12:11.422127 1956768 net.cpp:408] output <- data0
I0914 09:12:11.422132 1956768 net.cpp:382] output -> output
I0914 09:12:11.422149 1956768 net.cpp:124] Setting up output
I0914 09:12:11.422153 1956768 net.cpp:131] Top shape: 1 20 5 6 (600)
I0914 09:12:11.422158 1956768 net.cpp:139] Memory required for data: 3200
I0914 09:12:11.422163 1956768 net.cpp:202] output does not need backward computation.
I0914 09:12:11.422168 1956768 net.cpp:202] data0 does not need backward computation.
I0914 09:12:11.422170 1956768 net.cpp:244] This network produces output output
I0914 09:12:11.422174 1956768 net.cpp:257] Network initialization done.
mod.show()
def @main(%data0, %v_param_1: Tensor[(20, 1, 3, 5), float32], %v_param_2: Tensor[(20), float32]) {
  %0 = nn.conv2d(%data0, %v_param_1, strides=[2, 1], padding=[1, 2, 1, 2], dilation=[1, 2], groups=2, channels=20, kernel_size=[3, 5]);
  nn.bias_add(%0, %v_param_2)
}
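
With group=2 the input channels are split across groups, so the converted weight shape is (num_output, in_channels // group, kernel_h, kernel_w); that is why v_param_1 above is Tensor[(20, 1, 3, 5)]:

num_output, in_channels, group = 20, 2, 2
(num_output, in_channels // group, 3, 5)  # (20, 1, 3, 5)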

Caffe Crop

from caffe_utils import _test_op
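
Caffe's Crop layer trims the first bottom to the second bottom's extents for every dimension from axis onward (the default axis is 2), starting at the given offsets; a single offset value is broadcast to all cropped axes. A numpy sketch of those semantics (crop_like is illustrative, not the _test_op helper):

def crop_like(a, ref, axis=2, offset=(0,)):
    """Crop `a` to `ref`'s shape along dims >= axis, starting at `offset`."""
    n_crop = a.ndim - axis
    offsets = list(offset) * n_crop if len(offset) == 1 else list(offset)
    slices = [slice(None)] * axis
    for d, off in zip(range(axis, a.ndim), offsets):
        slices.append(slice(off, off + ref.shape[d]))
    return a[tuple(slices)]

crop_like(np.zeros((10, 10, 120, 120)), np.zeros((10, 5, 50, 60)), axis=1, offset=(2,)).shape
# (10, 5, 50, 60)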

def _test_crop(data, **kwargs):
    """One iteration of Crop"""
    _test_op(data, L.Crop, "Crop", **kwargs)


def test_forward_Crop():
    """Crop"""
    _test_crop([np.random.rand(10, 10, 120, 120), np.random.rand(10, 5, 50, 60)])
    _test_crop([np.random.rand(10, 10, 120, 120), np.random.rand(10, 5, 50, 60)], axis=1)
    _test_crop([np.random.rand(10, 10, 120, 120), np.random.rand(10, 5, 50, 60)], axis=1, offset=2)
    _test_crop(
        [np.random.rand(10, 10, 120, 120), np.random.rand(10, 5, 50, 60)], axis=1, offset=[1, 2, 4]
    )
    _test_crop(
        [np.random.rand(10, 10, 120, 120), np.random.rand(10, 5, 50, 60)], axis=2, offset=[2, 4]
    )
    _test_crop([np.random.rand(10, 120, 120), np.random.rand(5, 50, 60)], axis=1, offset=[2, 4])
    _test_crop([np.random.rand(120, 120), np.random.rand(50, 60)], axis=0, offset=[2, 4])
test_forward_Crop()
I0914 09:12:30.343927 1956768 solver.cpp:45] Initializing solver from parameters: 
train_net: "/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60.prototxt"
base_lr: 0.01
display: 1
max_iter: 100000
lr_policy: "inv"
gamma: 0.0001
power: 0.75
momentum: 0.9
weight_decay: 0.0005
snapshot: 100000
snapshot_prefix: "/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60"
I0914 09:12:30.344040 1956768 solver.cpp:92] Creating training net from train_net file: /home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60.prototxt
I0914 09:12:30.344126 1956768 net.cpp:53] Initializing net from parameters: 
state {
  phase: TRAIN
}
layer {
  name: "data0"
  type: "Input"
  top: "data0"
  input_param {
    shape {
      dim: 10
      dim: 10
      dim: 120
      dim: 120
    }
  }
}
layer {
  name: "data1"
  type: "Input"
  top: "data1"
  input_param {
    shape {
      dim: 10
      dim: 5
      dim: 50
      dim: 60
    }
  }
}
layer {
  name: "output"
  type: "Crop"
  bottom: "data0"
  bottom: "data1"
  top: "output"
}
I0914 09:12:30.344178 1956768 layer_factory.hpp:77] Creating layer data0
I0914 09:12:30.344192 1956768 net.cpp:86] Creating Layer data0
I0914 09:12:30.344200 1956768 net.cpp:382] data0 -> data0
I0914 09:12:30.344216 1956768 net.cpp:124] Setting up data0
I0914 09:12:30.344221 1956768 net.cpp:131] Top shape: 10 10 120 120 (1440000)
I0914 09:12:30.344230 1956768 net.cpp:139] Memory required for data: 5760000
I0914 09:12:30.344236 1956768 layer_factory.hpp:77] Creating layer data1
I0914 09:12:30.344244 1956768 net.cpp:86] Creating Layer data1
I0914 09:12:30.344250 1956768 net.cpp:382] data1 -> data1
I0914 09:12:30.344260 1956768 net.cpp:124] Setting up data1
I0914 09:12:30.344265 1956768 net.cpp:131] Top shape: 10 5 50 60 (150000)
I0914 09:12:30.344271 1956768 net.cpp:139] Memory required for data: 6360000
I0914 09:12:30.344276 1956768 layer_factory.hpp:77] Creating layer output
I0914 09:12:30.344298 1956768 net.cpp:86] Creating Layer output
I0914 09:12:30.344303 1956768 net.cpp:408] output <- data0
I0914 09:12:30.344309 1956768 net.cpp:408] output <- data1
I0914 09:12:30.344316 1956768 net.cpp:382] output -> output
I0914 09:12:30.344332 1956768 net.cpp:124] Setting up output
I0914 09:12:30.344336 1956768 net.cpp:131] Top shape: 10 10 50 60 (300000)
I0914 09:12:30.344343 1956768 net.cpp:139] Memory required for data: 7560000
I0914 09:12:30.344348 1956768 net.cpp:202] output does not need backward computation.
I0914 09:12:30.344352 1956768 net.cpp:202] data1 does not need backward computation.
I0914 09:12:30.344357 1956768 net.cpp:202] data0 does not need backward computation.
I0914 09:12:30.344362 1956768 net.cpp:244] This network produces output output
I0914 09:12:30.344368 1956768 net.cpp:257] Network initialization done.
I0914 09:12:30.344383 1956768 solver.cpp:57] Solver scaffolding done.
W0914 09:12:30.344502 1956768 _caffe.cpp:139] DEPRECATION WARNING - deprecated use of Python interface
W0914 09:12:30.344512 1956768 _caffe.cpp:140] Use this instead (with the named "weights" parameter):
W0914 09:12:30.344517 1956768 _caffe.cpp:142] Net('/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60.prototxt', 1, weights='/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60.caffemodel')
I0914 09:12:30.344587 1956768 net.cpp:53] Initializing net from parameters: 
state {
  phase: TEST
  level: 0
}
layer {
  name: "data0"
  type: "Input"
  top: "data0"
  input_param {
    shape {
      dim: 10
      dim: 10
      dim: 120
      dim: 120
    }
  }
}
layer {
  name: "data1"
  type: "Input"
  top: "data1"
  input_param {
    shape {
      dim: 10
      dim: 5
      dim: 50
      dim: 60
    }
  }
}
layer {
  name: "output"
  type: "Crop"
  bottom: "data0"
  bottom: "data1"
  top: "output"
}
I0914 09:12:30.344625 1956768 layer_factory.hpp:77] Creating layer data0
I0914 09:12:30.344635 1956768 net.cpp:86] Creating Layer data0
I0914 09:12:30.344640 1956768 net.cpp:382] data0 -> data0
I0914 09:12:30.344650 1956768 net.cpp:124] Setting up data0
I0914 09:12:30.344655 1956768 net.cpp:131] Top shape: 10 10 120 120 (1440000)
I0914 09:12:30.344663 1956768 net.cpp:139] Memory required for data: 5760000
I0914 09:12:30.344668 1956768 layer_factory.hpp:77] Creating layer data1
I0914 09:12:30.344676 1956768 net.cpp:86] Creating Layer data1
I0914 09:12:30.344681 1956768 net.cpp:382] data1 -> data1
I0914 09:12:30.344688 1956768 net.cpp:124] Setting up data1
I0914 09:12:30.344692 1956768 net.cpp:131] Top shape: 10 5 50 60 (150000)
I0914 09:12:30.344700 1956768 net.cpp:139] Memory required for data: 6360000
I0914 09:12:30.344705 1956768 layer_factory.hpp:77] Creating layer output
I0914 09:12:30.344712 1956768 net.cpp:86] Creating Layer output
I0914 09:12:30.344717 1956768 net.cpp:408] output <- data0
I0914 09:12:30.344722 1956768 net.cpp:408] output <- data1
I0914 09:12:30.344727 1956768 net.cpp:382] output -> output
I0914 09:12:30.344738 1956768 net.cpp:124] Setting up output
I0914 09:12:30.344743 1956768 net.cpp:131] Top shape: 10 10 50 60 (300000)
I0914 09:12:30.344748 1956768 net.cpp:139] Memory required for data: 7560000
I0914 09:12:30.344753 1956768 net.cpp:202] output does not need backward computation.
I0914 09:12:30.344758 1956768 net.cpp:202] data1 does not need backward computation.
I0914 09:12:30.344761 1956768 net.cpp:202] data0 does not need backward computation.
I0914 09:12:30.344765 1956768 net.cpp:244] This network produces output output
I0914 09:12:30.344771 1956768 net.cpp:257] Network initialization done.
I0914 09:12:31.032979 1956768 solver.cpp:45] Initializing solver from parameters: 
train_net: "/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_1.prototxt"
base_lr: 0.01
display: 1
max_iter: 100000
lr_policy: "inv"
gamma: 0.0001
power: 0.75
momentum: 0.9
weight_decay: 0.0005
snapshot: 100000
snapshot_prefix: "/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_1"
I0914 09:12:31.033103 1956768 solver.cpp:92] Creating training net from train_net file: /home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_1.prototxt
I0914 09:12:31.033195 1956768 net.cpp:53] Initializing net from parameters: 
state {
  phase: TRAIN
}
layer {
  name: "data0"
  type: "Input"
  top: "data0"
  input_param {
    shape {
      dim: 10
      dim: 10
      dim: 120
      dim: 120
    }
  }
}
layer {
  name: "data1"
  type: "Input"
  top: "data1"
  input_param {
    shape {
      dim: 10
      dim: 5
      dim: 50
      dim: 60
    }
  }
}
layer {
  name: "output"
  type: "Crop"
  bottom: "data0"
  bottom: "data1"
  top: "output"
  crop_param {
    axis: 1
  }
}
I0914 09:12:31.033250 1956768 layer_factory.hpp:77] Creating layer data0
I0914 09:12:31.033263 1956768 net.cpp:86] Creating Layer data0
I0914 09:12:31.033272 1956768 net.cpp:382] data0 -> data0
I0914 09:12:31.033289 1956768 net.cpp:124] Setting up data0
I0914 09:12:31.033295 1956768 net.cpp:131] Top shape: 10 10 120 120 (1440000)
I0914 09:12:31.033304 1956768 net.cpp:139] Memory required for data: 5760000
I0914 09:12:31.033313 1956768 layer_factory.hpp:77] Creating layer data1
I0914 09:12:31.033320 1956768 net.cpp:86] Creating Layer data1
I0914 09:12:31.033326 1956768 net.cpp:382] data1 -> data1
I0914 09:12:31.033335 1956768 net.cpp:124] Setting up data1
I0914 09:12:31.033339 1956768 net.cpp:131] Top shape: 10 5 50 60 (150000)
I0914 09:12:31.033346 1956768 net.cpp:139] Memory required for data: 6360000
I0914 09:12:31.033350 1956768 layer_factory.hpp:77] Creating layer output
I0914 09:12:31.033358 1956768 net.cpp:86] Creating Layer output
I0914 09:12:31.033363 1956768 net.cpp:408] output <- data0
I0914 09:12:31.033370 1956768 net.cpp:408] output <- data1
I0914 09:12:31.033376 1956768 net.cpp:382] output -> output
I0914 09:12:31.033390 1956768 net.cpp:124] Setting up output
I0914 09:12:31.033396 1956768 net.cpp:131] Top shape: 10 5 50 60 (150000)
I0914 09:12:31.033401 1956768 net.cpp:139] Memory required for data: 6960000
I0914 09:12:31.033406 1956768 net.cpp:202] output does not need backward computation.
I0914 09:12:31.033411 1956768 net.cpp:202] data1 does not need backward computation.
I0914 09:12:31.033416 1956768 net.cpp:202] data0 does not need backward computation.
I0914 09:12:31.033421 1956768 net.cpp:244] This network produces output output
I0914 09:12:31.033427 1956768 net.cpp:257] Network initialization done.
I0914 09:12:31.033443 1956768 solver.cpp:57] Solver scaffolding done.
W0914 09:12:31.033558 1956768 _caffe.cpp:139] DEPRECATION WARNING - deprecated use of Python interface
W0914 09:12:31.033568 1956768 _caffe.cpp:140] Use this instead (with the named "weights" parameter):
W0914 09:12:31.033573 1956768 _caffe.cpp:142] Net('/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_1.prototxt', 1, weights='/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_1.caffemodel')
I0914 09:12:31.033648 1956768 net.cpp:53] Initializing net from parameters: 
state {
  phase: TEST
  level: 0
}
layer {
  name: "data0"
  type: "Input"
  top: "data0"
  input_param {
    shape {
      dim: 10
      dim: 10
      dim: 120
      dim: 120
    }
  }
}
layer {
  name: "data1"
  type: "Input"
  top: "data1"
  input_param {
    shape {
      dim: 10
      dim: 5
      dim: 50
      dim: 60
    }
  }
}
layer {
  name: "output"
  type: "Crop"
  bottom: "data0"
  bottom: "data1"
  top: "output"
  crop_param {
    axis: 1
  }
}
I0914 09:12:31.033689 1956768 layer_factory.hpp:77] Creating layer data0
I0914 09:12:31.033696 1956768 net.cpp:86] Creating Layer data0
I0914 09:12:31.033702 1956768 net.cpp:382] data0 -> data0
I0914 09:12:31.033713 1956768 net.cpp:124] Setting up data0
I0914 09:12:31.033718 1956768 net.cpp:131] Top shape: 10 10 120 120 (1440000)
I0914 09:12:31.033725 1956768 net.cpp:139] Memory required for data: 5760000
I0914 09:12:31.033731 1956768 layer_factory.hpp:77] Creating layer data1
I0914 09:12:31.033737 1956768 net.cpp:86] Creating Layer data1
I0914 09:12:31.033744 1956768 net.cpp:382] data1 -> data1
I0914 09:12:31.033752 1956768 net.cpp:124] Setting up data1
I0914 09:12:31.033758 1956768 net.cpp:131] Top shape: 10 5 50 60 (150000)
I0914 09:12:31.033764 1956768 net.cpp:139] Memory required for data: 6360000
I0914 09:12:31.033769 1956768 layer_factory.hpp:77] Creating layer output
I0914 09:12:31.033775 1956768 net.cpp:86] Creating Layer output
I0914 09:12:31.033780 1956768 net.cpp:408] output <- data0
I0914 09:12:31.033785 1956768 net.cpp:408] output <- data1
I0914 09:12:31.033792 1956768 net.cpp:382] output -> output
I0914 09:12:31.033803 1956768 net.cpp:124] Setting up output
I0914 09:12:31.033808 1956768 net.cpp:131] Top shape: 10 5 50 60 (150000)
I0914 09:12:31.033814 1956768 net.cpp:139] Memory required for data: 6960000
I0914 09:12:31.033819 1956768 net.cpp:202] output does not need backward computation.
I0914 09:12:31.033824 1956768 net.cpp:202] data1 does not need backward computation.
I0914 09:12:31.033828 1956768 net.cpp:202] data0 does not need backward computation.
I0914 09:12:31.033833 1956768 net.cpp:244] This network produces output output
I0914 09:12:31.033839 1956768 net.cpp:257] Network initialization done.
I0914 09:12:31.330210 1956768 solver.cpp:45] Initializing solver from parameters: 
train_net: "/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_1_2.prototxt"
base_lr: 0.01
display: 1
max_iter: 100000
lr_policy: "inv"
gamma: 0.0001
power: 0.75
momentum: 0.9
weight_decay: 0.0005
snapshot: 100000
snapshot_prefix: "/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_1_2"
I0914 09:12:31.330330 1956768 solver.cpp:92] Creating training net from train_net file: /home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_1_2.prototxt
I0914 09:12:31.330410 1956768 net.cpp:53] Initializing net from parameters: 
state {
  phase: TRAIN
}
layer {
  name: "data0"
  type: "Input"
  top: "data0"
  input_param {
    shape {
      dim: 10
      dim: 10
      dim: 120
      dim: 120
    }
  }
}
layer {
  name: "data1"
  type: "Input"
  top: "data1"
  input_param {
    shape {
      dim: 10
      dim: 5
      dim: 50
      dim: 60
    }
  }
}
layer {
  name: "output"
  type: "Crop"
  bottom: "data0"
  bottom: "data1"
  top: "output"
  crop_param {
    axis: 1
    offset: 2
  }
}
I0914 09:12:31.330456 1956768 layer_factory.hpp:77] Creating layer data0
I0914 09:12:31.330467 1956768 net.cpp:86] Creating Layer data0
I0914 09:12:31.330474 1956768 net.cpp:382] data0 -> data0
I0914 09:12:31.330490 1956768 net.cpp:124] Setting up data0
I0914 09:12:31.330494 1956768 net.cpp:131] Top shape: 10 10 120 120 (1440000)
I0914 09:12:31.330502 1956768 net.cpp:139] Memory required for data: 5760000
I0914 09:12:31.330507 1956768 layer_factory.hpp:77] Creating layer data1
I0914 09:12:31.330513 1956768 net.cpp:86] Creating Layer data1
I0914 09:12:31.330518 1956768 net.cpp:382] data1 -> data1
I0914 09:12:31.330528 1956768 net.cpp:124] Setting up data1
I0914 09:12:31.330530 1956768 net.cpp:131] Top shape: 10 5 50 60 (150000)
I0914 09:12:31.330536 1956768 net.cpp:139] Memory required for data: 6360000
I0914 09:12:31.330540 1956768 layer_factory.hpp:77] Creating layer output
I0914 09:12:31.330546 1956768 net.cpp:86] Creating Layer output
I0914 09:12:31.330551 1956768 net.cpp:408] output <- data0
I0914 09:12:31.330555 1956768 net.cpp:408] output <- data1
I0914 09:12:31.330561 1956768 net.cpp:382] output -> output
I0914 09:12:31.330574 1956768 net.cpp:124] Setting up output
I0914 09:12:31.330577 1956768 net.cpp:131] Top shape: 10 5 50 60 (150000)
I0914 09:12:31.330582 1956768 net.cpp:139] Memory required for data: 6960000
I0914 09:12:31.330586 1956768 net.cpp:202] output does not need backward computation.
I0914 09:12:31.330590 1956768 net.cpp:202] data1 does not need backward computation.
I0914 09:12:31.330595 1956768 net.cpp:202] data0 does not need backward computation.
I0914 09:12:31.330598 1956768 net.cpp:244] This network produces output output
I0914 09:12:31.330605 1956768 net.cpp:257] Network initialization done.
I0914 09:12:31.330617 1956768 solver.cpp:57] Solver scaffolding done.
W0914 09:12:31.330727 1956768 _caffe.cpp:139] DEPRECATION WARNING - deprecated use of Python interface
W0914 09:12:31.330736 1956768 _caffe.cpp:140] Use this instead (with the named "weights" parameter):
W0914 09:12:31.330740 1956768 _caffe.cpp:142] Net('/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_1_2.prototxt', 1, weights='/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_1_2.caffemodel')
I0914 09:12:31.330806 1956768 net.cpp:53] Initializing net from parameters: 
state {
  phase: TEST
  level: 0
}
layer {
  name: "data0"
  type: "Input"
  top: "data0"
  input_param {
    shape {
      dim: 10
      dim: 10
      dim: 120
      dim: 120
    }
  }
}
layer {
  name: "data1"
  type: "Input"
  top: "data1"
  input_param {
    shape {
      dim: 10
      dim: 5
      dim: 50
      dim: 60
    }
  }
}
layer {
  name: "output"
  type: "Crop"
  bottom: "data0"
  bottom: "data1"
  top: "output"
  crop_param {
    axis: 1
    offset: 2
  }
}
I0914 09:12:31.330840 1956768 layer_factory.hpp:77] Creating layer data0
I0914 09:12:31.330848 1956768 net.cpp:86] Creating Layer data0
I0914 09:12:31.330853 1956768 net.cpp:382] data0 -> data0
I0914 09:12:31.330863 1956768 net.cpp:124] Setting up data0
I0914 09:12:31.330868 1956768 net.cpp:131] Top shape: 10 10 120 120 (1440000)
I0914 09:12:31.330873 1956768 net.cpp:139] Memory required for data: 5760000
I0914 09:12:31.330878 1956768 layer_factory.hpp:77] Creating layer data1
I0914 09:12:31.330883 1956768 net.cpp:86] Creating Layer data1
I0914 09:12:31.330888 1956768 net.cpp:382] data1 -> data1
I0914 09:12:31.330896 1956768 net.cpp:124] Setting up data1
I0914 09:12:31.330900 1956768 net.cpp:131] Top shape: 10 5 50 60 (150000)
I0914 09:12:31.330905 1956768 net.cpp:139] Memory required for data: 6360000
I0914 09:12:31.330909 1956768 layer_factory.hpp:77] Creating layer output
I0914 09:12:31.330914 1956768 net.cpp:86] Creating Layer output
I0914 09:12:31.330919 1956768 net.cpp:408] output <- data0
I0914 09:12:31.330922 1956768 net.cpp:408] output <- data1
I0914 09:12:31.330929 1956768 net.cpp:382] output -> output
I0914 09:12:31.330937 1956768 net.cpp:124] Setting up output
I0914 09:12:31.330941 1956768 net.cpp:131] Top shape: 10 5 50 60 (150000)
I0914 09:12:31.340920 1956768 net.cpp:139] Memory required for data: 6960000
I0914 09:12:31.340931 1956768 net.cpp:202] output does not need backward computation.
I0914 09:12:31.340935 1956768 net.cpp:202] data1 does not need backward computation.
I0914 09:12:31.340939 1956768 net.cpp:202] data0 does not need backward computation.
I0914 09:12:31.340942 1956768 net.cpp:244] This network produces output output
I0914 09:12:31.340947 1956768 net.cpp:257] Network initialization done.
I0914 09:12:31.659914 1956768 solver.cpp:45] Initializing solver from parameters: 
train_net: "/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_1_1_2_4.prototxt"
base_lr: 0.01
display: 1
max_iter: 100000
lr_policy: "inv"
gamma: 0.0001
power: 0.75
momentum: 0.9
weight_decay: 0.0005
snapshot: 100000
snapshot_prefix: "/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_1_1_2_4"
I0914 09:12:31.660022 1956768 solver.cpp:92] Creating training net from train_net file: /home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_1_1_2_4.prototxt
I0914 09:12:31.660095 1956768 net.cpp:53] Initializing net from parameters: 
state {
  phase: TRAIN
}
layer {
  name: "data0"
  type: "Input"
  top: "data0"
  input_param {
    shape {
      dim: 10
      dim: 10
      dim: 120
      dim: 120
    }
  }
}
layer {
  name: "data1"
  type: "Input"
  top: "data1"
  input_param {
    shape {
      dim: 10
      dim: 5
      dim: 50
      dim: 60
    }
  }
}
layer {
  name: "output"
  type: "Crop"
  bottom: "data0"
  bottom: "data1"
  top: "output"
  crop_param {
    axis: 1
    offset: 1
    offset: 2
    offset: 4
  }
}
I0914 09:12:31.660136 1956768 layer_factory.hpp:77] Creating layer data0
I0914 09:12:31.660146 1956768 net.cpp:86] Creating Layer data0
I0914 09:12:31.660152 1956768 net.cpp:382] data0 -> data0
I0914 09:12:31.660166 1956768 net.cpp:124] Setting up data0
I0914 09:12:31.660171 1956768 net.cpp:131] Top shape: 10 10 120 120 (1440000)
I0914 09:12:31.660177 1956768 net.cpp:139] Memory required for data: 5760000
I0914 09:12:31.660181 1956768 layer_factory.hpp:77] Creating layer data1
I0914 09:12:31.660187 1956768 net.cpp:86] Creating Layer data1
I0914 09:12:31.660192 1956768 net.cpp:382] data1 -> data1
I0914 09:12:31.660199 1956768 net.cpp:124] Setting up data1
I0914 09:12:31.660202 1956768 net.cpp:131] Top shape: 10 5 50 60 (150000)
I0914 09:12:31.660207 1956768 net.cpp:139] Memory required for data: 6360000
I0914 09:12:31.660210 1956768 layer_factory.hpp:77] Creating layer output
I0914 09:12:31.660215 1956768 net.cpp:86] Creating Layer output
I0914 09:12:31.660219 1956768 net.cpp:408] output <- data0
I0914 09:12:31.660224 1956768 net.cpp:408] output <- data1
I0914 09:12:31.660228 1956768 net.cpp:382] output -> output
I0914 09:12:31.660239 1956768 net.cpp:124] Setting up output
I0914 09:12:31.660243 1956768 net.cpp:131] Top shape: 10 5 50 60 (150000)
I0914 09:12:31.660247 1956768 net.cpp:139] Memory required for data: 6960000
I0914 09:12:31.660250 1956768 net.cpp:202] output does not need backward computation.
I0914 09:12:31.660254 1956768 net.cpp:202] data1 does not need backward computation.
I0914 09:12:31.660257 1956768 net.cpp:202] data0 does not need backward computation.
I0914 09:12:31.660260 1956768 net.cpp:244] This network produces output output
I0914 09:12:31.660265 1956768 net.cpp:257] Network initialization done.
I0914 09:12:31.660279 1956768 solver.cpp:57] Solver scaffolding done.
W0914 09:12:31.660377 1956768 _caffe.cpp:139] DEPRECATION WARNING - deprecated use of Python interface
W0914 09:12:31.660384 1956768 _caffe.cpp:140] Use this instead (with the named "weights" parameter):
W0914 09:12:31.660387 1956768 _caffe.cpp:142] Net('/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_1_1_2_4.prototxt', 1, weights='/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_1_1_2_4.caffemodel')
I0914 09:12:31.660446 1956768 net.cpp:53] Initializing net from parameters: 
state {
  phase: TEST
  level: 0
}
layer {
  name: "data0"
  type: "Input"
  top: "data0"
  input_param {
    shape {
      dim: 10
      dim: 10
      dim: 120
      dim: 120
    }
  }
}
layer {
  name: "data1"
  type: "Input"
  top: "data1"
  input_param {
    shape {
      dim: 10
      dim: 5
      dim: 50
      dim: 60
    }
  }
}
layer {
  name: "output"
  type: "Crop"
  bottom: "data0"
  bottom: "data1"
  top: "output"
  crop_param {
    axis: 1
    offset: 1
    offset: 2
    offset: 4
  }
}
I0914 09:12:31.660477 1956768 layer_factory.hpp:77] Creating layer data0
I0914 09:12:31.660483 1956768 net.cpp:86] Creating Layer data0
I0914 09:12:31.660488 1956768 net.cpp:382] data0 -> data0
I0914 09:12:31.660496 1956768 net.cpp:124] Setting up data0
I0914 09:12:31.660501 1956768 net.cpp:131] Top shape: 10 10 120 120 (1440000)
I0914 09:12:31.660506 1956768 net.cpp:139] Memory required for data: 5760000
I0914 09:12:31.660509 1956768 layer_factory.hpp:77] Creating layer data1
I0914 09:12:31.660514 1956768 net.cpp:86] Creating Layer data1
I0914 09:12:31.660521 1956768 net.cpp:382] data1 -> data1
I0914 09:12:31.660526 1956768 net.cpp:124] Setting up data1
I0914 09:12:31.660530 1956768 net.cpp:131] Top shape: 10 5 50 60 (150000)
I0914 09:12:31.660534 1956768 net.cpp:139] Memory required for data: 6360000
I0914 09:12:31.660537 1956768 layer_factory.hpp:77] Creating layer output
I0914 09:12:31.660542 1956768 net.cpp:86] Creating Layer output
I0914 09:12:31.660545 1956768 net.cpp:408] output <- data0
I0914 09:12:31.660549 1956768 net.cpp:408] output <- data1
I0914 09:12:31.660553 1956768 net.cpp:382] output -> output
I0914 09:12:31.660562 1956768 net.cpp:124] Setting up output
I0914 09:12:31.660564 1956768 net.cpp:131] Top shape: 10 5 50 60 (150000)
I0914 09:12:31.660569 1956768 net.cpp:139] Memory required for data: 6960000
I0914 09:12:31.660573 1956768 net.cpp:202] output does not need backward computation.
I0914 09:12:31.660575 1956768 net.cpp:202] data1 does not need backward computation.
I0914 09:12:31.660578 1956768 net.cpp:202] data0 does not need backward computation.
I0914 09:12:31.660581 1956768 net.cpp:244] This network produces output output
I0914 09:12:31.660585 1956768 net.cpp:257] Network initialization done.
I0914 09:12:32.000172 1956768 solver.cpp:45] Initializing solver from parameters: 
train_net: "/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_2_2_4.prototxt"
base_lr: 0.01
display: 1
max_iter: 100000
lr_policy: "inv"
gamma: 0.0001
power: 0.75
momentum: 0.9
weight_decay: 0.0005
snapshot: 100000
snapshot_prefix: "/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_2_2_4"
I0914 09:12:32.000275 1956768 solver.cpp:92] Creating training net from train_net file: /home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_2_2_4.prototxt
I0914 09:12:32.000347 1956768 net.cpp:53] Initializing net from parameters: 
state {
  phase: TRAIN
}
layer {
  name: "data0"
  type: "Input"
  top: "data0"
  input_param {
    shape {
      dim: 10
      dim: 10
      dim: 120
      dim: 120
    }
  }
}
layer {
  name: "data1"
  type: "Input"
  top: "data1"
  input_param {
    shape {
      dim: 10
      dim: 5
      dim: 50
      dim: 60
    }
  }
}
layer {
  name: "output"
  type: "Crop"
  bottom: "data0"
  bottom: "data1"
  top: "output"
  crop_param {
    axis: 2
    offset: 2
    offset: 4
  }
}
I0914 09:12:32.000389 1956768 layer_factory.hpp:77] Creating layer data0
I0914 09:12:32.000399 1956768 net.cpp:86] Creating Layer data0
I0914 09:12:32.000406 1956768 net.cpp:382] data0 -> data0
I0914 09:12:32.000419 1956768 net.cpp:124] Setting up data0
I0914 09:12:32.000423 1956768 net.cpp:131] Top shape: 10 10 120 120 (1440000)
I0914 09:12:32.000430 1956768 net.cpp:139] Memory required for data: 5760000
I0914 09:12:32.000433 1956768 layer_factory.hpp:77] Creating layer data1
I0914 09:12:32.000439 1956768 net.cpp:86] Creating Layer data1
I0914 09:12:32.000444 1956768 net.cpp:382] data1 -> data1
I0914 09:12:32.000450 1956768 net.cpp:124] Setting up data1
I0914 09:12:32.000454 1956768 net.cpp:131] Top shape: 10 5 50 60 (150000)
I0914 09:12:32.000459 1956768 net.cpp:139] Memory required for data: 6360000
I0914 09:12:32.000463 1956768 layer_factory.hpp:77] Creating layer output
I0914 09:12:32.000468 1956768 net.cpp:86] Creating Layer output
I0914 09:12:32.000473 1956768 net.cpp:408] output <- data0
I0914 09:12:32.000476 1956768 net.cpp:408] output <- data1
I0914 09:12:32.000481 1956768 net.cpp:382] output -> output
I0914 09:12:32.000491 1956768 net.cpp:124] Setting up output
I0914 09:12:32.000495 1956768 net.cpp:131] Top shape: 10 10 50 60 (300000)
I0914 09:12:32.000500 1956768 net.cpp:139] Memory required for data: 7560000
I0914 09:12:32.000504 1956768 net.cpp:202] output does not need backward computation.
I0914 09:12:32.000507 1956768 net.cpp:202] data1 does not need backward computation.
I0914 09:12:32.000510 1956768 net.cpp:202] data0 does not need backward computation.
I0914 09:12:32.000514 1956768 net.cpp:244] This network produces output output
I0914 09:12:32.000519 1956768 net.cpp:257] Network initialization done.
I0914 09:12:32.000531 1956768 solver.cpp:57] Solver scaffolding done.
W0914 09:12:32.000640 1956768 _caffe.cpp:139] DEPRECATION WARNING - deprecated use of Python interface
W0914 09:12:32.000648 1956768 _caffe.cpp:140] Use this instead (with the named "weights" parameter):
W0914 09:12:32.000651 1956768 _caffe.cpp:142] Net('/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_2_2_4.prototxt', 1, weights='/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_10_120_120_10_5_50_60_2_2_4.caffemodel')
I0914 09:12:32.000711 1956768 net.cpp:53] Initializing net from parameters: 
state {
  phase: TEST
  level: 0
}
layer {
  name: "data0"
  type: "Input"
  top: "data0"
  input_param {
    shape {
      dim: 10
      dim: 10
      dim: 120
      dim: 120
    }
  }
}
layer {
  name: "data1"
  type: "Input"
  top: "data1"
  input_param {
    shape {
      dim: 10
      dim: 5
      dim: 50
      dim: 60
    }
  }
}
layer {
  name: "output"
  type: "Crop"
  bottom: "data0"
  bottom: "data1"
  top: "output"
  crop_param {
    axis: 2
    offset: 2
    offset: 4
  }
}
I0914 09:12:32.000746 1956768 layer_factory.hpp:77] Creating layer data0
I0914 09:12:32.000751 1956768 net.cpp:86] Creating Layer data0
I0914 09:12:32.000756 1956768 net.cpp:382] data0 -> data0
I0914 09:12:32.000764 1956768 net.cpp:124] Setting up data0
I0914 09:12:32.000768 1956768 net.cpp:131] Top shape: 10 10 120 120 (1440000)
I0914 09:12:32.000773 1956768 net.cpp:139] Memory required for data: 5760000
I0914 09:12:32.000777 1956768 layer_factory.hpp:77] Creating layer data1
I0914 09:12:32.000782 1956768 net.cpp:86] Creating Layer data1
I0914 09:12:32.000788 1956768 net.cpp:382] data1 -> data1
I0914 09:12:32.000793 1956768 net.cpp:124] Setting up data1
I0914 09:12:32.000797 1956768 net.cpp:131] Top shape: 10 5 50 60 (150000)
I0914 09:12:32.000802 1956768 net.cpp:139] Memory required for data: 6360000
I0914 09:12:32.000805 1956768 layer_factory.hpp:77] Creating layer output
I0914 09:12:32.000809 1956768 net.cpp:86] Creating Layer output
I0914 09:12:32.000813 1956768 net.cpp:408] output <- data0
I0914 09:12:32.000818 1956768 net.cpp:408] output <- data1
I0914 09:12:32.000821 1956768 net.cpp:382] output -> output
I0914 09:12:32.000829 1956768 net.cpp:124] Setting up output
I0914 09:12:32.000833 1956768 net.cpp:131] Top shape: 10 10 50 60 (300000)
I0914 09:12:32.000837 1956768 net.cpp:139] Memory required for data: 7560000
I0914 09:12:32.000840 1956768 net.cpp:202] output does not need backward computation.
I0914 09:12:32.000844 1956768 net.cpp:202] data1 does not need backward computation.
I0914 09:12:32.000847 1956768 net.cpp:202] data0 does not need backward computation.
I0914 09:12:32.000850 1956768 net.cpp:244] This network produces output output
I0914 09:12:32.000854 1956768 net.cpp:257] Network initialization done.
I0914 09:12:32.305437 1956768 solver.cpp:45] Initializing solver from parameters: 
train_net: "/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_120_120_5_50_60_1_2_4.prototxt"
base_lr: 0.01
display: 1
max_iter: 100000
lr_policy: "inv"
gamma: 0.0001
power: 0.75
momentum: 0.9
weight_decay: 0.0005
snapshot: 100000
snapshot_prefix: "/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_120_120_5_50_60_1_2_4"
I0914 09:12:32.305541 1956768 solver.cpp:92] Creating training net from train_net file: /home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_120_120_5_50_60_1_2_4.prototxt
I0914 09:12:32.305613 1956768 net.cpp:53] Initializing net from parameters: 
state {
  phase: TRAIN
}
layer {
  name: "data0"
  type: "Input"
  top: "data0"
  input_param {
    shape {
      dim: 10
      dim: 120
      dim: 120
    }
  }
}
layer {
  name: "data1"
  type: "Input"
  top: "data1"
  input_param {
    shape {
      dim: 5
      dim: 50
      dim: 60
    }
  }
}
layer {
  name: "output"
  type: "Crop"
  bottom: "data0"
  bottom: "data1"
  top: "output"
  crop_param {
    axis: 1
    offset: 2
    offset: 4
  }
}
I0914 09:12:32.305655 1956768 layer_factory.hpp:77] Creating layer data0
I0914 09:12:32.305665 1956768 net.cpp:86] Creating Layer data0
I0914 09:12:32.305670 1956768 net.cpp:382] data0 -> data0
I0914 09:12:32.305683 1956768 net.cpp:124] Setting up data0
I0914 09:12:32.305688 1956768 net.cpp:131] Top shape: 10 120 120 (144000)
I0914 09:12:32.305694 1956768 net.cpp:139] Memory required for data: 576000
I0914 09:12:32.305697 1956768 layer_factory.hpp:77] Creating layer data1
I0914 09:12:32.305703 1956768 net.cpp:86] Creating Layer data1
I0914 09:12:32.305707 1956768 net.cpp:382] data1 -> data1
I0914 09:12:32.305714 1956768 net.cpp:124] Setting up data1
I0914 09:12:32.305717 1956768 net.cpp:131] Top shape: 5 50 60 (15000)
I0914 09:12:32.305721 1956768 net.cpp:139] Memory required for data: 636000
I0914 09:12:32.305724 1956768 layer_factory.hpp:77] Creating layer output
I0914 09:12:32.305730 1956768 net.cpp:86] Creating Layer output
I0914 09:12:32.305734 1956768 net.cpp:408] output <- data0
I0914 09:12:32.305738 1956768 net.cpp:408] output <- data1
I0914 09:12:32.305743 1956768 net.cpp:382] output -> output
I0914 09:12:32.305752 1956768 net.cpp:124] Setting up output
I0914 09:12:32.305756 1956768 net.cpp:131] Top shape: 10 50 60 (30000)
I0914 09:12:32.305760 1956768 net.cpp:139] Memory required for data: 756000
I0914 09:12:32.305764 1956768 net.cpp:202] output does not need backward computation.
I0914 09:12:32.305768 1956768 net.cpp:202] data1 does not need backward computation.
I0914 09:12:32.305770 1956768 net.cpp:202] data0 does not need backward computation.
I0914 09:12:32.305774 1956768 net.cpp:244] This network produces output output
I0914 09:12:32.305779 1956768 net.cpp:257] Network initialization done.
I0914 09:12:32.305790 1956768 solver.cpp:57] Solver scaffolding done.
W0914 09:12:32.305888 1956768 _caffe.cpp:139] DEPRECATION WARNING - deprecated use of Python interface
W0914 09:12:32.305896 1956768 _caffe.cpp:140] Use this instead (with the named "weights" parameter):
W0914 09:12:32.305899 1956768 _caffe.cpp:142] Net('/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_120_120_5_50_60_1_2_4.prototxt', 1, weights='/home/ai/.tvm_test_data/caffe_test/Crop/Crop_10_120_120_5_50_60_1_2_4.caffemodel')
I0914 09:12:32.305958 1956768 net.cpp:53] Initializing net from parameters: 
state {
  phase: TEST
  level: 0
}
layer {
  name: "data0"
  type: "Input"
  top: "data0"
  input_param {
    shape {
      dim: 10
      dim: 120
      dim: 120
    }
  }
}
layer {
  name: "data1"
  type: "Input"
  top: "data1"
  input_param {
    shape {
      dim: 5
      dim: 50
      dim: 60
    }
  }
}
layer {
  name: "output"
  type: "Crop"
  bottom: "data0"
  bottom: "data1"
  top: "output"
  crop_param {
    axis: 1
    offset: 2
    offset: 4
  }
}
I0914 09:12:32.305989 1956768 layer_factory.hpp:77] Creating layer data0
I0914 09:12:32.305995 1956768 net.cpp:86] Creating Layer data0
I0914 09:12:32.306000 1956768 net.cpp:382] data0 -> data0
I0914 09:12:32.306008 1956768 net.cpp:124] Setting up data0
I0914 09:12:32.306011 1956768 net.cpp:131] Top shape: 10 120 120 (144000)
I0914 09:12:32.306017 1956768 net.cpp:139] Memory required for data: 576000
I0914 09:12:32.306020 1956768 layer_factory.hpp:77] Creating layer data1
I0914 09:12:32.306025 1956768 net.cpp:86] Creating Layer data1
I0914 09:12:32.306030 1956768 net.cpp:382] data1 -> data1
I0914 09:12:32.306035 1956768 net.cpp:124] Setting up data1
I0914 09:12:32.306041 1956768 net.cpp:131] Top shape: 5 50 60 (15000)
I0914 09:12:32.306046 1956768 net.cpp:139] Memory required for data: 636000
I0914 09:12:32.306048 1956768 layer_factory.hpp:77] Creating layer output
I0914 09:12:32.306053 1956768 net.cpp:86] Creating Layer output
I0914 09:12:32.306056 1956768 net.cpp:408] output <- data0
I0914 09:12:32.306061 1956768 net.cpp:408] output <- data1
I0914 09:12:32.306064 1956768 net.cpp:382] output -> output
I0914 09:12:32.306072 1956768 net.cpp:124] Setting up output
I0914 09:12:32.306077 1956768 net.cpp:131] Top shape: 10 50 60 (30000)
I0914 09:12:32.306080 1956768 net.cpp:139] Memory required for data: 756000
I0914 09:12:32.306083 1956768 net.cpp:202] output does not need backward computation.
I0914 09:12:32.306087 1956768 net.cpp:202] data1 does not need backward computation.
I0914 09:12:32.306090 1956768 net.cpp:202] data0 does not need backward computation.
I0914 09:12:32.306093 1956768 net.cpp:244] This network produces output output
I0914 09:12:32.306097 1956768 net.cpp:257] Network initialization done.
I0914 09:12:32.559347 1956768 solver.cpp:45] Initializing solver from parameters: 
train_net: "/home/ai/.tvm_test_data/caffe_test/Crop/Crop_120_120_50_60_0_2_4.prototxt"
base_lr: 0.01
display: 1
max_iter: 100000
lr_policy: "inv"
gamma: 0.0001
power: 0.75
momentum: 0.9
weight_decay: 0.0005
snapshot: 100000
snapshot_prefix: "/home/ai/.tvm_test_data/caffe_test/Crop/Crop_120_120_50_60_0_2_4"
I0914 09:12:32.559437 1956768 solver.cpp:92] Creating training net from train_net file: /home/ai/.tvm_test_data/caffe_test/Crop/Crop_120_120_50_60_0_2_4.prototxt
I0914 09:12:32.559511 1956768 net.cpp:53] Initializing net from parameters: 
state {
  phase: TRAIN
}
layer {
  name: "data0"
  type: "Input"
  top: "data0"
  input_param {
    shape {
      dim: 120
      dim: 120
    }
  }
}
layer {
  name: "data1"
  type: "Input"
  top: "data1"
  input_param {
    shape {
      dim: 50
      dim: 60
    }
  }
}
layer {
  name: "output"
  type: "Crop"
  bottom: "data0"
  bottom: "data1"
  top: "output"
  crop_param {
    axis: 0
    offset: 2
    offset: 4
  }
}
I0914 09:12:32.559547 1956768 layer_factory.hpp:77] Creating layer data0
I0914 09:12:32.559556 1956768 net.cpp:86] Creating Layer data0
I0914 09:12:32.559561 1956768 net.cpp:382] data0 -> data0
I0914 09:12:32.559571 1956768 net.cpp:124] Setting up data0
I0914 09:12:32.559574 1956768 net.cpp:131] Top shape: 120 120 (14400)
I0914 09:12:32.559579 1956768 net.cpp:139] Memory required for data: 57600
I0914 09:12:32.559583 1956768 layer_factory.hpp:77] Creating layer data1
I0914 09:12:32.559587 1956768 net.cpp:86] Creating Layer data1
I0914 09:12:32.559592 1956768 net.cpp:382] data1 -> data1
I0914 09:12:32.559598 1956768 net.cpp:124] Setting up data1
I0914 09:12:32.559602 1956768 net.cpp:131] Top shape: 50 60 (3000)
I0914 09:12:32.559605 1956768 net.cpp:139] Memory required for data: 69600
I0914 09:12:32.559608 1956768 layer_factory.hpp:77] Creating layer output
I0914 09:12:32.559613 1956768 net.cpp:86] Creating Layer output
I0914 09:12:32.559617 1956768 net.cpp:408] output <- data0
I0914 09:12:32.559621 1956768 net.cpp:408] output <- data1
I0914 09:12:32.559625 1956768 net.cpp:382] output -> output
I0914 09:12:32.559634 1956768 net.cpp:124] Setting up output
I0914 09:12:32.559638 1956768 net.cpp:131] Top shape: 50 60 (3000)
I0914 09:12:32.559641 1956768 net.cpp:139] Memory required for data: 81600
I0914 09:12:32.559644 1956768 net.cpp:202] output does not need backward computation.
I0914 09:12:32.559648 1956768 net.cpp:202] data1 does not need backward computation.
I0914 09:12:32.559651 1956768 net.cpp:202] data0 does not need backward computation.
I0914 09:12:32.559654 1956768 net.cpp:244] This network produces output output
I0914 09:12:32.559659 1956768 net.cpp:257] Network initialization done.
I0914 09:12:32.559669 1956768 solver.cpp:57] Solver scaffolding done.
W0914 09:12:32.559760 1956768 _caffe.cpp:139] DEPRECATION WARNING - deprecated use of Python interface
W0914 09:12:32.559768 1956768 _caffe.cpp:140] Use this instead (with the named "weights" parameter):
W0914 09:12:32.559772 1956768 _caffe.cpp:142] Net('/home/ai/.tvm_test_data/caffe_test/Crop/Crop_120_120_50_60_0_2_4.prototxt', 1, weights='/home/ai/.tvm_test_data/caffe_test/Crop/Crop_120_120_50_60_0_2_4.caffemodel')
I0914 09:12:32.559827 1956768 net.cpp:53] Initializing net from parameters: 
state {
  phase: TEST
  level: 0
}
layer {
  name: "data0"
  type: "Input"
  top: "data0"
  input_param {
    shape {
      dim: 120
      dim: 120
    }
  }
}
layer {
  name: "data1"
  type: "Input"
  top: "data1"
  input_param {
    shape {
      dim: 50
      dim: 60
    }
  }
}
layer {
  name: "output"
  type: "Crop"
  bottom: "data0"
  bottom: "data1"
  top: "output"
  crop_param {
    axis: 0
    offset: 2
    offset: 4
  }
}
I0914 09:12:32.559859 1956768 layer_factory.hpp:77] Creating layer data0
I0914 09:12:32.559864 1956768 net.cpp:86] Creating Layer data0
I0914 09:12:32.559870 1956768 net.cpp:382] data0 -> data0
I0914 09:12:32.559876 1956768 net.cpp:124] Setting up data0
I0914 09:12:32.559880 1956768 net.cpp:131] Top shape: 120 120 (14400)
I0914 09:12:32.559885 1956768 net.cpp:139] Memory required for data: 57600
I0914 09:12:32.559888 1956768 layer_factory.hpp:77] Creating layer data1
I0914 09:12:32.559892 1956768 net.cpp:86] Creating Layer data1
I0914 09:12:32.559896 1956768 net.cpp:382] data1 -> data1
I0914 09:12:32.559901 1956768 net.cpp:124] Setting up data1
I0914 09:12:32.559906 1956768 net.cpp:131] Top shape: 50 60 (3000)
I0914 09:12:32.559911 1956768 net.cpp:139] Memory required for data: 69600
I0914 09:12:32.559914 1956768 layer_factory.hpp:77] Creating layer output
I0914 09:12:32.559918 1956768 net.cpp:86] Creating Layer output
I0914 09:12:32.559922 1956768 net.cpp:408] output <- data0
I0914 09:12:32.559926 1956768 net.cpp:408] output <- data1
I0914 09:12:32.559931 1956768 net.cpp:382] output -> output
I0914 09:12:32.559937 1956768 net.cpp:124] Setting up output
I0914 09:12:32.559942 1956768 net.cpp:131] Top shape: 50 60 (3000)
I0914 09:12:32.559944 1956768 net.cpp:139] Memory required for data: 81600
I0914 09:12:32.559947 1956768 net.cpp:202] output does not need backward computation.
I0914 09:12:32.559952 1956768 net.cpp:202] data1 does not need backward computation.
I0914 09:12:32.559954 1956768 net.cpp:202] data0 does not need backward computation.
I0914 09:12:32.559957 1956768 net.cpp:244] This network produces output output
I0914 09:12:32.559962 1956768 net.cpp:257] Network initialization done.
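
The logs above all follow the same shape rule: axes before `axis` keep the first input's extent, while axes from `axis` onward are cropped to the second input's extent, starting at the given offsets. Below is a minimal NumPy sketch of that rule for reference — my own helper, not TVM's converter — assuming a single offset is broadcast to all cropped axes:

def crop_ref(a, b, axis=2, offset=None):
    """NumPy reference for Caffe Crop: keep a's dims before `axis`,
    crop dims from `axis` on to b's size, starting at the offsets."""
    axis = axis % a.ndim
    num = a.ndim - axis
    if offset is None:
        offsets = [0] * num
    elif isinstance(offset, int) or len(offset) == 1:
        off = offset if isinstance(offset, int) else offset[0]
        offsets = [off] * num  # a lone offset applies to every cropped axis
    else:
        offsets = list(offset)
    slices = [slice(None)] * axis + [
        slice(off, off + size) for off, size in zip(offsets, b.shape[axis:])
    ]
    return a[tuple(slices)]

# e.g. the axis=1 case in the logs: (10, 10, 120, 120) cropped like (10, 5, 50, 60)
a = np.zeros((10, 10, 120, 120), np.float32)
b = np.zeros((10, 5, 50, 60), np.float32)
assert crop_ref(a, b, axis=1, offset=[1, 2, 4]).shape == (10, 5, 50, 60)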

caffe Deconvolution#

def _test_deconvolution(data, **kwargs):
    """One iteration of Deconvolution"""
    _test_op(data, L.Deconvolution, "Deconvolution", **kwargs)


def test_forward_Deconvolution():
    """Deconvolution"""
    data = np.random.rand(1, 16, 32, 32).astype(np.float32)
    _test_deconvolution(
        data,
        convolution_param=dict(
            num_output=20,
            bias_term=True,
            pad=0,
            kernel_size=3,
            stride=2,
            dilation=1,
            weight_filler=dict(type="xavier"),
            bias_filler=dict(type="xavier"),
        ),
    )
    _test_deconvolution(
        data,
        convolution_param=dict(
            num_output=20,
            bias_term=False,
            pad=[1, 2],
            kernel_size=3,
            stride=2,
            dilation=1,
            weight_filler=dict(type="xavier"),
            bias_filler=dict(type="xavier"),
        ),
    )
    _test_deconvolution(
        data,
        convolution_param=dict(
            num_output=20,
            bias_term=True,
            pad_h=1,
            pad_w=2,
            kernel_h=3,
            kernel_w=5,
            stride_h=2,
            stride_w=1,
            dilation=1,
            weight_filler=dict(type="xavier"),
            bias_filler=dict(type="xavier"),
        ),
    )
    _test_deconvolution(
        data,
        convolution_param=dict(
            num_output=16,
            bias_term=False,
            pad=0,
            kernel_size=2,
            stride=2,
            dilation=1,
            group=16,
            weight_filler=dict(type="xavier"),
            bias_filler=dict(type="xavier"),
        ),
    )
    data = np.random.rand(1, 100, 32, 32).astype(np.float32)
    _test_deconvolution(
        data,
        convolution_param=dict(
            num_output=100,
            bias_term=False,
            pad=0,
            kernel_size=2,
            stride=2,
            dilation=1,
            group=100,
            weight_filler=dict(type="xavier"),
            bias_filler=dict(type="xavier"),
        ),
    )
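
As a sanity check on these configurations, the expected output size of a Caffe Deconvolution along one spatial axis follows the usual transposed-convolution arithmetic. A small helper of my own (not part of the test harness):

def deconv_out_size(in_size, kernel, stride=1, pad=0, dilation=1):
    """Transposed-convolution output size along one spatial axis."""
    return stride * (in_size - 1) + dilation * (kernel - 1) + 1 - 2 * pad

# first case above: 32 -> 65 with kernel_size=3, stride=2, pad=0
assert deconv_out_size(32, kernel=3, stride=2) == 65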

caffe Dropout#

def _test_dropout(data, **kwargs):
    """One iteration of Dropout"""
    _test_op(data, L.Dropout, "Dropout", **kwargs)


def test_forward_Dropout():
    """Dropout"""
    data = np.random.rand(1, 3, 10, 10).astype(np.float32)
    _test_dropout(data)
    _test_dropout(data, dropout_ratio=0.7)

caffe Eltwise#

def _test_eltwise(data_list, **kwargs):
    """One iteration of Eltwise"""
    _test_op(data_list, L.Eltwise, "Eltwise", **kwargs)


def test_forward_Eltwise():
    """Eltwise"""
    _test_eltwise(
        [
            np.random.rand(1, 3, 10, 11).astype(np.float32),
            np.random.rand(1, 3, 10, 11).astype(np.float32),
        ],
        operation=0,
    )
    _test_eltwise(
        [
            np.random.rand(1, 3, 10, 11).astype(np.float32),
            np.random.rand(1, 3, 10, 11).astype(np.float32),
        ],
        operation=1,
    )
    _test_eltwise(
        [
            np.random.rand(1, 3, 10, 11).astype(np.float32),
            np.random.rand(1, 3, 10, 11).astype(np.float32),
        ],
        operation=2,
    )
    _test_eltwise(
        [
            np.random.rand(1, 3, 10, 11).astype(np.float32),
            np.random.rand(1, 3, 10, 11).astype(np.float32),
        ],
        operation=1,
        coeff=[0.5, 1],
    )
    _test_eltwise(
        [
            np.random.rand(1, 3, 10, 11).astype(np.float32),
            np.random.rand(1, 3, 10, 11).astype(np.float32),
            np.random.rand(1, 3, 10, 11).astype(np.float32),
        ],
        operation=0,
    )
    _test_eltwise(
        [
            np.random.rand(1, 3, 10, 11).astype(np.float32),
            np.random.rand(1, 3, 10, 11).astype(np.float32),
            np.random.rand(1, 3, 10, 11).astype(np.float32),
            np.random.rand(1, 3, 10, 11).astype(np.float32),
        ],
        operation=1,
    )
    _test_eltwise(
        [
            np.random.rand(1, 3, 10, 11).astype(np.float32),
            np.random.rand(1, 3, 10, 11).astype(np.float32),
            np.random.rand(1, 3, 10, 11).astype(np.float32),
            np.random.rand(1, 3, 10, 11).astype(np.float32),
            np.random.rand(1, 3, 10, 11).astype(np.float32),
        ],
        operation=2,
    )
    _test_eltwise(
        [
            np.random.rand(1, 3, 10, 11).astype(np.float32),
            np.random.rand(1, 3, 10, 11).astype(np.float32),
            np.random.rand(1, 3, 10, 11).astype(np.float32),
            np.random.rand(1, 3, 10, 11).astype(np.float32),
            np.random.rand(1, 3, 10, 11).astype(np.float32),
            np.random.rand(1, 3, 10, 11).astype(np.float32),
        ],
        operation=1,
        coeff=[0.5, 1, 0.2, 1.8, 3.1, 0.1],
    )
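
The integer `operation` codes follow Caffe's `EltwiseParameter.EltwiseOp` enum (PROD = 0, SUM = 1, MAX = 2), and `coeff` only applies to SUM. A NumPy reference for what each case checks — my sketch, not the converter:

def eltwise_ref(inputs, operation=1, coeff=None):
    """Caffe Eltwise: 0 = PROD, 1 = SUM (optionally weighted), 2 = MAX."""
    if operation == 0:
        out = inputs[0].copy()
        for x in inputs[1:]:
            out = out * x
        return out
    if operation == 1:
        coeff = coeff if coeff is not None else [1.0] * len(inputs)
        return sum(c * x for c, x in zip(coeff, inputs))
    return np.maximum.reduce(inputs)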

caffe Flatten#

def _test_flatten(data, axis=1):
    """One iteration of Flatten"""
    _test_op(data, L.Flatten, "Flatten", axis=axis)


def test_forward_Flatten():
    """Flatten"""
    data = np.random.rand(1, 3, 10, 10).astype(np.float32)
    _test_flatten(data)
    _test_flatten(data, axis=1)
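
Caffe's Flatten collapses every axis from `axis` to `end_axis` (default -1) into one, so both calls above produce shape (1, 300) from (1, 3, 10, 10). A one-line NumPy equivalent for the default `end_axis` (my sketch):

def flatten_ref(x, axis=1):
    """Collapse all dims from `axis` onward, as Caffe Flatten does by default."""
    return x.reshape(x.shape[:axis] + (-1,))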

caffe InnerProduct#

def _test_inner_product(data, **kwargs):
    """One iteration of InnerProduct"""
    _test_op(data, L.InnerProduct, "InnerProduct", **kwargs)


def test_forward_InnerProduct():
    """InnerProduct"""
    data = np.random.rand(1, 3, 10, 10).astype(np.float32)
    _test_inner_product(data, num_output=20, bias_term=False, weight_filler=dict(type="xavier"))
    _test_inner_product(
        data,
        num_output=20,
        bias_term=True,
        weight_filler=dict(type="xavier"),
        bias_filler=dict(type="xavier"),
    )
    _test_inner_product(
        np.random.rand(20, 10).astype(np.float32),
        num_output=30,
        bias_term=True,
        weight_filler=dict(type="xavier"),
        bias_filler=dict(type="xavier"),
    )

caffe LRN#

def _test_lrn(data, local_size=5, alpha=1.0, beta=0.75, k=1.0):
    """One iteration of LRN"""
    _test_op(data, L.LRN, "LRN", local_size=local_size, alpha=alpha, beta=beta, k=k)


def test_forward_LRN():
    """LRN"""
    data = np.random.rand(1, 3, 10, 10).astype(np.float32)
    _test_lrn(data)
    _test_lrn(data, local_size=3)
    _test_lrn(data, local_size=3, alpha=2.0)
    _test_lrn(
        data,
        local_size=3,
        alpha=2.0,
        beta=0.5,
    )
    _test_lrn(data, local_size=3, alpha=2.0, beta=0.5, k=2.0)
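
For reference, Caffe's default LRN normalizes across channels: each value is divided by (k + alpha/n * sum of squares over a window of n = local_size neighboring channels) ** beta. A direct NumPy sketch of that formula — note that Caffe divides alpha by local_size:

def lrn_ref(x, local_size=5, alpha=1.0, beta=0.75, k=1.0):
    """Across-channel LRN, the Caffe default region."""
    _, c, _, _ = x.shape
    half = local_size // 2
    sq = x ** 2
    out = np.empty_like(x)
    for i in range(c):
        lo, hi = max(0, i - half), min(c, i + half + 1)
        scale = k + (alpha / local_size) * sq[:, lo:hi].sum(axis=1)
        out[:, i] = x[:, i] / scale ** beta
    return out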

caffe Permute#

def _test_permute(data, **kwargs):
    """One iteration of Permute."""
    _test_op(data, L.Permute, "Permute", **kwargs)


def test_forward_Permute():
    """Permute"""
    data = np.random.rand(2, 3, 4).astype(np.float32)
    _test_permute(data, permute_param={"order": [0, 1, 2]})
    _test_permute(data, permute_param={"order": [0, 2, 1]})
    _test_permute(data, permute_param={"order": [1, 0, 2]})
    _test_permute(data, permute_param={"order": [1, 2, 0]})
    _test_permute(data, permute_param={"order": [2, 0, 1]})
    _test_permute(data, permute_param={"order": [2, 1, 0]})

caffe Pooling#

def _test_pooling(data, **kwargs):
    """One iteration of Pooling."""
    _test_op(data, L.Pooling, "Pooling", **kwargs)


def test_forward_Pooling():
    """Pooing"""
    data = np.random.rand(1, 3, 10, 10).astype(np.float32)
    # MAX Pooling
    _test_pooling(data, kernel_size=2, stride=2, pad=0, pool=P.Pooling.MAX)
    _test_pooling(
        data, kernel_h=2, kernel_w=3, stride_h=2, stride_w=1, pad_h=1, pad_w=2, pool=P.Pooling.MAX
    )
    _test_pooling(data, pool=P.Pooling.MAX, global_pooling=True)

    # AVE Pooling
    _test_pooling(data, kernel_size=2, stride=2, pad=0, pool=P.Pooling.AVE)
    _test_pooling(
        data, kernel_h=2, kernel_w=3, stride_h=2, stride_w=1, pad_h=1, pad_w=2, pool=P.Pooling.AVE
    )
    _test_pooling(data, pool=P.Pooling.AVE, global_pooling=True)
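
One Caffe-specific detail these cases exercise: pooling output sizes are computed with ceil (convolution uses floor), and with padding the last window is clipped so it still starts inside the input. A helper of my own reproducing that arithmetic:

import math

def pool_out_size(in_size, kernel, stride=1, pad=0):
    """Caffe pooling output size along one spatial axis."""
    out = math.ceil((in_size + 2 * pad - kernel) / stride) + 1
    if pad > 0 and (out - 1) * stride >= in_size + pad:
        out -= 1  # drop a window that would start entirely in the padding
    return out

# kernel_size=2, stride=2 on a 10-wide input -> 5
assert pool_out_size(10, kernel=2, stride=2) == 5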

caffe Power#

def _test_power(data, **kwargs):
    """One iteration of Power."""
    _test_op(data, L.Power, "Power", **kwargs)


def test_forward_Power():
    """Power"""
    data = np.random.rand(1, 3, 10, 10).astype(np.float32)
    _test_power(data, power_param={"power": 0.37, "scale": 0.83, "shift": -2.4})
    _test_power(data, power_param={"power": 0.37, "scale": 0.83, "shift": 0.0})
    _test_power(data, power_param={"power": 0.0, "scale": 0.83, "shift": -2.4})
    _test_power(data, power_param={"power": 1.0, "scale": 0.83, "shift": -2.4})
    _test_power(data, power_param={"power": 2.0, "scale": 0.34, "shift": -2.4})
    _test_power(data, power_param={"power": 1.0, "scale": 1.0, "shift": 0.0})

caffe PReLU#

def _test_prelu(data, **kwargs):
    """One iteration of PReLU."""
    _test_op(data, L.PReLU, "PReLU", **kwargs)


def test_forward_PReLU():
    """PReLU"""
    data = np.random.rand(1, 3, 10, 10).astype(np.float32)
    _test_prelu(data, filler=dict(type="constant", value=0.5))
    _test_prelu(data)
    _test_prelu(np.random.rand(10, 20).astype(np.float32))
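
PReLU keeps positive values and scales negative ones by a learned slope; Caffe stores one slope per channel (axis 1) unless channel_shared is set, which is what the constant filler above initializes. A NumPy sketch with a per-channel slope:

def prelu_ref(x, alpha):
    """PReLU: x if x > 0 else alpha * x, with alpha broadcast along axis 1."""
    a = np.asarray(alpha, x.dtype).reshape((1, -1) + (1,) * (x.ndim - 2))
    return np.maximum(x, 0) + a * np.minimum(x, 0)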

caffe ReLU#

def _test_relu(data, **kwargs):
    """One iteration of ReLU."""
    _test_op(data, L.ReLU, "ReLU", **kwargs)


def test_forward_ReLU():
    """ReLU"""
    data = np.random.rand(1, 3, 10, 10).astype(np.float32)
    _test_relu(data)
    _test_relu(np.random.rand(10, 20).astype(np.float32))

caffe Reshape#

def _test_reshape(data, **kwargs):
    """One iteration of Reshape."""
    _test_op(data, L.Reshape, "Reshape", **kwargs)


def test_forward_Reshape():
    """Reshape"""
    data = np.random.rand(1, 8, 6).astype(np.float32)
    _test_reshape(data, reshape_param={"shape": {"dim": [4, 3, 4]}})
    _test_reshape(data, reshape_param={"shape": {"dim": [2, 0, 3]}})
    _test_reshape(data, reshape_param={"shape": {"dim": [2, 0, -1]}})
    _test_reshape(data, reshape_param={"shape": {"dim": [0, -1]}})

    _test_reshape(data, reshape_param={"shape": {"dim": [2, 3]}, "axis": 2})
    _test_reshape(data, reshape_param={"shape": {"dim": [4, 3, 4]}, "axis": 1})
    _test_reshape(data, reshape_param={"shape": {"dim": [4, 3, 4]}, "axis": -3})

    _test_reshape(data, reshape_param={"shape": {"dim": [2, 4]}, "axis": 1, "num_axes": 1})
    _test_reshape(data, reshape_param={"shape": {"dim": [3, 16]}, "axis": 1, "num_axes": 2})

caffe Scale#

def _test_scale(data, **kwargs):
    """One iteration of Scale."""
    _test_op(data, L.Scale, "Scale", **kwargs)


def test_forward_Scale():
    """Scale"""
    data = np.random.rand(1, 3, 10, 10).astype(np.float32)
    _test_scale(data, filler=dict(type="xavier"))
    _test_scale(data, filler=dict(type="xavier"), bias_term=True, bias_filler=dict(type="xavier"))

caffe Sigmoid#

def _test_sigmoid(data, **kwargs):
    """One iteration of Sigmoid."""
    _test_op(data, L.Sigmoid, "Sigmoid", **kwargs)


def test_forward_Sigmoid():
    """Sigmoid"""
    data = np.random.rand(1, 3, 10, 10).astype(np.float32)
    _test_sigmoid(data)

caffe Slice#

def _test_slice(data, **kwargs):
    """One iteration of Slice"""
    _test_op(data, L.Slice, "Slice", **kwargs)


def test_forward_Slice():
    """Slice"""
    data = np.random.rand(1, 3, 10, 10).astype(np.float32)
    _test_slice(data, ntop=2, slice_param=dict(axis=1, slice_point=[1]))
    _test_slice(data, ntop=2, slice_param=dict(axis=-1, slice_point=[1]))
    _test_slice(data, ntop=3, slice_param=dict(axis=2, slice_point=[1, 6]))
    _test_slice(data, ntop=3)
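
Slice maps naturally onto np.split: explicit slice_point values are cut indices along `axis`, and without slice_point the axis is divided evenly into ntop pieces (so the last case splits the channel axis of size 3 into three slices). My sketch:

def slice_ref(x, axis=1, slice_point=None, ntop=2):
    """Caffe Slice as np.split: cut at slice_point, or evenly into ntop parts."""
    return np.split(x, slice_point if slice_point else ntop, axis=axis)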

caffe Softmax#

def _test_softmax(data, **kwargs):
    """One iteration of Softmax"""
    _test_op(data, L.Softmax, "Softmax", **kwargs)


def test_forward_Softmax():
    """Softmax"""
    _test_softmax(np.random.rand(1, 3, 10, 10).astype(np.float32))
    _test_softmax(np.random.rand(1, 3, 10, 10).astype(np.float32), axis=2)
    _test_softmax(np.random.rand(10, 10).astype(np.float32), axis=0)
    _test_softmax(np.random.rand(2, 10, 10).astype(np.float32), axis=1)

caffe TanH#

def _test_tanh(data, **kwargs):
    """One iteration of TanH"""
    _test_op(data, L.TanH, "TanH", **kwargs)


def test_forward_TanH():
    """TanH"""
    _test_tanh(np.random.rand(1, 3, 10, 10).astype(np.float32))
    _test_tanh(np.random.rand(3, 10, 10).astype(np.float32))
    _test_tanh(np.random.rand(10, 10).astype(np.float32))
    _test_tanh(np.random.rand(10).astype(np.float32))

caffe Reduction#

def _test_reduction(data, **kwargs):
    """One iteration of Reduction"""
    _test_op(data, L.Reduction, "Reduction", **kwargs)


def test_forward_Reduction():
    """Reduction"""
    reduction_op = {"SUM": 1, "ASUM": 2, "SUMSQ": 3, "MEAN": 4}
    _test_reduction(np.random.rand(10).astype(np.float32), operation=reduction_op["SUM"], axis=0)
    _test_reduction(
        np.random.rand(10, 20, 30, 40).astype(np.float32), operation=reduction_op["SUM"], axis=3
    )
    _test_reduction(
        np.random.rand(10, 20, 30, 40).astype(np.float32), operation=reduction_op["SUM"], axis=1
    )
    _test_reduction(
        np.random.rand(10).astype(np.float32), operation=reduction_op["SUM"], axis=0, coeff=0.5
    )
    _test_reduction(
        np.random.rand(10, 20, 30, 40).astype(np.float32),
        operation=reduction_op["SUM"],
        axis=3,
        coeff=5.0,
    )
    _test_reduction(np.random.rand(10).astype(np.float32), operation=reduction_op["ASUM"])
    _test_reduction(
        np.random.rand(10, 20).astype(np.float32), operation=reduction_op["ASUM"], axis=1
    )
    _test_reduction(
        np.random.rand(10, 20, 30, 40).astype(np.float32), operation=reduction_op["ASUM"], axis=3
    )
    _test_reduction(
        np.random.rand(10).astype(np.float32), operation=reduction_op["ASUM"], axis=0, coeff=0.0
    )
    _test_reduction(
        np.random.rand(10, 20, 30).astype(np.float32),
        operation=reduction_op["ASUM"],
        axis=2,
        coeff=7.0,
    )
    _test_reduction(
        np.random.rand(10, 20, 30, 40, 10).astype(np.float32),
        operation=reduction_op["ASUM"],
        axis=3,
        coeff=1.0,
    )
    _test_reduction(np.random.rand(10).astype(np.float32), operation=reduction_op["SUMSQ"], axis=0)
    _test_reduction(
        np.random.rand(10, 20, 30, 40).astype(np.float32), operation=reduction_op["SUMSQ"], axis=3
    )
    _test_reduction(
        np.random.rand(10).astype(np.float32), operation=reduction_op["SUMSQ"], axis=0, coeff=0.0
    )
    _test_reduction(
        np.random.rand(10, 20, 30, 40, 50).astype(np.float32),
        operation=reduction_op["SUMSQ"],
        axis=4,
        coeff=2.0,
    )
    _test_reduction(np.random.rand(10).astype(np.float32), operation=reduction_op["MEAN"], axis=0)
    _test_reduction(
        np.random.rand(10, 20, 30, 40).astype(np.float32), operation=reduction_op["MEAN"], axis=3
    )
    _test_reduction(
        np.random.rand(10).astype(np.float32), operation=reduction_op["MEAN"], axis=0, coeff=0.0
    )
    _test_reduction(
        np.random.rand(10, 20, 30, 40).astype(np.float32),
        operation=reduction_op["MEAN"],
        axis=3,
        coeff=2.0,
    )
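
Reduction collapses every axis from `axis` through the last one (not just the single named axis) and then scales the result by coeff; the codes in reduction_op match Caffe's ReductionOp enum. A NumPy reference (my sketch):

def reduction_ref(x, operation=1, axis=0, coeff=1.0):
    """SUM=1, ASUM=2, SUMSQ=3, MEAN=4 over all axes from `axis` onward."""
    tail = tuple(range(axis, x.ndim))
    if operation == 1:
        out = x.sum(axis=tail)
    elif operation == 2:
        out = np.abs(x).sum(axis=tail)
    elif operation == 3:
        out = np.square(x).sum(axis=tail)
    else:
        out = x.mean(axis=tail)
    return coeff * out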

caffe Embed#

def _test_embed(data, **kwargs):
    """One iteration of Embed"""
    _test_op(data, L.Embed, "Embed", **kwargs)


def test_forward_Embed():
    """Embed"""
    k = 20
    data = list(range(k))
    np.random.shuffle(data)
    # dimension is 1
    data = np.asarray(data)
    _test_embed(
        data,
        num_output=30,
        input_dim=k,
        bias_term=True,
        weight_filler=dict(type="xavier"),
        bias_filler=dict(type="xavier"),
    )
    _test_embed(
        data,
        num_output=30,
        input_dim=k,
        bias_term=False,
        weight_filler=dict(type="xavier"),
        bias_filler=dict(type="xavier"),
    )
    # dimension is 2
    data = np.reshape(data, [4, 5])
    _test_embed(
        data,
        num_output=30,
        input_dim=k,
        bias_term=True,
        weight_filler=dict(type="xavier"),
        bias_filler=dict(type="xavier"),
    )
    _test_embed(
        data,
        num_output=30,
        input_dim=k,
        bias_term=False,
        weight_filler=dict(type="xavier"),
        bias_filler=dict(type="xavier"),
    )
    # dimension is 3
    data = np.reshape(data, [2, 2, 5])
    _test_embed(
        data,
        num_output=30,
        input_dim=k,
        bias_term=True,
        weight_filler=dict(type="xavier"),
        bias_filler=dict(type="xavier"),
    )
    _test_embed(
        data,
        num_output=30,
        input_dim=k,
        bias_term=False,
        weight_filler=dict(type="xavier"),
        bias_filler=dict(type="xavier"),
    )
    # dimension is 4
    data = np.reshape(data, [2, 2, 5, 1])
    _test_embed(
        data,
        num_output=30,
        input_dim=k,
        bias_term=True,
        weight_filler=dict(type="xavier"),
        bias_filler=dict(type="xavier"),
    )
    _test_embed(
        data,
        num_output=30,
        input_dim=k,
        bias_term=False,
        weight_filler=dict(type="xavier"),
        bias_filler=dict(type="xavier"),
    )
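
Embed is just a table lookup: each integer index in [0, input_dim) selects a row of the (input_dim, num_output) weight matrix, and the optional bias is added to every looked-up row, so the output shape is the input shape plus a trailing num_output axis. A NumPy sketch:

def embed_ref(indices, weight, bias=None):
    """Row lookup into weight, with an optional broadcast bias."""
    out = weight[np.asarray(indices, dtype=np.int64)]
    return out + bias if bias is not None else out

# e.g. indices of shape (4, 5) with num_output=30 -> output (4, 5, 30)
assert embed_ref(np.zeros((4, 5), np.int64), np.zeros((20, 30), np.float32)).shape == (4, 5, 30)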