15# boost::current_exception_diagnostic_information[abi:cxx11](bool) at /usr/include/boost/exception/diagnostic_information.hpp:47
16# dd::Services::add_service(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, mapbox::util::variant<dd::MLService<dd::CaffeLib, dd::ImgCaffeInputFileConn, dd::SupervisedOutput, dd::CaffeModel>, dd::MLService<dd::CaffeLib, dd::CSVCaffeInputFileConn, dd::SupervisedOutput, dd::CaffeModel>, dd::MLService<dd::CaffeLib, dd::CSVTSCaffeInputFileConn, dd::SupervisedOutput, dd::CaffeModel>, dd::MLService<dd::CaffeLib, dd::TxtCaffeInputFileConn, dd::SupervisedOutput, dd::CaffeModel>, dd::MLService<dd::CaffeLib, dd::SVMCaffeInputFileConn, dd::SupervisedOutput, dd::CaffeModel>, dd::MLService<dd::CaffeLib, dd::ImgCaffeInputFileConn, dd::UnsupervisedOutput, dd::CaffeModel>, dd::MLService<dd::CaffeLib, dd::CSVCaffeInputFileConn, dd::UnsupervisedOutput, dd::CaffeModel>, dd::MLService<dd::CaffeLib, dd::CSVTSCaffeInputFileConn, dd::UnsupervisedOutput, dd::CaffeModel>, dd::MLService<dd::CaffeLib, dd::TxtCaffeInputFileConn, dd::UnsupervisedOutput, dd::CaffeModel>, dd::MLService<dd::CaffeLib, dd::SVMCaffeInputFileConn, dd::UnsupervisedOutput, dd::CaffeModel>, dd::MLService<dd::TorchLib, dd::ImgTorchInputFileConn, dd::SupervisedOutput, dd::TorchModel>, dd::MLService<dd::TorchLib, dd::VideoTorchInputFileConn, dd::SupervisedOutput, dd::TorchModel>, dd::MLService<dd::TorchLib, dd::TxtTorchInputFileConn, dd::SupervisedOutput, dd::TorchModel>, dd::MLService<dd::TorchLib, dd::CSVTSTorchInputFileConn, dd::SupervisedOutput, dd::TorchModel> >&&, dd::APIData const&) at /deepdetect/src/services.h:429
17# dd::JsonAPI::service_create(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) at /deepdetect/src/jsonapi.cc:718
18# DedeController::Z__PROXY_METHOD_update_service(std::shared_ptr<oatpp::web::protocol::http::incoming::Request> const&) at /deepdetect/src/http/controller.hpp:132
19# oatpp::web::server::api::ApiController::Handler<DedeController>::handle(std::shared_ptr<oatpp::web::protocol::http::incoming::Request> const&) at /deepdetect/build/oatpp/src/oatpp/src/oatpp/web/server/api/ApiController.hpp:300
20# oatpp::web::server::HttpProcessor::processNextRequest(oatpp::web::server::HttpProcessor::ProcessingResources&, std::shared_ptr<oatpp::web::protocol::http::incoming::Request> const&, oatpp::web::protocol::http::utils::CommunicationUtils::ConnectionState&) in ./main/dede
21# oatpp::web::server::HttpProcessor::processNextRequest(oatpp::web::server::HttpProcessor::ProcessingResources&) in ./main/dede
22# oatpp::web::server::HttpProcessor::Task::run() in ./main/dede
23# 0x00007F9C07148DE4 in /lib/x86_64-linux-gnu/libstdc++.so.6
24# start_thread at /build/glibc-SzIz7B/glibc-2.31/nptl/pthread_create.c:478
25# __clone at ../sysdeps/unix/sysv/linux/x86_64/clone.S:97
Aborted (core dumped)
import imp
import torch
import torch.nn as nn
import torchvision.transforms as T

class Predictor(nn.Module):
    def __init__(self):
        super().__init__()
        # imp.load_source registers MainModel so torch.load can unpickle the converted model
        MainModel = imp.load_source('MainModel', '/content/drive/MyDrive/TFtoTorchConversion/OpenImages/openimages.py')
        self.model = torch.load('/content/drive/MyDrive/TFtoTorchConversion/OpenImages/openimages.pth')
        self.model.eval()
        self.transforms = nn.Sequential(
            T.Resize([299, 299]),
            T.ConvertImageDtype(torch.float)
        )
        with open('/content/drive/MyDrive/TFtoTorchConversion/OpenImages/corresp-utf8.txt') as f:
            self.labels = [' '.join(l.strip().split(' ')[1:]) for l in f.readlines()]

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        with torch.no_grad():
            x = self.transforms(x)
            y_pred = self.model(x)
            return y_pred
x = torch.unsqueeze(self.transforms(x), 0)
is now
x = self.transforms(x)

example = torchvision.io.read_image(os.path.join(data_path, exampleFile)).to('cpu')
predictor = Predictor().to('cpu')
traced_script_module = torch.jit.trace(predictor, torch.unsqueeze(example, 0))

(torch.unsqueeze(example, 0) is also new, to account for where I'm turning a single image input into a batch of 1)
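A quick way to sanity-check the trace before handing it to DD is a round trip through torch.jit.save / torch.jit.load, comparing outputs on the same input. A minimal sketch (the output filename is a placeholder; traced_script_module and example come from the snippet above):

import torch

# save and reload the traced module the same way a libtorch consumer would load it
traced_script_module.save('openimages_traced.pt')  # placeholder filename
reloaded = torch.jit.load('openimages_traced.pt', map_location='cpu')

batch = torch.unsqueeze(example, 0)  # batch of 1, matching the trace input
with torch.no_grad():
    assert torch.allclose(traced_script_module(batch), reloaded(batch))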
I'm using torch 1.12.0+cu113 from Google Colab to save the scripted model.
It looks like it's an exception from torch::jit::load, so maybe you can reproduce it with your minimal example.
My minimal example doesn't have any problems, but I'm using libtorch-1.12.0+cpu in that example.
[2022-07-18 13:34:12.916] [openimages] [error] mllib internal error: Libtorch error:Dimension out of range (expected to be in range of [-1, 0], but got 1)
Exception raised from maybe_wrap_dim at /deepdetect/build/pytorch/src/pytorch/c10/core/WrapDimMinimal.h:25 (most recent call first):
frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x6c (0x7f55a6186dfc in /deepdetect/build/pytorch/src/pytorch/torch/lib/libc10.so)
frame #1: <unknown function> + 0xc6b58a (0x7f55a6e3158a in /deepdetect/build/pytorch/src/pytorch/torch/lib/libtorch_cpu.so)
frame #2: at::meta::structured__softmax::meta(at::Tensor const&, long, bool) + 0x37 (0x7f55a79ccbf7 in /deepdetect/build/pytorch/src/pytorch/torch/lib/libtorch_cpu.so)
frame #3: <unknown function> + 0x206dde5 (0x7f55a8233de5 in /deepdetect/build/pytorch/src/pytorch/torch/lib/libtorch_cpu.so)
frame #4: <unknown function> + 0x206de6c (0x7f55a8233e6c in /deepdetect/build/pytorch/src/pytorch/torch/lib/libtorch_cpu.so)
frame #5: at::_ops::_softmax::redispatch(c10::DispatchKeySet, at::Tensor const&, long, bool) + 0xd4 (0x7f55a8069de4 in /deepdetect/build/pytorch/src/pytorch/torch/lib/libtorch_cpu.so)
frame #6: <unknown function> + 0x3cb3dfe (0x7f55a9e79dfe in /deepdetect/build/pytorch/src/pytorch/torch/lib/libtorch_cpu.so)
frame #7: <unknown function> + 0x3cb42cf (0x7f55a9e7a2cf in /deepdetect/build/pytorch/src/pytorch/torch/lib/libtorch_cpu.so)
frame #8: at::_ops::_softmax::call(at::Tensor const&, long, bool) + 0x144 (0x7f55a80d8114 in /deepdetect/build/pytorch/src/pytorch/torch/lib/libtorch_cpu.so)
frame #9: at::native::softmax(at::Tensor const&, long, c10::optional<c10::ScalarType>) + 0xa6 (0x7f55a79cd5f6 in /deepdetect/build/pytorch/src/pytorch/torch/lib/libtorch_cpu.so)
frame #10: <unknown function> + 0x227db2b (0x7f55a8443b2b in /deepdetect/build/pytorch/src/pytorch/torch/lib/libtorch_cpu.so)
frame #11: at::_ops::softmax_int::call(at::Tensor const&, long, c10::optional<c10::ScalarType>) + 0x14d (0x7f55a80c712d in /deepdetect/build/pytorch/src/pytorch/torch/lib/libtorch_cpu.so)
frame #12: <unknown function> + 0x382006 (0x55c89956e006 in ./main/dede)
frame #13: <unknown function> + 0x25567e (0x55c89944167e in ./main/dede)
frame #14: <unknown function> + 0x255a08 (0x55c899441a08 in ./main/dede)
frame #15: <unknown function> + 0x255d28 (0x55c899441d28 in ./main/dede)
frame #16: <unknown function> + 0x256048 (0x55c899442048 in ./main/dede)
frame #17: <unknown function> + 0x256368 (0x55c899442368 in ./main/dede)
frame #18: <unknown function> + 0x256688 (0x55c899442688 in ./main/dede)
frame #19: <unknown function> + 0x2569a8 (0x55c8994429a8 in ./main/dede)
frame #20: <unknown function> + 0x256cc8 (0x55c899442cc8 in ./main/dede)
frame #21: <unknown function> + 0x256fe8 (0x55c899442fe8 in ./main/dede)
frame #22: <unknown function> + 0x257308 (0x55c899443308 in ./main/dede)
frame #23: <unknown function> + 0x257628 (0x55c899443628 in ./main/dede)
frame #24: <unknown function> + 0x257d66 (0x55c899443d66 in ./main/dede)
frame #25: <unknown function> + 0x513418 (0x55c8996ff418 in ./main/dede)
frame #26: <unknown function> + 0x203994 (0x55c8993ef994 in ./main/dede)
frame #27: <unknown function> + 0x1b41e2 (0x55c8993a01e2 in ./main/dede)
frame #28: <unknown function> + 0x874f66 (0x55c899a60f66 in ./main/dede)
frame #29: <unknown function> + 0x875ae2 (0x55c899a61ae2 in ./main/dede)
frame #30: <unknown function> + 0x879ca0 (0x55c899a65ca0 in ./main/dede)
frame #31: <unknown function> + 0xd6de4 (0x7f55a4e35de4 in /lib/x86_64-linux-gnu/libstdc++.so.6)
frame #32: <unknown function> + 0x8609 (0x7f55a4b96609 in /lib/x86_64-linux-gnu/libpthread.so.0)
frame #33: clone + 0x43 (0x7f55a4abb133 in /lib/x86_64-linux-gnu/libc.so.6)
Now the exception comes from the _module.forward() call, so progress!
With torch::ones({1, 3, 299, 299}) as input they're identical, so I wonder if there's something different between how I'm reading/preprocessing the image vs how it's done in DD.
InceptionV3_Logits_Conv2d_1c_1x1_convolution = self.InceptionV3_Logits_Conv2d_1c_1x1_convolution(InceptionV3_Logits_AvgPool_1a_8x8_AvgPool)
InceptionV3_Logits_SpatialSqueeze = torch.squeeze(InceptionV3_Logits_Conv2d_1c_1x1_convolution)
multi_predictions = F.sigmoid(InceptionV3_Logits_SpatialSqueeze)
return multi_predictions
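Worth noting: torch.squeeze without a dim argument drops every size-1 dimension, so with a batch of one the [1, C] logits collapse to [C], and anything downstream that takes a softmax over dim 1 (which the at::_ops::softmax_int frame in the trace above suggests DD's output path does) hits exactly the "Dimension out of range (expected to be in range of [-1, 0], but got 1)" error. A minimal sketch, with an illustrative class count:

import torch

logits = torch.randn(1, 6012, 1, 1)  # [batch, classes, 1, 1] conv output; 6012 is illustrative

squeezed = torch.squeeze(logits)     # squeeze() drops ALL size-1 dims, including the batch dim
print(squeezed.shape)                # torch.Size([6012]) -- 1-D, batch dimension gone

try:
    torch.softmax(squeezed, dim=1)   # a 1-D tensor only has dims 0 and -1
except IndexError as e:
    print(e)  # Dimension out of range (expected to be in range of [-1, 0], but got 1)

# squeezing only the spatial dims keeps the batch dimension intact
safe = logits.squeeze(-1).squeeze(-1)    # shape [1, 6012]
print(torch.softmax(safe, dim=1).shape)  # torch.Size([1, 6012])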
In my minimal example, I see this in the input tensor just before calling forward() (just looking at the first 10 values as a quick comparison):
Sizes: [1, 3, 299, 299]
before forward() inputs[0].toTensor()[0][0][0].slice(0, 0, 10):
20
20
21
21
20
20
15
8
8
8
In DD, I see this:
in_vals[0].toTensor().size(): [1, 3, 299, 299]
before forward() in_vals[0].toTensor()[0][0][0].slice(0, 0, 10):
20
20
21
21
21
21
16
9
8
8
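The two dumps differ by only one or two counts in a few positions, which points at a preprocessing mismatch (channel order or resize interpolation) rather than the model itself. A way to isolate the variable in Python (a sketch; the image path is a placeholder):

import cv2
import torch

img = cv2.imread('test.jpg')  # placeholder path; cv2.imread returns BGR

def first_vals(image, to_rgb=True, interp=cv2.INTER_CUBIC):
    out = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) if to_rgb else image
    out = cv2.resize(out, (299, 299), interpolation=interp)
    t = torch.from_numpy(out).permute(2, 0, 1).float()  # HWC -> CHW, like the C++ example
    return t[0, 0, :10]  # first 10 values of channel 0, row 0

print(first_vals(img))                           # RGB + cubic
print(first_vals(img, to_rgb=False))             # BGR + cubic
print(first_vals(img, interp=cv2.INTER_LINEAR))  # RGB + bilinear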
cv::Mat img = cv::imread(argv[2]);                 // OpenCV reads BGR
cv::cvtColor(img, img, cv::COLOR_BGR2RGB);         // convert to RGB
cv::resize(img, img, cv::Size(299, 299), 0, 0, cv::INTER_CUBIC);
at::Tensor tensor_image = torch::from_blob(img.data, { img.rows, img.cols, img.channels() }, at::kByte);
tensor_image = tensor_image.to(at::kFloat);
tensor_image = tensor_image.permute({ 2, 0, 1 });  // HWC -> CHW
std::vector<torch::jit::IValue> inputs;
tensor_image = torch::unsqueeze(tensor_image, 0);  // batch of 1
inputs.push_back({tensor_image});
std::cout << "Sizes: " << inputs[0].toTensor().sizes() << std::endl;
std::cout << "before forward() inputs[0].toTensor()[0][1][0].slice(0, 0, 10): " << inputs[0].toTensor()[0][1][0].slice(0, 0, 10) << std::endl;
at::Tensor output = module.forward(inputs).toTensor();
If I remove the cv::cvtColor(img, img, cv::COLOR_BGR2RGB); line from my minimal example, it appears I get the exact same output from my example vs DD.
Is the rgb param in DD new? I'm surprised to see in the API that it defaults to false, as I thought only OpenCV uses BGR, and so caffe, tf, torch, and everything else in DD would need RGB anyway 🤔
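Given that default, passing rgb: true in the input parameters at service creation should make DD match a pipeline that converts to RGB. A sketch of what that might look like (host, service name, model path, and the exact parameter set are placeholders based on the discussion above):

import requests

# hypothetical service creation; the "rgb" flag is the point here
payload = {
    "mllib": "torch",
    "type": "supervised",
    "parameters": {
        "input": {
            "connector": "image",
            "width": 299,
            "height": 299,
            "rgb": True,  # defaults to false (BGR) per the API discussion above
        },
    },
    "model": {"repository": "/path/to/openimages"},  # placeholder
}
requests.put("http://localhost:8080/services/openimages", json=payload)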