2021-01-28

nvJPEG: encode packed BGR

Well, my goal is simple -- trying to create a JPEG encoded image from buffer with packed/interleaved BGR data (could be RGB as well).

The NVidia docs contain an example, the proper image input is essentially described here.

So I tried the following:

#include <nvjpeg.h>

// Very simple bitmap descriptor for a packed (interleaved) 3-byte-per-pixel image.
typedef struct {
    int width;               // image width in pixels
    int height;              // image height in pixels
    unsigned char *buffer;   // packed BGR pixels; DEVICE memory (copied out with cudaMemcpyDeviceToHost below)
    unsigned long data_size; // size of buffer in bytes (used as the cudaMemcpy byte count)
} my_bitmap_type;


std::vector<unsigned char> BitmapToJpegCUDA(const my_bitmap_type *image) 
{
  nvjpegHandle_t nv_handle;
  nvjpegEncoderState_t nv_enc_state;
  nvjpegEncoderParams_t nv_enc_params;
  cudaStream_t stream = NULL;

  nvjpegStatus_t er;
  nvjpegCreateSimple(&nv_handle);
  nvjpegEncoderStateCreate(nv_handle, &nv_enc_state, stream);
  nvjpegEncoderParamsCreate(nv_handle, &nv_enc_params, stream);

  nvjpegImage_t nv_image;
  nv_image.channel[0] = image->buffer;
  nv_image.pitch[0] = 3 * image->width;

  // Nope, that's for planar images!

  // nv_image.channel[0] = image->buffer;
  // nv_image.channel[1] = image->buffer + image->width * image->height;
  // nv_image.channel[2] = image->buffer + 2 * image->width * image->height;
  // nv_image.pitch[0] = image->width;
  // nv_image.pitch[1] = image->width;
  // nv_image.pitch[2] = image->width;

  er = nvjpegEncodeImage(nv_handle, nv_enc_state, nv_enc_params, &nv_image,
                        NVJPEG_INPUT_BGRI, image->width, image->height, stream);
  LOG(ERROR) << "enc " << er;

  size_t length = 0;
  nvjpegEncodeRetrieveBitstream(nv_handle, nv_enc_state, NULL, &length, stream);

  cudaStreamSynchronize(stream);
  std::vector<unsigned char> jpeg(length);
  nvjpegEncodeRetrieveBitstream(nv_handle, nv_enc_state, jpeg.data(), &length, 0);

  nvjpegEncoderParamsDestroy(nv_enc_params);
  nvjpegEncoderStateDestroy(nv_enc_state);
  nvjpegDestroy(nv_handle);

  return jpeg;
}

The logger says that nvjpegEncodeImage just returns NVJPEG_STATUS_INVALID_PARAMETER, meaning nothing works. In case you suspect my_bitmap_type to be filled wrong, here's the similar turbojpeg-powered encoding:

#include <turbojpeg.h>

std::vector<unsigned char> BitmapToJpegBuffer(const my_bitmap_type *image)
{
    std::vector<unsigned char> out_data(3 * image->width * image->height);

    cudaError_t err = cudaMemcpy(out_data.data(), image->buffer, image->data_size, cudaMemcpyDeviceToHost);
    if (cudaSuccess != err) {
        LOG(ERROR) << "failed to copy CUDA memory: " << err;
    }

    tjhandle jpeg = tjInitCompress();
    unsigned char *encoded_buf = nullptr;
    long unsigned int encoded_sz = 0;

    int tjres = tjCompress2(jpeg,
                            out_data.data(),
                            image->width,
                            image->width * 3,
                            image->height,
                            TJPF_BGR,
                            &encoded_buf,
                            &encoded_sz,
                            TJSAMP_444,
                            95,
                            TJFLAG_FASTDCT);

    if (tjres != 0) {
        LOG(ERROR) << "jpeg compession failed!";
        return {};
    }

    std::vector<unsigned char> result(encoded_buf, encoded_buf + encoded_sz);
    tjFree(encoded_buf);
    tjDestroy(jpeg);

    return result;
}

... and it works just fine.

I'm desperately trying to figure out what's missing in the code. I would gratefully appreciate any help or advice.

UPD: Using CentOS 7 / libnvjpeg-11-1.x86_64 (CUDA 11.1) / gcc 4.8.5



from Recent Questions - Stack Overflow https://ift.tt/2MwDxN7
https://ift.tt/eA8V8J

No comments:

Post a Comment