%apply (char* STRING,size_t LENGTH)
{
(char* dataBuffer, int size)
};
This is used to convert char* to byte[].
But I need to convert unsigned char* to short[]:
%apply (unsigned char* STRING,size_t LENGTH)
{
(unsigned char* dataBuffer, int size)
};
This %apply isn't working. How can I fix it?
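For context, %apply only copies typemaps that already exist for the source pattern; SWIG ships a (char *STRING, size_t LENGTH) typemap set (e.g. in the Java module), but there is no built-in (unsigned char *STRING, size_t LENGTH) set, so the second %apply matches nothing. Below is a minimal hand-written sketch, assuming the Java backend (byte[]/short[] suggests Java or C#; C# would need imtype/cstype typemaps instead) and assuming an element-wise narrowing copy from short[] to unsigned char is acceptable:

%typemap(jni)    (unsigned char* dataBuffer, int size) "jshortArray"
%typemap(jtype)  (unsigned char* dataBuffer, int size) "short[]"
%typemap(jstype) (unsigned char* dataBuffer, int size) "short[]"
%typemap(javain) (unsigned char* dataBuffer, int size) "$javainput"
%typemap(in)     (unsigned char* dataBuffer, int size) {
  /* Narrowing copy from jshort to unsigned char; adjust if you need
     a raw reinterpretation instead. */
  jsize n = JCALL1(GetArrayLength, jenv, $input);
  jshort* elems = JCALL2(GetShortArrayElements, jenv, $input, 0);
  $1 = (unsigned char*) malloc(n);
  for (jsize i = 0; i < n; ++i) $1[i] = (unsigned char) elems[i];
  $2 = (int) n;
  JCALL3(ReleaseShortArrayElements, jenv, $input, elems, JNI_ABORT);
}
%typemap(freearg) (unsigned char* dataBuffer, int size) { free($1); }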
This is the entry point of the atadenoise filter:
libavfilter/vf_atadenoise.c
static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
...
}
which in turn calls
s->dsp.filter_row[p](src, dst, srcf, w, mid, size, thra, thrb, weights);
The specific noise-reduction routine is then selected according to the configured parameters:
static void fweight_row##name(const uint8_t *ssrc, uint8_t *ddst, \
const uint8_t *ssrcf[SIZE], \
int w, int mid, int size, \
int thra, int thrb, const float *weights) \
...
static void fweight_row##name##_serial(const uint8_t *ssrc, uint8_t *ddst, \
const uint8_t *ssrcf[SIZE], \
int w, int mid, int size, \
int thra, int thrb, \
const float *weights) \
.....
I call the atadenoise filter using the example code:
doc/examples/filtering_video.c
// const char *filter_descr = "scale=78:24,transpose=cclock";
const char *filter_descr =
"atadenoise=0a=0.2:1a=0.2:2a=0.2:0b=0.3:1b=0.3:2b=0.3";
/* other way:
scale=78:24 [scl]; [scl] transpose=cclock // assumes "[in]" and "[out]" to be input output pads respectively
*/
......
int main(int argc, char **argv)
{
int ret;
AVPacket packet;
AVFrame *frame;
AVFrame *filt_frame;
if (argc != 2) {
fprintf(stderr, "Usage: %s file\n", argv[0]);
exit(1);
}
......
configuring const char *filter_descr this way to invoke atadenoise:
const char *filter_descr =
"atadenoise=0a=0.2:1a=0.2:2a=0.2:0b=0.3:1b=0.3:2b=0.3";
If I want to connect atadenoise to my project, I should start from filter_slice, but I don't know how to feed parameters to it.
Currently I use Xcode to set a breakpoint in atadenoise, but that's not reliable, and now I cannot break into it at all. Maybe I accidentally changed some configuration.
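For reference, in filtering_video.c the parameters never reach filter_slice directly: the descriptor string is handed to avfilter_graph_parse_ptr(), which instantiates atadenoise and applies the 0a=/0b= options during the filter's initialisation; filter_slice then reads them from the filter's private context. A condensed sketch of that wiring, with the buffer source/sink setup elided:

#include <libavfilter/avfilter.h>
#include <libavutil/error.h>

static int init_atadenoise_graph(AVFilterGraph **out_graph,
                                 AVFilterInOut *inputs, AVFilterInOut *outputs)
{
    /* inputs/outputs are assumed to already describe the "buffer" source
       and "buffersink" sink, as in filtering_video.c's init_filters(). */
    AVFilterGraph *graph = avfilter_graph_alloc();
    const char *filter_descr =
        "atadenoise=0a=0.2:1a=0.2:2a=0.2:0b=0.3:1b=0.3:2b=0.3";
    int ret;
    if (!graph)
        return AVERROR(ENOMEM);
    /* Parses the descriptor and creates the atadenoise instance;
       the option string is handed to the filter's init code here. */
    ret = avfilter_graph_parse_ptr(graph, filter_descr, &inputs, &outputs, NULL);
    if (ret >= 0)
        ret = avfilter_graph_config(graph, NULL);  /* links and initialises */
    *out_graph = graph;
    return ret;
}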
I am trying to make compile-time checks of the printf format string. Here is the code.
#include <type_traits>
#include <iostream>
#include <cstdio> // for printf
template <typename CHAR, typename ...ARGS>
constexpr size_t trace_cond(CHAR fmt, ARGS&&... args) {
//always needs to pass
return 1;
}
template <size_t N, typename ...ARGS>
constexpr size_t trace_cond(const char (&fmt)[N], ARGS&&... args) {
//return parse(fmt) == args;
return sizeof...(args) != 0;
}
#define TRACE(fmt, ...) { \
static_assert(trace_cond(fmt, __VA_ARGS__), "Wrong ARGS"); \
printf(fmt, ##__VA_ARGS__); \
}
int main(int argc, char* argv[]) {
//working fine
TRACE("%d %d\n", 2, 3);
const char* format = "%d %d\n";
//error
TRACE(format, 2, 3);
}
So when the format is known at compile time I want to have a check (using static_assert), and if it is not known then the check should not run or should always pass.
Notes:
Currently the code does not compile, because const char *format is not declared constexpr.
main should not be changed, because the TRACE macro is used in a large codebase, but changes to TRACE are more than welcome.
So my question is:
Is there a way to skip the static_assert, or make it pass, when fmt's type is const char*?
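For what it's worth, the failure can be reduced to the rule that a static_assert condition must be a constant expression, and a call that reads a non-constexpr pointer variable is not one, no matter what the called function returns. A minimal reproduction, independent of the TRACE machinery above:

#include <cstddef>

constexpr size_t always_one(const char*) { return 1; }

int main() {
    static_assert(always_one("literal"), "ok");  // fine: constant expression
    const char* fmt = "%d\n";
    // static_assert(always_one(fmt), "boom");   // error: fmt is not constexpr
    (void)fmt;
}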
I think you can do what you want by moving the static_assert into trace_cond:
#include <type_traits>
#include <iostream>
#include <cstdio> // for printf
template <size_t C, typename CHAR>
size_t trace_cond(CHAR fmt) {
//always needs to pass
return 1;
}
template <typename ...ARGS>
constexpr size_t count_args(ARGS&&... args) {
return sizeof...(args);
}
template <size_t C, size_t N>
constexpr size_t trace_cond(const char (&fmt)[N]) {
// TODO, parse fmt and do the needed checks
static_assert(C == 3, "");
return 1;
}
#define TRACE(fmt, ...) { \
trace_cond<count_args(__VA_ARGS__)>(fmt); \
printf(fmt, ##__VA_ARGS__); \
}
int main(int argc, char* argv[]) {
// Static check - hard coded to check for 3 arguments
TRACE("%d %d %d\n", 2, 3, 4);
const char* format = "%d %d\n";
// No checks performed
TRACE(format, 2, 3);
}
There is default support for AES-256 in Realm, but can we change the encryption algorithm to something other than AES-256?
Short Answer: No, it is not a dynamically pluggable system.
The AES Encryption is baked into realm-core, the shared C++ library, at compile time and uses the various platform crypto features available (OS / hardware-based). It is open source, so in theory you could write a different provider and handle changes to methods like realm::util::encryption_read_barrier and realm::util::encryption_write_barrier as needed... or possibly do a paid contract with Realm based upon your custom encryption requirements.
Re: https://github.com/realm/realm-core/blob/master/src/realm/util/aes_cryptor.hpp
class AESCryptor {
public:
AESCryptor(const uint8_t* key);
~AESCryptor() noexcept;
void set_file_size(off_t new_size);
bool read(FileDesc fd, off_t pos, char* dst, size_t size);
void write(FileDesc fd, off_t pos, const char* src, size_t size) noexcept;
private:
enum EncryptionMode {
#if REALM_PLATFORM_APPLE
mode_Encrypt = kCCEncrypt,
mode_Decrypt = kCCDecrypt
#elif defined(_WIN32)
mode_Encrypt = 0,
mode_Decrypt = 1
#else
mode_Encrypt = AES_ENCRYPT,
mode_Decrypt = AES_DECRYPT
#endif
};
#if REALM_PLATFORM_APPLE
CCCryptorRef m_encr;
CCCryptorRef m_decr;
#elif defined(_WIN32)
BCRYPT_KEY_HANDLE m_aes_key_handle;
#else
AES_KEY m_ectx;
AES_KEY m_dctx;
#endif
uint8_t m_hmacKey[32];
std::vector<iv_table> m_iv_buffer;
std::unique_ptr<char[]> m_rw_buffer;
std::unique_ptr<char[]> m_dst_buffer;
void calc_hmac(const void* src, size_t len, uint8_t* dst, const uint8_t* key) const;
bool check_hmac(const void* data, size_t len, const uint8_t* hmac) const;
void crypt(EncryptionMode mode, off_t pos, char* dst, const char* src, const char* stored_iv) noexcept;
iv_table& get_iv_table(FileDesc fd, off_t data_pos) noexcept;
};
I'm using Tcl 8.6 and I'm trying to do something like this to add functions to the Tcl interpreter:
Tcl_Interp* interp;
void init() {
interp = Tcl_CreateInterp();
}
void add_tcl_function(char* cmd, function<int(int,char**)> F) {
obj2argv* o2a = new obj2argv;
auto lambda_proc = [&](
ClientData cdata,
Tcl_Interp* interp,
int objc,
Tcl_Obj* const objv[])
{
o2a->set(objc, objv);
F(objc, o2a->get_argv());
};
auto lambda_delete = [&](ClientData cdata)
{
delete o2a;
};
Tcl_CreateObjCommand(interp, cmd, lambda_proc, NULL, lambda_delete);
}
What I'm wondering is how to convert "Tcl_Obj* const objv[]" to "char** argv"?
I was thinking about creating a class:
class obj2argv {
obj2argv();
void set(int objc, Tcl_Obj* const objv[]);
char** get_argv();
private:
//...
};
any ideas on how to implement set() and get_argv()?
Is there an easier way to do this?
Thanks.
If you're interfacing a function that's fundamentally working with const char** for arguments, you should register the function with Tcl_CreateCommand and let Tcl handle the mapping to strings for you. It already has all the mechanisms required.
More formally, you are dealing with a gluing function with this signature:
typedef int (Tcl_CmdProc) (ClientData clientData, Tcl_Interp *interp,
int argc, CONST84 char *argv[]);
The CONST84 should be read as being plain const in all new code, and ClientData is a pointer-sized value that Tcl just hands around and never inspects (same as with your existing code).
If you are going to do the mapping yourself, Tcl_GetString takes a Tcl_Obj* and returns the char* representation of it. The representation should usually be treated as const; it simply isn't formally typed as such for historical reasons.
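A minimal sketch of that string-based route, assuming a global interp as in the question and Tcl 8.6's default const-correct headers (CONST84 == const). Note the pairing: Tcl_CmdProc goes with Tcl_CreateCommand, while Tcl_CreateObjCommand expects a Tcl_ObjCmdProc taking Tcl_Obj* const*. The std::function is heap-allocated so it survives the registration call:

#include <tcl.h>
#include <functional>

using CmdFn = std::function<int(int, char**)>;
extern Tcl_Interp* interp;

static int cmd_dispatch_str(ClientData clientData, Tcl_Interp* interp,
                            int argc, const char* argv[]) {
    // Tcl has already converted the arguments to strings for us.
    auto* fn = static_cast<CmdFn*>(clientData);
    // const_cast only because the wrapped signature demands char**.
    return (*fn)(argc, const_cast<char**>(argv));
}

static void cmd_delete_str(ClientData clientData) {
    delete static_cast<CmdFn*>(clientData);  // runs when the command is removed
}

void add_tcl_function(const char* cmd, CmdFn F) {
    Tcl_CreateCommand(interp, cmd, cmd_dispatch_str,
                      new CmdFn(std::move(F)), cmd_delete_str);
}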
I wanted to add some more information:
I gave up on using lambdas because once I added a capture list the lambda would no longer convert to a function pointer (a lambda with captures cannot convert to one). So I went with the traditional approach (see below). EXCEPT: I still have no idea why the Tcl documentation says
typedef int Tcl_CmdProc(
ClientData clientData,
Tcl_Interp *interp,
int argc,
const char *argv[]);
But the compiler requires this to compile:
typedef int Tcl_CmdProc(
ClientData clientData,
Tcl_Interp *interp,
int argc,
Tcl_Obj* const* argv);
The Code:
int cmd_dispatch(
ClientData clientData,
Tcl_Interp* interp,
int argc,
Tcl_Obj* const* argv)
{
function<int(int,char**)> F = *(function<int(int,char**)>*)clientData;
return F(argc, (char**) argv); // <= CAST DOESN'T SEEM RIGHT
}
void cmd_delete(ClientData clientData)
{
}
void add_tcl_function(const char* cmd, function<int(int,char**)> F) {
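// NB: &F below takes the address of a by-value parameter, which dangles
// as soon as add_tcl_function returns; VERSION 2 fixes this by
// heap-allocating the state instead.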
Tcl_CreateObjCommand(interp, cmd, cmd_dispatch, (void*)&F, cmd_delete);
}
VERSION 2:
struct cmd_data {
//Tcl_Interp* interp,
function<int(int,char**)> F;
int argc;
char* argv[MAX_ARGS];
};
int cmd_dispatch(
ClientData clientData,
Tcl_Interp* interp,
int argc,
Tcl_Obj* const* objv)
{
auto cmd_data1 = (struct cmd_data*) clientData;
cmd_data1->argc = argc;
for(int i=0; ((i < argc) && (i < MAX_ARGS)); i++) {
cmd_data1->argv[i] = Tcl_GetString(objv[i]);
// Who owns object returned by Tcl_GetString?
// memory leak? or invalid after return from function?
// garbage collected by tcl interp?
}
return cmd_data1->F(argc, cmd_data1->argv);
}
void cmd_delete(ClientData clientData)
{
auto cmd_data1 = (struct cmd_data*) clientData;
if (cmd_data1) {
delete cmd_data1;
}
}
void add_tcl_function(const char* cmd, function<int(int,char**)> F) {
auto cmd_data1 = new struct cmd_data;
cmd_data1->F = F;
Tcl_CreateObjCommand(interp, cmd, cmd_dispatch, (void*)cmd_data1, cmd_delete);
}
void init_tcl_commands() {
auto lambda_hello = [&](int argc ,char** argv) -> int {
cout << "HELLO WORLD!\n";
return 0;
};
tcl_backend::add_tcl_function("hello", lambda_hello);
}
My goal is to write a C++/CLI wrapper around the ffmpeg library, importing the ffmpeg functions from its DLL modules.
Later I will use this interface from C#.
This is my challenge, don't ask me why. :)
So I've implemented the Wrap class listed below:
namespace FFMpegWrapLib
{
public class Wrap
{
private:
public:
//wstring libavcodecDllName = "avcodec-56.dll";
//wstring libavformatDllName = "avformat-56.dll";
//wstring libswscaleDllName = "swscale-3.dll";
//wstring libavutilDllName = "avutil-54.dll";
HMODULE libavcodecDLL;
HMODULE libavformatDLL;
HMODULE libswscaleDLL;
HMODULE libavutilDLL;
AVFormatContext **pFormatCtx = nullptr;
AVCodecContext *pCodecCtxOrig = nullptr;
AVCodecContext *pCodecCtx = nullptr;
AVCodec **pCodec = nullptr;
AVFrame **pFrame = nullptr;
AVFrame **pFrameRGB = nullptr;
AVPacket *packet = nullptr;
int *frameFinished;
int numBytes;
uint8_t *buffer = nullptr;
struct SwsContext *sws_ctx = nullptr;
void Init();
void AVRegisterAll();
void Release();
bool SaveFrame(const char *pFileName, AVFrame * frame, int w, int h);
bool GetStreamInfo();
int FindVideoStream();
bool OpenInput(const char* file);
AVCodec* FindDecoder();
AVCodecContext* AllocContext3();
bool CopyContext();
bool OpenCodec2();
AVFrame* AllocFrame();
int PictureGetSize();
void* Alloc(size_t size);
int PictureFill(AVPicture *, const uint8_t *, enum AVPixelFormat, int, int);
SwsContext* GetSwsContext(int, int, enum AVPixelFormat, int, int, enum AVPixelFormat, int, SwsFilter *, SwsFilter *, const double *);
int ReadFrame(AVFormatContext *s, AVPacket *pkt);
int DecodeVideo2(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, const AVPacket *avpkt);
int SwsScale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[]);
void PacketFree(AVPacket *pkt);
void BufferFree(void *ptr);
void FrameFree(AVFrame **frame);
int CodecClose(AVCodecContext *);
void CloseInput(AVFormatContext **);
bool SeekFrame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags);
Wrap();
~Wrap();
bool GetVideoFrame(char* str_in_file, char* str_out_img, uint64_t time);
};
public ref class managedWrap
{
public:
managedWrap(){}
~managedWrap(){ delete unmanagedWrap; }
bool GetVideoFrameToFile(char* str_in_file, char* str_out_img, uint64_t time)
{
return unmanagedWrap->GetVideoFrame(str_in_file, str_out_img, time);
}
static Wrap* unmanagedWrap = new Wrap();
};
}
So the imports from libavcodec etc. are successful.
The problem is an AccessViolationException when calling a DLL function, for example in OpenInput (i.e. avformat_open_input in the native ffmpeg library).
The OpenInput function code is below:
bool FFMpegWrapLib::Wrap::OpenInput(const char* file)
{
typedef int avformat_open_input(AVFormatContext **, const char *, AVInputFormat *, AVDictionary **);
avformat_open_input* pavformat_open_input = (avformat_open_input *)GetProcAddress(libavformatDLL, "avformat_open_input");
if (pavformat_open_input == nullptr)
{
throw exception("Unable to find avformat_open_input function address in libavformat module");
return false;
}
//pin_ptr<AVFormatContext *> pinFormatContext = &(new interior_ptr<AVFormatContext *>(pCodecCtx));
pFormatCtx = new AVFormatContext*;
//*pFormatCtx = new AVFormatContext;
int ret = pavformat_open_input(pFormatCtx, file, NULL, NULL); // here it fails
return ret == 0;
}
So the problem, I think, is that the fields of the Wrap class live in managed memory, while ffmpeg works with native memory, initialising the pFormatCtx variable by its address.
Can I avoid this, or is it impossible?
I had the same problem: you need to initialise the AVFormatContext object.
Good Example:
AVFormatContext *pFormatCtx = avformat_alloc_context();
Bad example:
AVFormatContext *pFormatCtx = NULL;
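Applied to the OpenInput method above, that means resolving avformat_alloc_context from the same module and using it to initialise *pFormatCtx before the open call (new AVFormatContext* leaves the pointee indeterminate, and avformat_open_input treats a non-NULL *ps as a caller-allocated context). A sketch under those assumptions:

typedef AVFormatContext* avformat_alloc_context_t(void);
avformat_alloc_context_t* pavformat_alloc_context =
    (avformat_alloc_context_t*) GetProcAddress(libavformatDLL, "avformat_alloc_context");
if (pavformat_alloc_context == nullptr)
{
    throw exception("Unable to find avformat_alloc_context function address in libavformat module");
}
pFormatCtx = new AVFormatContext*;
*pFormatCtx = pavformat_alloc_context();  // initialise instead of leaving garbage
int ret = pavformat_open_input(pFormatCtx, file, NULL, NULL);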