Memoization is a very useful technique for developing algorithms. Python 3 has native support for memoization; here is how it is used.

from functools import lru_cache

@lru_cache  # memoization: each distinct n is computed only once
def fib(n):
    # Base cases fib(0) = fib(1) = 1; everything else is the usual recurrence.
    return 1 if n < 2 else fib(n - 1) + fib(n - 2)

A similar feature can be implemented in C++. Here is an example of how to use it.

// The original definition `uint64_t fib(int n)` is wrapped in the
// MEMOIZATION macro; the recursive calls below go through the Memo
// wrapper, so each n is computed only once.
MEMOIZATION(uint64_t, fib,(int n))
{
   if(n < 2)               // base cases: fib(0) = fib(1) = 1
      return 1;
   return fib(n-1)+fib(n-2);  // hits the cache via the generated wrapper
}

The originally defined uint64_t fib(int n) has to be wrapped in a macro that takes 3 parameters: return_type, func_name, and arguments — so it becomes MEMOIZATION(uint64_t, fib,(int n)). From the user’s perspective it looks similar to Python’s version.

Here is the full implementation of the MEMOIZATION macro, the Memo class, and a helper function. This implementation is not the same as lru_cache, where old items in the cache are discarded once a limit is reached.

#include  <map>
#include  <tuple>

// MEMOIZATION(ret, name, params) expands, at namespace scope, into:
//   1. a forward declaration of the raw (un-memoized) function name##nonmemo;
//   2. a global object `name` wrapping that function in a Memo cache, so every
//      call written as name(...) — including the recursive calls inside the
//      user's body — goes through the cache;
//   3. the header of the raw function's definition; the `{ ... }` body the
//      user writes after the macro invocation completes it.
// NOTE(review): comments are kept OUT of the macro itself — a // comment
// before a trailing backslash would splice the next line into the comment.
#define MEMOIZATION(ret,name,params) \
  ret name##nonmemo params; \
  auto name = memo_wrapper(name##nonmemo);\
  ret name##nonmemo params

// Primary template: left undefined — only the function-pointer
// specialization below is usable.
template<class...Arg> struct Memo;

// Memo<R(*)(Arg...)> caches the results of a free function, keyed by the
// tuple of its arguments. Unlike Python's lru_cache there is no size
// limit: entries are never evicted.
template<class R, class...Arg>
struct Memo<R(*)(Arg...)>
{
    using funcpoint_t = R(*)(Arg...);
    using tuple_t     = std::tuple<Arg...>;
    using map_t       = std::map<tuple_t, R>;

    // explicit: a bare function pointer should not silently convert to a Memo.
    explicit Memo(funcpoint_t fp) : mFp(fp) {}

    // Return a reference to the cached result for (arg...), computing and
    // storing it on a miss. Handing out const R& is safe: std::map never
    // invalidates references to existing elements on insertion, even when
    // recursive calls populate the map while a computation is in flight.
    const R& operator()(Arg...arg)
    {
        tuple_t key(arg...);
        auto it = mMap.find(key);
        if (it != mMap.end())
            return it->second;
        // Compute before inserting: mFp may recurse back into operator()
        // and add other entries. emplace + moves avoids the extra tuple
        // and result copies that insert(make_pair(...)) incurred.
        R value = mFp(arg...);
        return mMap.emplace(std::move(key), std::move(value)).first->second;
    }

private:
    funcpoint_t mFp;   // the raw, un-memoized function
    map_t mMap;        // argument tuple -> cached result
};

// Deduce R and Arg... from a plain function pointer and build the
// matching Memo wrapper around it.
template <class R, class...Arg>
Memo<R(*)(Arg...)>
memo_wrapper(R (*f)(Arg...))
{
    using wrapper_t = Memo<R(*)(Arg...)>;
    return wrapper_t{f};
}