A Rust library to collect heap profiling data from the jemalloc allocator and convert it to the pprof format.
To understand how to use this together with Polar Signals Cloud to continuously collect profiling data, refer to the Use with Polar Signals Cloud section.
This code was originally developed as part of Materialize, and then extracted in a collaboration into this standalone library.
Currently, this library only supports Linux.
Furthermore, you must be able to switch your allocator to jemalloc.
If you need to continue using the default system allocator for any reason,
this library will not be useful.
Internally this library uses tikv-jemalloc-ctl to interact with jemalloc, so to use it, you must use the jemalloc allocator via the tikv-jemallocator library.
When adding tikv-jemallocator as a dependency, make sure to enable the profiling feature.
[dependencies]
[target.'cfg(not(target_env = "msvc"))'.dependencies]
tikv-jemallocator = { version = "0.6.0", features = ["profiling", "unprefixed_malloc_on_supported_platforms"] }
Note: We also recommend enabling the unprefixed_malloc_on_supported_platforms feature. It is not strictly necessary, but it will influence the rest of the usage.
Then configure the global allocator and configure it with profiling enabled.
#[cfg(not(target_env ="msvc"))]
#[global_allocator]
staticALLOC:tikv_jemallocator::Jemalloc= tikv_jemallocator::Jemalloc;
// jemalloc configuration, picked up at load time through the exported
// `malloc_conf` symbol: enable profiling, activate it immediately, and
// sample allocations roughly every 2^19 bytes (512 KiB). jemalloc reads
// this as a C string, so it must be NUL-terminated.
#[allow(non_upper_case_globals)]
#[export_name = "malloc_conf"]
pub static malloc_conf: &[u8] = b"prof:true,prof_active:true,lg_prof_sample:19\0";
If you do not use the unprefixed_malloc_on_supported_platforms feature, you have to name it _rjem_malloc_conf instead of malloc_conf.
2^19 bytes (512 KiB) is the default configuration for the sampling period, but we recommend being explicit. To understand more about jemalloc sampling, check out the detailed docs on it.
We recommend serving the profiling data on an HTTP server such as axum; that could look like this, and we'll intentionally include a 4 MB allocation to trigger sampling.
#[tokio::main]
asyncfnmain(){
letmutv =vec![];
foriin0..1000000{
v.push(i);
}
letapp = axum::Router::new()
.route("/debug/pprof/heap",axum::routing::get(handle_get_heap));
// run our app with hyper, listening globally on port 3000
letlistener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap();
axum::serve(listener,app).await.unwrap();
}
useaxum::http::StatusCode;
useaxum::response::IntoResponse;
pubasyncfnhandle_get_heap()->Result<implIntoResponse,(StatusCode,String)>{
letmutprof_ctl = jemalloc_pprof::PROF_CTL.as_ref().unwrap().lock().await;
require_profiling_activated(&prof_ctl)?;
letpprof = prof_ctl
.dump_pprof()
.map_err(|err|(StatusCode::INTERNAL_SERVER_ERROR,err.to_string()))?;
Ok(pprof)
}
/// Checks whether jemalloc profiling is activated an returns an error response if not.
fnrequire_profiling_activated(prof_ctl:&jemalloc_pprof::JemallocProfCtl)->Result<(),(StatusCode,String)>{
ifprof_ctl.activated(){
Ok(())
}else{
Err((axum::http::StatusCode::FORBIDDEN,"heap profiling not activated".into()))
}
}
Then, running the application, we can capture a profile and view it with the pprof toolchain.
curl localhost:3000/debug/pprof/heap > heap.pb.gz
pprof -http=:8080 heap.pb.gz
Note: The profiling data is not symbolized, so either addr2line or llvm-addr2line needs to be available in the path, and pprof needs to be able to discover the respective debuginfos.
The way this library works is that it creates a new temporary file (in the platform-specific default temp dir), and instructs jemalloc to dump a profile into that file. Therefore the platform's respective temporary directory must be writable by the process. After reading and converting it to pprof, the file is cleaned up via the destructor. A single profile tends to be only a few kilobytes large, so it doesn't require significant space, but it's non-zero and needs to be writable.
Polar Signals Cloud allows continuously collecting heap profiling data, so you always have the right profiling data available, and don't need to search for the right data, you already have it!
Polar Signals Cloud supports anything in the pprof format, so a process exposing the above explained pprof endpoint can then be scraped as elaborated in the scraping docs.
The functionality to dump the current jemalloc heap profile in pprof format is exposed to C and C++ (or any other language that can use jemalloc and can link against libraries via the C ABI). This functionality is exposed via the capi (C API) package.
The following prerequisites are necessary to build the C API package:
- Working Rust and C toolchains. The former can be installed by following the instructions at https://rustup.rs. The latter can be installed via the distribution's package manager. For example, on Ubuntu, run sudo apt install build-essential.
- jemalloc and its development headers. For example, on Ubuntu, run sudo apt install libjemalloc-dev.
Once the prerequisites are installed, the library can be built by running make capi. There are three files of interest:
- The library itself, produced at target/release/libjemalloc_pprof.so
- A header file, at capi/include/jemalloc_pprof.h
- A manual page, at capi/man/jemalloc_pprof.3
The procedure for installing and using these files depends on your distribution and build system.
Ensure that your binaries link against both jemalloc and jemalloc_pprof by passing the linker flags -ljemalloc -ljemalloc_pprof. The procedure for ensuring that these flags are passed depends on your build system and is currently outside the scope of this document.
Once that is done, profiling can be enabled either by setting the MALLOC_CONF environment variable or by defining a symbol called malloc_conf in the binary. For example:
export MALLOC_CONF="prof:true,prof_active:true,lg_prof_sample:19"
See the jemalloc man page for more details. When profiling is enabled, a profile may be dumped in pprof format via the dump_jemalloc_pprof function.
This program allocates between 1 and 10 MiB every 100 milliseconds, and dumps a profile to the file my_profile every 2 seconds.
#include<assert.h>
#include<errno.h>
#include<unistd.h>
#include<stdlib.h>
#include<pthread.h>
#include<stdio.h>
#include<jemalloc_pprof.h>
/* Allocates and touches 1 MiB. The buffer is never freed, so it stays
 * live and visible as an in-use allocation in the heap profile. */
void
a()
{
    size_t sz = 1 * 1024 * 1024;
    char *x = malloc(sz);
    if (!x) {
        return; /* allocation failed; writing through NULL would be UB */
    }
    for (size_t i = 0; i < sz; ++i) {
        x[i] = '\0';
    }
}
/* Allocates and touches 2 MiB; never freed, so it stays live in the profile. */
void
b()
{
    size_t sz = 2 * 1024 * 1024;
    char *x = malloc(sz);
    if (!x) {
        return; /* allocation failed */
    }
    for (size_t i = 0; i < sz; ++i) {
        x[i] = '\0';
    }
}
/* Allocates and touches 3 MiB; never freed, so it stays live in the profile. */
void
c()
{
    size_t sz = 3 * 1024 * 1024;
    char *x = malloc(sz);
    if (!x) {
        return; /* allocation failed */
    }
    for (size_t i = 0; i < sz; ++i) {
        x[i] = '\0';
    }
}
/* Allocates and touches 4 MiB; never freed, so it stays live in the profile. */
void
d()
{
    size_t sz = 4 * 1024 * 1024;
    char *x = malloc(sz);
    if (!x) {
        return; /* allocation failed */
    }
    for (size_t i = 0; i < sz; ++i) {
        x[i] = '\0';
    }
}
/* Allocates and touches 5 MiB; never freed, so it stays live in the profile. */
void
e()
{
    size_t sz = 5 * 1024 * 1024;
    char *x = malloc(sz);
    if (!x) {
        return; /* allocation failed */
    }
    for (size_t i = 0; i < sz; ++i) {
        x[i] = '\0';
    }
}
/* Allocates and touches 6 MiB; never freed, so it stays live in the profile. */
void
f()
{
    size_t sz = 6 * 1024 * 1024;
    char *x = malloc(sz);
    if (!x) {
        return; /* allocation failed */
    }
    for (size_t i = 0; i < sz; ++i) {
        x[i] = '\0';
    }
}
/* Allocates and touches 7 MiB; never freed, so it stays live in the profile. */
void
g()
{
    size_t sz = 7 * 1024 * 1024;
    char *x = malloc(sz);
    if (!x) {
        return; /* allocation failed */
    }
    for (size_t i = 0; i < sz; ++i) {
        x[i] = '\0';
    }
}
/* Allocates and touches 8 MiB; never freed, so it stays live in the profile. */
void
h()
{
    size_t sz = 8 * 1024 * 1024;
    char *x = malloc(sz);
    if (!x) {
        return; /* allocation failed */
    }
    for (size_t i = 0; i < sz; ++i) {
        x[i] = '\0';
    }
}
/* Allocates and touches 9 MiB; never freed, so it stays live in the profile. */
void
j()
{
    size_t sz = 9 * 1024 * 1024;
    char *x = malloc(sz);
    if (!x) {
        return; /* allocation failed */
    }
    for (size_t i = 0; i < sz; ++i) {
        x[i] = '\0';
    }
}
/* Allocates and touches 10 MiB; never freed, so it stays live in the profile. */
void
k()
{
    size_t sz = 10 * 1024 * 1024;
    char *x = malloc(sz);
    if (!x) {
        return; /* allocation failed */
    }
    for (size_t i = 0; i < sz; ++i) {
        x[i] = '\0';
    }
}
/* Background thread body: every two seconds, dumps the current jemalloc
 * heap profile in pprof format and writes it to the file "my_profile".
 * Loops forever; the thread argument is unused. */
void *
repeatedly_dump(void *ignored)
{
    char *buf = NULL; /* initialized so a failed dump never leaves it wild */
    size_t len = 0;
    int result;
    for (;;) {
        sleep(2);
        result = dump_jemalloc_pprof(&buf, &len);
        if (result != JP_SUCCESS) {
            fprintf(stderr, "errno: %d\n", errno);
            continue;
        }
        if (buf) {
            FILE *file = fopen("my_profile", "w");
            assert(file);
            fwrite(buf, sizeof(char), len, file);
            fclose(file);
            /* %zu is the portable conversion for size_t (was %lu). */
            printf("dumped pprof of size %zu\n", len);
            free(buf);
        }
    }
    return NULL; /* unreachable; satisfies the non-void return type */
}
int
main()
{
pthread_ttid;
intresult;
result=pthread_create(&tid,NULL,repeatedly_dump,NULL);
assert(!result);
for(;;) {
usleep(100000);
switch(rand() %10) {
case0:
a();
break;
case1:
b();
break;
case2:
c();
break;
case3:
d();
break;
case4:
e();
break;
case5:
f();
break;
case6:
g();
break;
case7:
h();
break;
case8:
j();
break;
case9:
k();
break;
}
}
}