#!/bin/bash
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Saves the gdb index for a given binary and its shared library dependencies.
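#
# Example invocation (script/binary names and the task count are illustrative;
# INDEX_TASKS is optional and defaults to 4 below):
#   INDEX_TASKS=8 ./gdb-add-index out/Debug/chrome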
#
# This will run gdb index in parallel on a number of binaries using SIGUSR1
# as the communication mechanism to simulate a semaphore. Because of the
# nature of this technique, using "set -e" is very difficult. The SIGUSR1
# terminates a "wait" with an error which we need to interpret.
#
# When modifying this code, most of the real logic is in the index_one_file
# function. The rest is cleanup + semaphore plumbing.
# Cleanup temp directory and ensure all child jobs are dead-dead.
function on_exit {
  trap "" EXIT USR1  # Avoid reentrancy.

  local jobs=$(jobs -p)
  if [ -n "$jobs" ]; then
    echo -n "Killing outstanding index jobs..."
    kill -KILL $(jobs -p)
    wait
    echo "done"
  fi

  if [ -d "$DIRECTORY" ]; then
    echo -n "Removing temp directory $DIRECTORY..."
    rm -rf "$DIRECTORY"
    echo "done"
  fi
}
# Add index to one binary.
function index_one_file {
  local file=$1
  local basename=$(basename "$file")
  local readelf_out=$(readelf -S "$file")
  if [[ $readelf_out =~ "gdb_index" ]]; then
    echo "Skipped $basename -- already contains index."
  else
    local start=$(date +"%s%N")
    echo "Adding index to $basename..."
    gdb -batch "$file" -ex "save gdb-index $DIRECTORY" -ex "quit"
    local index_file="$DIRECTORY/$basename.gdb-index"
    if [ -f "$index_file" ]; then
      objcopy --add-section .gdb_index="$index_file" \
          --set-section-flags .gdb_index=readonly "$file" "$file"
      local finish=$(date +"%s%N")
      local elapsed=$(((finish - start)/1000000))
      echo " ...$basename indexed. [${elapsed}ms]"
    else
      echo " ...$basename unindexable."
    fi
  fi
}
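# A quick manual check for whether a binary already carries (or just received)
# the section added above (path is illustrative):
#   readelf -S out/Debug/chrome | grep gdb_index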
# Functions that, when combined, concurrently index all files in the
# FILES_TO_INDEX array. The global FILES_TO_INDEX is declared in the main body
# of the script.
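#
# async_index runs one indexing job in the background; when that job finishes
# it signals this script with SIGUSR1, and the USR1 trap installed in the main
# body calls index_next to start on the next file. This is the "semaphore"
# described in the header comment.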
function async_index {
  # Start a background subshell to run the index command.
  {
    index_one_file "$1"
    kill -SIGUSR1 $$  # $$ resolves to the parent script.
    exit 129  # See comment above wait loop at bottom.
  } &
}
CUR_FILE_NUM=0
function index_next {
  if (( CUR_FILE_NUM >= ${#FILES_TO_INDEX[@]} )); then
    return
  fi

  async_index "${FILES_TO_INDEX[CUR_FILE_NUM]}"
  ((CUR_FILE_NUM += 1)) || true
}
########
### Main body of the script.
if [[ ! $# == 1 ]]; then
echo "Usage: $0 path-to-binary"
exit 1
fi
FILENAME="$1"
if [[ ! -f "$FILENAME" ]]; then
echo "Path $FILENAME does not exist."
exit 1
fi
# Ensure we clean up on exit.
trap on_exit EXIT
# We're good to go! Create temp directory for index files.
DIRECTORY=$(mktemp -d)
echo "Made temp directory $DIRECTORY."
# Create an array with the filename and all shared libraries that
# have the same dirname. The shared dirname is a signal that these
# shared libraries were part of the same build as the binary.
declare -a FILES_TO_INDEX=($FILENAME
$(ldd "$FILENAME" 2>/dev/null \
| grep $(dirname "$FILENAME") \
| sed "s/.*[ \t]\(.*\) (.*/\1/")
)
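# For example, an ldd line such as (library name is illustrative)
#   libbase.so => /path/to/build/libbase.so (0x00007f0123456000)
# contributes "/path/to/build/libbase.so" to the array, and only because that
# resolved path sits in the same directory as $FILENAME.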
# Start concurrent indexing.
trap index_next USR1
# 4 is an arbitrary default. When changing it, remember that we are likely
# IO-bound, so basing this on the number of cores is not sensible.
INDEX_TASKS=${INDEX_TASKS:-4}
for ((i=0;i<${INDEX_TASKS};i++)); do
  index_next
done
# Do a wait loop. Bash waits that terminate due to a trap have an exit
# code > 128. We also ensure that our subshell's "normal" exit occurs with
# an exit code > 128. This allows us to consider a > 128 exit code as
# an indication that the loop should continue. Unfortunately, it also means
# we cannot use set -e since technically the "wait" is failing.
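# The loop therefore ends only when a wait returns a status <= 128, i.e. once
# all outstanding index jobs have been reaped.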
wait
while (( $? > 128 )); do
  wait
done