Common C++ string obfuscation schemes


Converting a string into an equivalent int array

Packing the characters of a literal into integer constants keeps the plaintext out of the compiled binary, so it does not show up in a strings dump or a disassembler's string window.

#include <cstdint>
#include <cstring>
#include <cwchar>
#include <vector>

// Packs two 16-bit wide characters into each uint32_t, low half first
// (assumes the Windows convention that wchar_t is 16 bits wide).
std::vector<uint32_t> convert_wstring_to_int_array(const wchar_t* str) {
    std::vector<uint32_t> vec;
    const size_t len = wcslen(str);

    for (size_t i = 0; i < len; i += 2) {
        uint32_t val = (uint32_t)str[i] & 0xffff;
        if (i + 1 < len) {
            val |= ((uint32_t)str[i + 1] << 16) & 0xffff0000;
        }
        vec.push_back(val);
    }

    // Guarantee a 16-bit null terminator in the packed data.
    if (vec.empty() || (vec.back() & 0xffff0000)) {
        vec.push_back(0);
    }

    return vec;
}

// Packs four 8-bit characters into each uint32_t in little-endian order.
std::vector<uint32_t> convert_string_to_int_array(const char* str) {
    std::vector<uint32_t> vec;
    const size_t len = strlen(str);

    for (size_t i = 0; i < len; i += 4) {
        uint32_t val = (uint32_t)str[i] & 0xff;
        if (i + 1 < len) {
            val |= ((uint32_t)str[i + 1] << 8) & 0xff00;
        }
        if (i + 2 < len) {
            val |= ((uint32_t)str[i + 2] << 16) & 0xff0000;
        }
        if (i + 3 < len) {
            val |= ((uint32_t)str[i + 3] << 24) & 0xff000000;
        }
        vec.push_back(val);
    }

    // Guarantee an 8-bit null terminator in the packed data.
    if (vec.empty() || (vec.back() & 0xff000000)) {
        vec.push_back(0);
    }

    return vec;
}
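
To use the packed data, the program rebuilds the text at runtime. Below is a minimal decoding sketch (the helper name decode_int_array is mine, not part of the original code); it walks the little-endian bytes and stops at the embedded terminator:

#include <string>

// Hypothetical helper: reverses convert_string_to_int_array by unpacking
// each uint32_t byte by byte (little-endian) until the null terminator.
std::string decode_int_array(const std::vector<uint32_t>& vec) {
    std::string out;
    for (uint32_t val : vec) {
        for (int shift = 0; shift < 32; shift += 8) {
            char c = (char)((val >> shift) & 0xff);
            if (c == '\0')
                return out;
            out.push_back(c);
        }
    }
    return out;
}

// Example: "Hello" packs to { 0x6c6c6548, 0x6f }, so the source only ever
// contains the integer constants:
//   std::vector<uint32_t> hidden = { 0x6c6c6548, 0x6f };
//   std::string s = decode_int_array(hidden);  // "Hello"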

XOR-obfuscating strings at compile time

The header below is the widely used xorstr.hpp by Justas Masiulis (namespace jm). Each literal is XOR-ed at compile time with keys hashed from __TIME__, stored only in encrypted form, and decrypted with SSE/NEON instructions at the point of use.

#ifndef JM_XORSTR_HPP
#define JM_XORSTR_HPP

#if defined(_M_ARM64) || defined(__aarch64__) || defined(_M_ARM) || defined(__arm__)
#include <arm_neon.h>
#elif defined(_M_X64) || defined(__amd64__) || defined(_M_IX86) || defined(__i386__)
#include <immintrin.h>
#else
#error Unsupported platform
#endif

#include <cstdint>
#include <cstddef>
#include <utility>
#include <type_traits>

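// xorstr(str) wraps a literal in an encrypted xor_string; xorstr_(str)
// additionally decrypts it and yields the raw character pointer.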
#define xorstr(str) ::jm::xor_string([]() { return str; }, std::integral_constant<std::size_t, sizeof(str) / sizeof(*str)>{}, std::make_index_sequence<::jm::detail::_buffer_size<sizeof(str)>()>{})
#define xorstr_(str) xorstr(str).crypt_get()
#define xs(s) ((char *)xorstr_(s))
#define XorStr xs

#ifdef _MSC_VER
#define XORSTR_FORCEINLINE __forceinline
#else
#define XORSTR_FORCEINLINE __attribute__((always_inline)) inline
#endif

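// Stick to the 128-bit SSE/NEON code paths; the AVX variants are skipped below.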
#define JM_XORSTR_DISABLE_AVX_INTRINSICS

namespace jm {

	namespace detail {

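		// Number of 64-bit words needed to hold Size bytes, rounded up to a whole 16-byte block.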
		template<std::size_t Size>
		XORSTR_FORCEINLINE constexpr std::size_t _buffer_size()
		{
			return ((Size / 16) + (Size % 16 != 0)) * 2;
		}

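		// FNV-1a-style hash of __TIME__, evaluated at compile time so the XOR keys differ per build.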
		template<std::uint32_t Seed>
		XORSTR_FORCEINLINE constexpr std::uint32_t key4() noexcept
		{
			std::uint32_t value = Seed;
			for (char c : __TIME__)
				value = static_cast<std::uint32_t>((value ^ c) * 16777619ull);
			return value;
		}

		template<std::size_t S>
		XORSTR_FORCEINLINE constexpr std::uint64_t key8()
		{
			constexpr auto first_part = key4<2166136261 + S>();
			constexpr auto second_part = key4<first_part>();
			return (static_cast<std::uint64_t>(first_part) << 32) | second_part;
		}

		// loads up to 8 characters of string into uint64 and xors it with the key
		template<std::size_t N, class CharT>
		XORSTR_FORCEINLINE constexpr std::uint64_t
			load_xored_str8(std::uint64_t key, std::size_t idx, const CharT* str) noexcept
		{
			using cast_type = typename std::make_unsigned<CharT>::type;
			constexpr auto value_size = sizeof(CharT);
			constexpr auto idx_offset = 8 / value_size;

			std::uint64_t value = key;
			for (std::size_t i = 0; i < idx_offset && i + idx * idx_offset < N; ++i)
				value ^= (std::uint64_t{ static_cast<cast_type>(str[i + idx * idx_offset]) }
					<< ((i % idx_offset) * 8 * value_size));

			return value;
		}

		// forces compiler to use registers instead of stuffing constants in rdata
		XORSTR_FORCEINLINE std::uint64_t load_from_reg(std::uint64_t value) noexcept
		{
#if defined(__clang__) || defined(__GNUC__)
			asm("" : "=r"(value) : "0"(value) : );
			return value;
#else
			volatile std::uint64_t reg = value;
			return reg;
#endif
		}

	} // namespace detail

	template<class CharT, std::size_t Size, class Keys, class Indices>
	class xor_string;

	template<class CharT, std::size_t Size, std::uint64_t... Keys, std::size_t... Indices>
	class xor_string<CharT, Size, std::integer_sequence<std::uint64_t, Keys...>, std::index_sequence<Indices...>> {
#ifndef JM_XORSTR_DISABLE_AVX_INTRINSICS
		constexpr static inline std::uint64_t alignment = ((Size > 16) ? 32 : 16);
#else
		constexpr static inline std::uint64_t alignment = 16;
#endif

		alignas(alignment) std::uint64_t _storage[sizeof...(Keys)];

	public:
		using value_type = CharT;
		using size_type = std::size_t;
		using pointer = CharT*;
		using const_pointer = const CharT*;

		template<class L>
		XORSTR_FORCEINLINE xor_string(L l, std::integral_constant<std::size_t, Size>, std::index_sequence<Indices...>) noexcept
			: _storage{ ::jm::detail::load_from_reg((std::integral_constant<std::uint64_t, detail::load_xored_str8<Size>(Keys, Indices, l())>::value))... }
		{}

		XORSTR_FORCEINLINE constexpr size_type size() const noexcept
		{
			return Size - 1;
		}

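		// Re-derives the keys in registers and XORs the encrypted blocks in place.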
		XORSTR_FORCEINLINE void crypt() noexcept
		{
			// everything is inlined by hand because a certain compiler with a certain linker is _very_ slow
#if defined(__clang__)
			alignas(alignment)
				std::uint64_t arr[]{ ::jm::detail::load_from_reg(Keys)... };
			std::uint64_t*    keys =
				(std::uint64_t*)::jm::detail::load_from_reg((std::uint64_t)arr);
#else
			alignas(alignment) std::uint64_t keys[]{ ::jm::detail::load_from_reg(Keys)... };
#endif

#if defined(_M_ARM64) || defined(__aarch64__) || defined(_M_ARM) || defined(__arm__)
#if defined(__clang__)
			((Indices >= sizeof(_storage) / 16 ? static_cast<void>(0) : __builtin_neon_vst1q_v(
				reinterpret_cast<uint64_t*>(_storage) + Indices * 2,
				veorq_u64(__builtin_neon_vld1q_v(reinterpret_cast<const uint64_t*>(_storage) + Indices * 2, 51),
					__builtin_neon_vld1q_v(reinterpret_cast<const uint64_t*>(keys) + Indices * 2, 51)),
				51)), ...);
#else // GCC, MSVC
			((Indices >= sizeof(_storage) / 16 ? static_cast<void>(0) : vst1q_u64(
				reinterpret_cast<uint64_t*>(_storage) + Indices * 2,
				veorq_u64(vld1q_u64(reinterpret_cast<const uint64_t*>(_storage) + Indices * 2),
					vld1q_u64(reinterpret_cast<const uint64_t*>(keys) + Indices * 2)))), ...);
#endif
#elif !defined(JM_XORSTR_DISABLE_AVX_INTRINSICS)
			((Indices >= sizeof(_storage) / 32 ? static_cast<void>(0) : _mm256_store_si256(
				reinterpret_cast<__m256i*>(_storage) + Indices,
				_mm256_xor_si256(
					_mm256_load_si256(reinterpret_cast<const __m256i*>(_storage) + Indices),
					_mm256_load_si256(reinterpret_cast<const __m256i*>(keys) + Indices)))), ...);

			if constexpr (sizeof(_storage) % 32 != 0)
				_mm_store_si128(
					reinterpret_cast<__m128i*>(_storage + sizeof...(Keys) - 2),
					_mm_xor_si128(_mm_load_si128(reinterpret_cast<const __m128i*>(_storage + sizeof...(Keys) - 2)),
						_mm_load_si128(reinterpret_cast<const __m128i*>(keys + sizeof...(Keys) - 2))));
#else
			((Indices >= sizeof(_storage) / 16 ? static_cast<void>(0) : _mm_store_si128(
				reinterpret_cast<__m128i*>(_storage) + Indices,
				_mm_xor_si128(_mm_load_si128(reinterpret_cast<const __m128i*>(_storage) + Indices),
					_mm_load_si128(reinterpret_cast<const __m128i*>(keys) + Indices)))), ...);
#endif
		}

		XORSTR_FORCEINLINE const_pointer get() const noexcept
		{
			return reinterpret_cast<const_pointer>(_storage);
		}

		XORSTR_FORCEINLINE pointer get() noexcept
		{
			return reinterpret_cast<pointer>(_storage);
		}

		XORSTR_FORCEINLINE pointer crypt_get() noexcept
		{
			// crypt() is inlined by hand because a certain compiler with a certain linker is _very_ slow
#if defined(__clang__)
			alignas(alignment)
				std::uint64_t arr[]{ ::jm::detail::load_from_reg(Keys)... };
			std::uint64_t*    keys =
				(std::uint64_t*)::jm::detail::load_from_reg((std::uint64_t)arr);
#else
			alignas(alignment) std::uint64_t keys[]{ ::jm::detail::load_from_reg(Keys)... };
#endif

#if defined(_M_ARM64) || defined(__aarch64__) || defined(_M_ARM) || defined(__arm__)
#if defined(__clang__)
			((Indices >= sizeof(_storage) / 16 ? static_cast<void>(0) : __builtin_neon_vst1q_v(
				reinterpret_cast<uint64_t*>(_storage) + Indices * 2,
				veorq_u64(__builtin_neon_vld1q_v(reinterpret_cast<const uint64_t*>(_storage) + Indices * 2, 51),
					__builtin_neon_vld1q_v(reinterpret_cast<const uint64_t*>(keys) + Indices * 2, 51)),
				51)), ...);
#else // GCC, MSVC
			((Indices >= sizeof(_storage) / 16 ? static_cast<void>(0) : vst1q_u64(
				reinterpret_cast<uint64_t*>(_storage) + Indices * 2,
				veorq_u64(vld1q_u64(reinterpret_cast<const uint64_t*>(_storage) + Indices * 2),
					vld1q_u64(reinterpret_cast<const uint64_t*>(keys) + Indices * 2)))), ...);
#endif
#elif !defined(JM_XORSTR_DISABLE_AVX_INTRINSICS)
			((Indices >= sizeof(_storage) / 32 ? static_cast<void>(0) : _mm256_store_si256(
				reinterpret_cast<__m256i*>(_storage) + Indices,
				_mm256_xor_si256(
					_mm256_load_si256(reinterpret_cast<const __m256i*>(_storage) + Indices),
					_mm256_load_si256(reinterpret_cast<const __m256i*>(keys) + Indices)))), ...);

			if constexpr (sizeof(_storage) % 32 != 0)
				_mm_store_si128(
					reinterpret_cast<__m128i*>(_storage + sizeof...(Keys) - 2),
					_mm_xor_si128(_mm_load_si128(reinterpret_cast<const __m128i*>(_storage + sizeof...(Keys) - 2)),
						_mm_load_si128(reinterpret_cast<const __m128i*>(keys + sizeof...(Keys) - 2))));
#else
			((Indices >= sizeof(_storage) / 16 ? static_cast<void>(0) : _mm_store_si128(
				reinterpret_cast<__m128i*>(_storage) + Indices,
				_mm_xor_si128(_mm_load_si128(reinterpret_cast<const __m128i*>(_storage) + Indices),
					_mm_load_si128(reinterpret_cast<const __m128i*>(keys) + Indices)))), ...);
#endif

			return (pointer)(_storage);
		}
	};

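	// Deduction guide: infers CharT from the lambda's literal and generates one 64-bit key per block.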
	template<class L, std::size_t Size, std::size_t... Indices>
	xor_string(L l, std::integral_constant<std::size_t, Size>, std::index_sequence<Indices...>)->xor_string<
		std::remove_const_t<std::remove_reference_t<decltype(l()[0])>>,
		Size,
		std::integer_sequence<std::uint64_t, detail::key8<Indices>()...>,
		std::index_sequence<Indices...>>;

} // namespace jm

#endif
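
Typical usage, assuming a C++17 compiler (a minimal sketch; the file name xorstr.hpp is my label for the header above):

#include <cstdio>
#include "xorstr.hpp"

int main() {
    // The literal is stored XOR-ed in the binary and decrypted into an
    // on-stack temporary at the call site, so it never appears in .rdata.
    std::printf("%s\n", xorstr_("secret-api-endpoint"));

    // Wide literals work the same way; crypt_get() yields a wchar_t* here.
    auto ws = xorstr(L"secret");
    (void)ws.crypt_get();
    return 0;
}

Note that xorstr_() returns a pointer into a temporary xor_string, so the result must be consumed within the same full expression; keep the object in a named variable (as with ws above) if the pointer needs to outlive the call.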