Commit 6b6539b4 authored by Dennis Rassmann

Merge remote-tracking branch 'origin/android-msm-mako-3.4-lollipop-mr1' into exp

parents 66fce7ca f75bb4f8
@@ -1274,6 +1274,19 @@ force_tllao - BOOLEAN
	race condition where the sender deletes the cached link-layer address
	prior to receiving a response to a previous solicitation."

+optimistic_dad - BOOLEAN
+	Whether to perform Optimistic Duplicate Address Detection (RFC 4429).
+		0: disabled (default)
+		1: enabled
+
+use_optimistic - BOOLEAN
+	If enabled, do not classify optimistic addresses as deprecated during
+	source address selection.  Preferred addresses will still be chosen
+	before optimistic addresses, subject to other ranking in the source
+	address selection algorithm.
+		0: disabled (default)
+		1: enabled
+
icmp/*:
ratelimit - INTEGER
	Limit the maximal rates for sending ICMPv6 packets.
......
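Both new knobs are per-interface and live under /proc/sys/net/ipv6/conf/. As a minimal userspace sketch (the interface name "wlan0" and the helper are illustrative, not part of this change), optimistic DAD could be toggled like so:

#include <fcntl.h>
#include <unistd.h>

/* Hypothetical helper: write "1" to a per-interface sysctl file. */
static int enable_sysctl(const char *path)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, "1", 1) != 1) {
		close(fd);
		return -1;
	}
	return close(fd);
}

/* e.g. enable_sysctl("/proc/sys/net/ipv6/conf/wlan0/optimistic_dad"); */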
@@ -251,6 +251,7 @@ core-$(CONFIG_VFP) += arch/arm/vfp/
# If we have a machine-specific directory, then include it in the build.
core-y += arch/arm/kernel/ arch/arm/mm/ arch/arm/common/
core-y += arch/arm/net/
+core-y += arch/arm/crypto/
core-y += $(machdirs) $(platdirs)
drivers-$(CONFIG_OPROFILE) += arch/arm/oprofile/
......
@@ -505,3 +505,4 @@ CONFIG_SECURITY_SELINUX=y
CONFIG_CRYPTO_TWOFISH=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRYPTO_AES_ARM=y
arch/arm/crypto/Makefile (new file):

#
# Arch-specific CryptoAPI modules.
#

obj-$(CONFIG_CRYPTO_AES_ARM) += aes-arm.o
obj-$(CONFIG_CRYPTO_AES_ARM_BS) += aes-arm-bs.o
obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o
obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o
obj-$(CONFIG_CRYPTO_SHA512_ARM_NEON) += sha512-arm-neon.o

aes-arm-y := aes-armv4.o aes_glue.o
aes-arm-bs-y := aesbs-core.o aesbs-glue.o
sha1-arm-y := sha1-armv4-large.o sha1_glue.o
sha1-arm-neon-y := sha1-armv7-neon.o sha1_neon_glue.o
sha512-arm-neon-y := sha512-armv7-neon.o sha512_neon_glue.o

quiet_cmd_perl = PERL    $@
      cmd_perl = $(PERL) $(<) > $(@)

# aesbs-core.S is generated from the OpenSSL perl script; the _shipped
# copy is used when the file is not regenerated.
$(src)/aesbs-core.S_shipped: $(src)/bsaes-armv7.pl
	$(call cmd,perl)

.PRECIOUS: $(obj)/aesbs-core.S
arch/arm/crypto/aes_glue.c (new file):

/*
 * Glue Code for the asm optimized version of the AES Cipher Algorithm
 */

#include <linux/module.h>
#include <linux/crypto.h>
#include <crypto/aes.h>

#include "aes_glue.h"

EXPORT_SYMBOL(AES_encrypt);
EXPORT_SYMBOL(AES_decrypt);
EXPORT_SYMBOL(private_AES_set_encrypt_key);
EXPORT_SYMBOL(private_AES_set_decrypt_key);

static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct AES_CTX *ctx = crypto_tfm_ctx(tfm);
	AES_encrypt(src, dst, &ctx->enc_key);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct AES_CTX *ctx = crypto_tfm_ctx(tfm);
	AES_decrypt(src, dst, &ctx->dec_key);
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		unsigned int key_len)
{
	struct AES_CTX *ctx = crypto_tfm_ctx(tfm);

	/* The asm key-schedule routines take the key length in bits. */
	switch (key_len) {
	case AES_KEYSIZE_128:
		key_len = 128;
		break;
	case AES_KEYSIZE_192:
		key_len = 192;
		break;
	case AES_KEYSIZE_256:
		key_len = 256;
		break;
	default:
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if (private_AES_set_encrypt_key(in_key, key_len, &ctx->enc_key) == -1) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}
	/* private_AES_set_decrypt_key expects an encryption key as input */
	ctx->dec_key = ctx->enc_key;
	if (private_AES_set_decrypt_key(in_key, key_len, &ctx->dec_key) == -1) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}
	return 0;
}

static struct crypto_alg aes_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-asm",
	.cra_priority		= 200,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct AES_CTX),
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(aes_alg.cra_list),
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aes_encrypt,
			.cia_decrypt		= aes_decrypt
		}
	}
};

static int __init aes_init(void)
{
	return crypto_register_alg(&aes_alg);
}

static void __exit aes_fini(void)
{
	crypto_unregister_alg(&aes_alg);
}

module_init(aes_init);
module_exit(aes_fini);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm (ASM)");
MODULE_LICENSE("GPL");
MODULE_ALIAS("aes");
MODULE_ALIAS("aes-asm");
MODULE_AUTHOR("David McCullough <ucdevel@gmail.com>");
arch/arm/crypto/aes_glue.h (new file):

#define AES_MAXNR 14	/* AES-256 uses the most rounds: 14 */

struct AES_KEY {
	/* Expanded key schedule: 4 words per round key and AES_MAXNR + 1
	 * round keys, i.e. at most 4 * 15 = 60 32-bit words. */
	unsigned int rd_key[4 * (AES_MAXNR + 1)];
	int rounds;
};

struct AES_CTX {
	struct AES_KEY enc_key;
	struct AES_KEY dec_key;
};

asmlinkage void AES_encrypt(const u8 *in, u8 *out, struct AES_KEY *ctx);
asmlinkage void AES_decrypt(const u8 *in, u8 *out, struct AES_KEY *ctx);
asmlinkage int private_AES_set_decrypt_key(const unsigned char *userKey,
					   const int bits, struct AES_KEY *key);
asmlinkage int private_AES_set_encrypt_key(const unsigned char *userKey,
					   const int bits, struct AES_KEY *key);
arch/arm/crypto/sha1_glue.c (new file):

/*
 * Cryptographic API.
 * Glue code for the SHA1 Secure Hash Algorithm assembler implementation
 *
 * This file is based on sha1_generic.c and sha1_ssse3_glue.c
 *
 * Copyright (c) Alan Smithee.
 * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
 * Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
 * Copyright (c) Mathias Krause <minipli@googlemail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <asm/byteorder.h>
#include <asm/crypto/sha1.h>

asmlinkage void sha1_block_data_order(u32 *digest,
		const unsigned char *data, unsigned int rounds);

static int sha1_init(struct shash_desc *desc)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	*sctx = (struct sha1_state){
		.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
	};

	return 0;
}

static int __sha1_update(struct sha1_state *sctx, const u8 *data,
			 unsigned int len, unsigned int partial)
{
	unsigned int done = 0;

	sctx->count += len;

	/* Top up and hash a previously buffered partial block first. */
	if (partial) {
		done = SHA1_BLOCK_SIZE - partial;
		memcpy(sctx->buffer + partial, data, done);
		sha1_block_data_order(sctx->state, sctx->buffer, 1);
	}

	/* Hash as many whole 64-byte blocks as the input still holds. */
	if (len - done >= SHA1_BLOCK_SIZE) {
		const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE;

		sha1_block_data_order(sctx->state, data + done, rounds);
		done += rounds * SHA1_BLOCK_SIZE;
	}

	/* Buffer the remainder for the next update or for finalization. */
	memcpy(sctx->buffer, data + done, len - done);
	return 0;
}

int sha1_update_arm(struct shash_desc *desc, const u8 *data,
		    unsigned int len)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
	int res;

	/* Handle the fast case right here */
	if (partial + len < SHA1_BLOCK_SIZE) {
		sctx->count += len;
		memcpy(sctx->buffer + partial, data, len);
		return 0;
	}
	res = __sha1_update(sctx, data, len, partial);
	return res;
}
EXPORT_SYMBOL_GPL(sha1_update_arm);

/* Add padding and return the message digest. */
static int sha1_final(struct shash_desc *desc, u8 *out)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	unsigned int i, index, padlen;
	__be32 *dst = (__be32 *)out;
	__be64 bits;
	static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, };

	bits = cpu_to_be64(sctx->count << 3);

	/* Pad out to 56 mod 64 and append length */
	index = sctx->count % SHA1_BLOCK_SIZE;
	padlen = (index < 56) ? (56 - index) : ((SHA1_BLOCK_SIZE+56) - index);

	/* We need to fill a whole block for __sha1_update() */
	if (padlen <= 56) {
		sctx->count += padlen;
		memcpy(sctx->buffer + index, padding, padlen);
	} else {
		__sha1_update(sctx, padding, padlen, index);
	}
	__sha1_update(sctx, (const u8 *)&bits, sizeof(bits), 56);

	/* Store state in digest */
	for (i = 0; i < 5; i++)
		dst[i] = cpu_to_be32(sctx->state[i]);

	/* Wipe context */
	memset(sctx, 0, sizeof(*sctx));

	return 0;
}

static int sha1_export(struct shash_desc *desc, void *out)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	memcpy(out, sctx, sizeof(*sctx));
	return 0;
}

static int sha1_import(struct shash_desc *desc, const void *in)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	memcpy(sctx, in, sizeof(*sctx));
	return 0;
}

static struct shash_alg alg = {
	.digestsize	= SHA1_DIGEST_SIZE,
	.init		= sha1_init,
	.update		= sha1_update_arm,
	.final		= sha1_final,
	.export		= sha1_export,
	.import		= sha1_import,
	.descsize	= sizeof(struct sha1_state),
	.statesize	= sizeof(struct sha1_state),
	.base		= {
		.cra_name	= "sha1",
		.cra_driver_name = "sha1-asm",
		.cra_priority	= 150,
		.cra_flags	= CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize	= SHA1_BLOCK_SIZE,
		.cra_module	= THIS_MODULE,
	}
};

static int __init sha1_mod_init(void)
{
	return crypto_register_shash(&alg);
}

static void __exit sha1_mod_fini(void)
{
	crypto_unregister_shash(&alg);
}

module_init(sha1_mod_init);
module_exit(sha1_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm (ARM)");
MODULE_ALIAS("sha1");
MODULE_AUTHOR("David McCullough <ucdevel@gmail.com>");
arch/arm/include/asm/crypto/sha1.h (new file):

#ifndef ASM_ARM_CRYPTO_SHA1_H
#define ASM_ARM_CRYPTO_SHA1_H

#include <linux/crypto.h>
#include <crypto/sha.h>

extern int sha1_update_arm(struct shash_desc *desc, const u8 *data,
			   unsigned int len);

#endif
@@ -123,7 +123,7 @@ struct platform_device msm8064_pc_cntr = {
static struct msm_watchdog_pdata msm_watchdog_pdata = {
	.pet_time = 10000,
-	.bark_time = 11000,
+	.bark_time = 45000,
	.has_secure = true,
	.needs_expired_enable = true,
	.base = MSM_TMR0_BASE + WDT0_OFFSET,
......
@@ -735,6 +735,7 @@ static int msm_rotator_get_plane_sizes(uint32_t format, uint32_t w, uint32_t h,
uint32_t fast_yuv_invalid_size_checker(unsigned char rot_mode,
				uint32_t src_width,
				uint32_t dst_width,
+				uint32_t src_height,
				uint32_t dst_height,
				uint32_t dstp0_ystride,
				uint32_t is_planar420)
@@ -749,6 +750,9 @@ uint32_t fast_yuv_invalid_size_checker(unsigned char rot_mode,
	if (rot_mode & MDP_ROT_90) {
+		if ((src_height % 128) == 8)
+			return -EINVAL;
+
		/* if rotation 90 degree on fast yuv
		 * rotator image input width has to be multiple of 8
		 * rotator image input height has to be multiple of 8
@@ -801,9 +805,9 @@ uint32_t fast_yuv_invalid_size_checker(unsigned char rot_mode,
	} else {
		/* if NOT applying rotation 90 degree on fast yuv,
		 * rotator image input width has to be multiple of 8
-		 * rotator image input height has to be multiple of 2
+		 * rotator image input height has to be multiple of 8
		 */
-		if (((dst_width % 8) != 0) || ((dst_height % 2) != 0))
+		if (((dst_width % 8) != 0) || ((dst_height % 8) != 0))
			return -EINVAL;
	}
@@ -1957,6 +1961,7 @@ static int msm_rotator_start(unsigned long arg,
		fast_yuv_en = !fast_yuv_invalid_size_checker(
						info.rotations,
						info.src.width,
+						info.src.height,
						dst_w,
						dst_h,
						dst_w,
......
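The functional change in these hunks is a stricter alignment rule for the non-rotated fast-YUV path: the destination height must now be a multiple of 8 rather than 2, matching the existing width constraint. A standalone sketch of the post-change check (hypothetical helper, not the driver function itself):

#include <stdint.h>

/* Non-90-degree fast-YUV rule after this change: both destination
 * dimensions must be multiples of 8. */
static int fast_yuv_dst_ok(uint32_t dst_width, uint32_t dst_height)
{
	return ((dst_width % 8) == 0) && ((dst_height % 8) == 0);
}

/* Example: 1280x720 still passes (720 = 8 * 90), but 1280x722 is now
 * rejected even though 722 is a multiple of 2. */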
@@ -487,6 +487,11 @@
#define A3XX_VBIF_PERF_PWR_CNT2_LO 0x307b
#define A3XX_VBIF_PERF_PWR_CNT2_HI 0x307c
#define A3XX_VBIF_XIN_HALT_CTRL0 0x3080
#define A3XX_VBIF_XIN_HALT_CTRL0_MASK 0x3F
#define A3XX_VBIF_XIN_HALT_CTRL1 0x3081
/* Bit flags for RBBM_CTL */
#define RBBM_RBBM_CTL_RESET_PWR_CTR0 BIT(0)
#define RBBM_RBBM_CTL_RESET_PWR_CTR1 BIT(1)
......
@@ -314,6 +314,8 @@ enum adreno_regs {
	ADRENO_REG_TP0_CHICKEN,
	ADRENO_REG_RBBM_RBBM_CTL,
	ADRENO_REG_UCHE_INVALIDATE0,
+	ADRENO_REG_VBIF_XIN_HALT_CTRL0,
+	ADRENO_REG_VBIF_XIN_HALT_CTRL1,
	ADRENO_REG_REGISTER_MAX,
};
......
@@ -4407,6 +4407,10 @@ static unsigned int a3xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_RBBM_CTL, A3XX_RBBM_RBBM_CTL),
	ADRENO_REG_DEFINE(ADRENO_REG_UCHE_INVALIDATE0,
			A3XX_UCHE_CACHE_INVALIDATE0_REG),
+	ADRENO_REG_DEFINE(ADRENO_REG_VBIF_XIN_HALT_CTRL0,
+			A3XX_VBIF_XIN_HALT_CTRL0),
+	ADRENO_REG_DEFINE(ADRENO_REG_VBIF_XIN_HALT_CTRL1,
+			A3XX_VBIF_XIN_HALT_CTRL1),
};
const struct adreno_reg_offsets a3xx_reg_offsets = {
......
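The new XIN_HALT registers are the usual mechanism for quiescing the GPU's VBIF bus interface: software writes the halt-request bits to XIN_HALT_CTRL0, then polls XIN_HALT_CTRL1 until the matching acknowledge bits are set. A sketch of that sequence using the adreno_readreg()/adreno_writereg() accessor pair visible elsewhere in this commit (the helper name, retry bound, and 10 us poll interval are illustrative, not taken from the driver):

#include <linux/delay.h>
#include <linux/errno.h>
#include "adreno.h"

/* Hypothetical helper: request a VBIF halt and wait for the acknowledge. */
static int a3xx_vbif_halt_sketch(struct adreno_device *adreno_dev)
{
	unsigned int ack = 0;
	int retries = 100;	/* illustrative bound */

	adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0,
			A3XX_VBIF_XIN_HALT_CTRL0_MASK);

	while (retries--) {
		adreno_readreg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL1,
				&ack);
		if ((ack & A3XX_VBIF_XIN_HALT_CTRL0_MASK) ==
				A3XX_VBIF_XIN_HALT_CTRL0_MASK)
			return 0;
		udelay(10);
	}
	return -ETIMEDOUT;
}

Once the stall is no longer needed, the request bits are cleared again by writing 0 back to XIN_HALT_CTRL0.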
@@ -965,6 +965,8 @@ static int dispatcher_do_fault(struct kgsl_device *device)
		/* Skip the PM dump for a timeout because it confuses people */
		set_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy);
	}

+	/* Set pagefault if it occurred */
+	kgsl_mmu_set_pagefault(&device->mmu);

	adreno_readreg(adreno_dev, ADRENO_REG_CP_IB1_BASE, &base);
......