| // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py | 
 | // RUN: %clang_cc1 -ffixed-point -triple x86_64-unknown-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefixes=CHECK,SIGNED | 
 | // RUN: %clang_cc1 -ffixed-point -triple x86_64-unknown-linux-gnu -fpadding-on-unsigned-fixed-point -emit-llvm %s -o - | FileCheck %s --check-prefixes=CHECK,UNSIGNED | 
 |  | 
// Signed fixed-point _Accum globals (i16 / i32 / i64 storage).
short _Accum sa;
_Accum a;
long _Accum la;

// Signed fixed-point _Fract globals (i8 / i16 / i32 storage).
short _Fract sf;
_Fract f;
long _Fract lf;

// Unsigned _Accum globals.
unsigned short _Accum usa;
unsigned _Accum ua;
unsigned long _Accum ula;

// Unsigned _Fract globals.
unsigned short _Fract usf;
unsigned _Fract uf;
unsigned long _Fract ulf;

// Saturating signed variants (shifts lower to llvm.sshl.sat).
_Sat short _Accum sa_sat;
_Sat _Accum a_sat;

_Sat short _Fract sf_sat;
_Sat _Fract f_sat;

// Saturating unsigned variants; codegen differs between the SIGNED and
// UNSIGNED (padding-bit) run lines.
_Sat unsigned short _Accum usa_sat;
_Sat unsigned _Accum ua_sat;

_Sat unsigned short _Fract usf_sat;
_Sat unsigned _Fract uf_sat;

// Plain integer shift amounts.
int i;
unsigned u;
 |  | 
 |  | 
 | // CHECK-LABEL: @sleft_sasai( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i16, ptr @sa, align 2 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 | 
 | // CHECK-NEXT:    [[TMP3:%.*]] = shl i16 [[TMP0]], [[TMP2]] | 
 | // CHECK-NEXT:    store i16 [[TMP3]], ptr @sa, align 2 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void sleft_sasai(void) {
  // short _Accum << int: amount truncated to i16, plain (non-saturating) shl.
  sa = sa << i;
}
 |  | 
 | // CHECK-LABEL: @sleft_aai( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @a, align 4 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = shl i32 [[TMP0]], [[TMP1]] | 
 | // CHECK-NEXT:    store i32 [[TMP2]], ptr @a, align 4 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void sleft_aai(void) {
  // _Accum << int: both are i32, so a direct shl with no amount conversion.
  a = a << i;
}
 |  | 
 | // CHECK-LABEL: @sleft_lalai( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @la, align 8 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 | 
 | // CHECK-NEXT:    [[TMP3:%.*]] = shl i64 [[TMP0]], [[TMP2]] | 
 | // CHECK-NEXT:    store i64 [[TMP3]], ptr @la, align 8 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void sleft_lalai(void) {
  // long _Accum << int: amount zero-extended i32 -> i64, then shl.
  la = la << i;
}
 |  | 
 | // CHECK-LABEL: @sleft_sfsfi( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr @sf, align 1 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i8 | 
 | // CHECK-NEXT:    [[TMP3:%.*]] = shl i8 [[TMP0]], [[TMP2]] | 
 | // CHECK-NEXT:    store i8 [[TMP3]], ptr @sf, align 1 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void sleft_sfsfi(void) {
  // short _Fract << int: amount truncated to i8, plain shl.
  sf = sf << i;
}
 |  | 
 | // CHECK-LABEL: @sleft_ffi( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i16, ptr @f, align 2 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 | 
 | // CHECK-NEXT:    [[TMP3:%.*]] = shl i16 [[TMP0]], [[TMP2]] | 
 | // CHECK-NEXT:    store i16 [[TMP3]], ptr @f, align 2 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void sleft_ffi(void) {
  // _Fract << int: amount truncated to i16, plain shl.
  f = f << i;
}
 |  | 
 | // CHECK-LABEL: @sleft_lflfi( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @lf, align 4 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = shl i32 [[TMP0]], [[TMP1]] | 
 | // CHECK-NEXT:    store i32 [[TMP2]], ptr @lf, align 4 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void sleft_lflfi(void) {
  // long _Fract << int: both i32, direct shl.
  lf = lf << i;
}
 |  | 
 | // CHECK-LABEL: @sleft_aau( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @a, align 4 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @u, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = shl i32 [[TMP0]], [[TMP1]] | 
 | // CHECK-NEXT:    store i32 [[TMP2]], ptr @a, align 4 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void sleft_aau(void) {
  // _Accum << unsigned: identical codegen to the signed-amount case (shl i32).
  a = a << u;
}
 |  | 
 | // CHECK-LABEL: @sleft_ffu( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i16, ptr @f, align 2 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @u, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 | 
 | // CHECK-NEXT:    [[TMP3:%.*]] = shl i16 [[TMP0]], [[TMP2]] | 
 | // CHECK-NEXT:    store i16 [[TMP3]], ptr @f, align 2 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void sleft_ffu(void) {
  // _Fract << unsigned: amount truncated to i16, plain shl.
  f = f << u;
}
 |  | 
 |  | 
 | // CHECK-LABEL: @uleft_usausai( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i16, ptr @usa, align 2 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 | 
 | // CHECK-NEXT:    [[TMP3:%.*]] = shl i16 [[TMP0]], [[TMP2]] | 
 | // CHECK-NEXT:    store i16 [[TMP3]], ptr @usa, align 2 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void uleft_usausai(void) {
  // unsigned short _Accum << int: amount truncated to i16, plain shl
  // (left shift is the same instruction regardless of operand signedness).
  usa = usa << i;
}
 |  | 
 | // CHECK-LABEL: @uleft_uauai( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @ua, align 4 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = shl i32 [[TMP0]], [[TMP1]] | 
 | // CHECK-NEXT:    store i32 [[TMP2]], ptr @ua, align 4 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void uleft_uauai(void) {
  // unsigned _Accum << int: both i32, direct shl.
  ua = ua << i;
}
 |  | 
 | // CHECK-LABEL: @uleft_ulaulai( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @ula, align 8 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 | 
 | // CHECK-NEXT:    [[TMP3:%.*]] = shl i64 [[TMP0]], [[TMP2]] | 
 | // CHECK-NEXT:    store i64 [[TMP3]], ptr @ula, align 8 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void uleft_ulaulai(void) {
  // unsigned long _Accum << int: amount zero-extended to i64, then shl.
  ula = ula << i;
}
 |  | 
 | // CHECK-LABEL: @uleft_usfusfi( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr @usf, align 1 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i8 | 
 | // CHECK-NEXT:    [[TMP3:%.*]] = shl i8 [[TMP0]], [[TMP2]] | 
 | // CHECK-NEXT:    store i8 [[TMP3]], ptr @usf, align 1 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void uleft_usfusfi(void) {
  // unsigned short _Fract << int: amount truncated to i8, plain shl.
  usf = usf << i;
}
 |  | 
 | // CHECK-LABEL: @uleft_ufufi( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i16, ptr @uf, align 2 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 | 
 | // CHECK-NEXT:    [[TMP3:%.*]] = shl i16 [[TMP0]], [[TMP2]] | 
 | // CHECK-NEXT:    store i16 [[TMP3]], ptr @uf, align 2 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void uleft_ufufi(void) {
  // unsigned _Fract << int: amount truncated to i16, plain shl.
  uf = uf << i;
}
 |  | 
 | // CHECK-LABEL: @uleft_ulfulfi( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @ulf, align 4 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = shl i32 [[TMP0]], [[TMP1]] | 
 | // CHECK-NEXT:    store i32 [[TMP2]], ptr @ulf, align 4 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void uleft_ulfulfi(void) {
  // unsigned long _Fract << int: both i32, direct shl.
  ulf = ulf << i;
}
 |  | 
 | // CHECK-LABEL: @uleft_uauau( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @ua, align 4 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @u, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = shl i32 [[TMP0]], [[TMP1]] | 
 | // CHECK-NEXT:    store i32 [[TMP2]], ptr @ua, align 4 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void uleft_uauau(void) {
  // unsigned _Accum << unsigned: direct shl i32.
  ua = ua << u;
}
 |  | 
 | // CHECK-LABEL: @uleft_ufufu( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i16, ptr @uf, align 2 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @u, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 | 
 | // CHECK-NEXT:    [[TMP3:%.*]] = shl i16 [[TMP0]], [[TMP2]] | 
 | // CHECK-NEXT:    store i16 [[TMP3]], ptr @uf, align 2 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void uleft_ufufu(void) {
  // unsigned _Fract << unsigned: amount truncated to i16, plain shl.
  uf = uf << u;
}
 |  | 
 |  | 
 | // CHECK-LABEL: @sright_sasai( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i16, ptr @sa, align 2 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 | 
 | // CHECK-NEXT:    [[TMP3:%.*]] = ashr i16 [[TMP0]], [[TMP2]] | 
 | // CHECK-NEXT:    store i16 [[TMP3]], ptr @sa, align 2 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void sright_sasai(void) {
  // Signed fixed-point >> lowers to an arithmetic shift (ashr i16 here).
  sa = sa >> i;
}
 |  | 
 | // CHECK-LABEL: @sright_aai( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @a, align 4 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = ashr i32 [[TMP0]], [[TMP1]] | 
 | // CHECK-NEXT:    store i32 [[TMP2]], ptr @a, align 4 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void sright_aai(void) {
  // _Accum >> int: direct ashr i32.
  a = a >> i;
}
 |  | 
 | // CHECK-LABEL: @sright_lalai( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @la, align 8 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 | 
 | // CHECK-NEXT:    [[TMP3:%.*]] = ashr i64 [[TMP0]], [[TMP2]] | 
 | // CHECK-NEXT:    store i64 [[TMP3]], ptr @la, align 8 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void sright_lalai(void) {
  // long _Accum >> int: amount zero-extended to i64, then ashr.
  la = la >> i;
}
 |  | 
 | // CHECK-LABEL: @sright_sfsfi( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr @sf, align 1 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i8 | 
 | // CHECK-NEXT:    [[TMP3:%.*]] = ashr i8 [[TMP0]], [[TMP2]] | 
 | // CHECK-NEXT:    store i8 [[TMP3]], ptr @sf, align 1 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void sright_sfsfi(void) {
  // short _Fract >> int: amount truncated to i8, ashr.
  sf = sf >> i;
}
 |  | 
 | // CHECK-LABEL: @sright_ffi( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i16, ptr @f, align 2 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 | 
 | // CHECK-NEXT:    [[TMP3:%.*]] = ashr i16 [[TMP0]], [[TMP2]] | 
 | // CHECK-NEXT:    store i16 [[TMP3]], ptr @f, align 2 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void sright_ffi(void) {
  // _Fract >> int: amount truncated to i16, ashr.
  f = f >> i;
}
 |  | 
 | // CHECK-LABEL: @sright_lflfi( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @lf, align 4 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = ashr i32 [[TMP0]], [[TMP1]] | 
 | // CHECK-NEXT:    store i32 [[TMP2]], ptr @lf, align 4 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void sright_lflfi(void) {
  // long _Fract >> int: both i32, direct ashr.
  lf = lf >> i;
}
 |  | 
 | // CHECK-LABEL: @sright_aau( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @a, align 4 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @u, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = ashr i32 [[TMP0]], [[TMP1]] | 
 | // CHECK-NEXT:    store i32 [[TMP2]], ptr @a, align 4 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void sright_aau(void) {
  // _Accum >> unsigned: the amount's signedness does not change the ashr.
  a = a >> u;
}
 |  | 
 | // CHECK-LABEL: @sright_ffu( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i16, ptr @f, align 2 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @u, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 | 
 | // CHECK-NEXT:    [[TMP3:%.*]] = ashr i16 [[TMP0]], [[TMP2]] | 
 | // CHECK-NEXT:    store i16 [[TMP3]], ptr @f, align 2 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void sright_ffu(void) {
  // _Fract >> unsigned: amount truncated to i16, ashr.
  f = f >> u;
}
 |  | 
 |  | 
 | // CHECK-LABEL: @uright_usausai( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i16, ptr @usa, align 2 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 | 
 | // CHECK-NEXT:    [[TMP3:%.*]] = lshr i16 [[TMP0]], [[TMP2]] | 
 | // CHECK-NEXT:    store i16 [[TMP3]], ptr @usa, align 2 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void uright_usausai(void) {
  // Unsigned fixed-point >> lowers to a logical shift (lshr i16 here).
  usa = usa >> i;
}
 |  | 
 | // CHECK-LABEL: @uright_uauai( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @ua, align 4 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP0]], [[TMP1]] | 
 | // CHECK-NEXT:    store i32 [[TMP2]], ptr @ua, align 4 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void uright_uauai(void) {
  // unsigned _Accum >> int: direct lshr i32.
  ua = ua >> i;
}
 |  | 
 | // CHECK-LABEL: @uright_ulaulai( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @ula, align 8 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 | 
 | // CHECK-NEXT:    [[TMP3:%.*]] = lshr i64 [[TMP0]], [[TMP2]] | 
 | // CHECK-NEXT:    store i64 [[TMP3]], ptr @ula, align 8 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void uright_ulaulai(void) {
  // unsigned long _Accum >> int: amount zero-extended to i64, then lshr.
  ula = ula >> i;
}
 |  | 
 | // CHECK-LABEL: @uright_usfusfi( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr @usf, align 1 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i8 | 
 | // CHECK-NEXT:    [[TMP3:%.*]] = lshr i8 [[TMP0]], [[TMP2]] | 
 | // CHECK-NEXT:    store i8 [[TMP3]], ptr @usf, align 1 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void uright_usfusfi(void) {
  // unsigned short _Fract >> int: amount truncated to i8, lshr.
  usf = usf >> i;
}
 |  | 
 | // CHECK-LABEL: @uright_ufufi( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i16, ptr @uf, align 2 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 | 
 | // CHECK-NEXT:    [[TMP3:%.*]] = lshr i16 [[TMP0]], [[TMP2]] | 
 | // CHECK-NEXT:    store i16 [[TMP3]], ptr @uf, align 2 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void uright_ufufi(void) {
  // unsigned _Fract >> int: amount truncated to i16, lshr.
  uf = uf >> i;
}
 |  | 
 | // CHECK-LABEL: @uright_ulfulfi( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @ulf, align 4 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP0]], [[TMP1]] | 
 | // CHECK-NEXT:    store i32 [[TMP2]], ptr @ulf, align 4 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void uright_ulfulfi(void) {
  // unsigned long _Fract >> int: both i32, direct lshr.
  ulf = ulf >> i;
}
 |  | 
 | // CHECK-LABEL: @uright_uauau( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @ua, align 4 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @u, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP0]], [[TMP1]] | 
 | // CHECK-NEXT:    store i32 [[TMP2]], ptr @ua, align 4 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void uright_uauau(void) {
  // unsigned _Accum >> unsigned: direct lshr i32.
  ua = ua >> u;
}
 |  | 
 | // CHECK-LABEL: @uright_ufufu( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i16, ptr @uf, align 2 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @u, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 | 
 | // CHECK-NEXT:    [[TMP3:%.*]] = lshr i16 [[TMP0]], [[TMP2]] | 
 | // CHECK-NEXT:    store i16 [[TMP3]], ptr @uf, align 2 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void uright_ufufu(void) {
  // unsigned _Fract >> unsigned: amount truncated to i16, lshr.
  uf = uf >> u;
}
 |  | 
 |  | 
 | // CHECK-LABEL: @satleft_sassasi( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i16, ptr @sa_sat, align 2 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 | 
 | // CHECK-NEXT:    [[TMP3:%.*]] = call i16 @llvm.sshl.sat.i16(i16 [[TMP0]], i16 [[TMP2]]) | 
 | // CHECK-NEXT:    store i16 [[TMP3]], ptr @sa_sat, align 2 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void satleft_sassasi(void) {
  // _Sat short _Accum << int: saturating left shift via llvm.sshl.sat.i16.
  sa_sat = sa_sat << i;
}
 |  | 
 | // CHECK-LABEL: @satleft_asasi( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @a_sat, align 4 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.sshl.sat.i32(i32 [[TMP0]], i32 [[TMP1]]) | 
 | // CHECK-NEXT:    store i32 [[TMP2]], ptr @a_sat, align 4 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void satleft_asasi(void) {
  // _Sat _Accum << int: llvm.sshl.sat.i32, no amount conversion needed.
  a_sat = a_sat << i;
}
 |  | 
 | // CHECK-LABEL: @satleft_sfssfsi( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr @sf_sat, align 1 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i8 | 
 | // CHECK-NEXT:    [[TMP3:%.*]] = call i8 @llvm.sshl.sat.i8(i8 [[TMP0]], i8 [[TMP2]]) | 
 | // CHECK-NEXT:    store i8 [[TMP3]], ptr @sf_sat, align 1 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void satleft_sfssfsi(void) {
  // _Sat short _Fract << int: amount truncated to i8, llvm.sshl.sat.i8.
  sf_sat = sf_sat << i;
}
 |  | 
 | // CHECK-LABEL: @satleft_fsfsi( | 
 | // CHECK-NEXT:  entry: | 
 | // CHECK-NEXT:    [[TMP0:%.*]] = load i16, ptr @f_sat, align 2 | 
 | // CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 | 
 | // CHECK-NEXT:    [[TMP3:%.*]] = call i16 @llvm.sshl.sat.i16(i16 [[TMP0]], i16 [[TMP2]]) | 
 | // CHECK-NEXT:    store i16 [[TMP3]], ptr @f_sat, align 2 | 
 | // CHECK-NEXT:    ret void | 
 | // | 
void satleft_fsfsi(void) {
  // _Sat _Fract << int: amount truncated to i16, llvm.sshl.sat.i16.
  f_sat = f_sat << i;
}
 |  | 
 | // SIGNED-LABEL: @satleft_usasusasi( | 
 | // SIGNED-NEXT:  entry: | 
 | // SIGNED-NEXT:    [[TMP0:%.*]] = load i16, ptr @usa_sat, align 2 | 
 | // SIGNED-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // SIGNED-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 | 
 | // SIGNED-NEXT:    [[TMP3:%.*]] = call i16 @llvm.ushl.sat.i16(i16 [[TMP0]], i16 [[TMP2]]) | 
 | // SIGNED-NEXT:    store i16 [[TMP3]], ptr @usa_sat, align 2 | 
 | // SIGNED-NEXT:    ret void | 
 | // | 
 | // UNSIGNED-LABEL: @satleft_usasusasi( | 
 | // UNSIGNED-NEXT:  entry: | 
 | // UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, ptr @usa_sat, align 2 | 
 | // UNSIGNED-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // UNSIGNED-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 | 
 | // UNSIGNED-NEXT:    [[TMP3:%.*]] = call i16 @llvm.sshl.sat.i16(i16 [[TMP0]], i16 [[TMP2]]) | 
 | // UNSIGNED-NEXT:    store i16 [[TMP3]], ptr @usa_sat, align 2 | 
 | // UNSIGNED-NEXT:    ret void | 
 | // | 
void satleft_usasusasi(void) {
  // _Sat unsigned short _Accum << int: llvm.ushl.sat.i16 normally, but
  // llvm.sshl.sat.i16 under -fpadding-on-unsigned-fixed-point, where the
  // unsigned type keeps a zero padding bit that must not be shifted into.
  usa_sat = usa_sat << i;
}
 |  | 
 | // SIGNED-LABEL: @satleft_uasuasi( | 
 | // SIGNED-NEXT:  entry: | 
 | // SIGNED-NEXT:    [[TMP0:%.*]] = load i32, ptr @ua_sat, align 4 | 
 | // SIGNED-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // SIGNED-NEXT:    [[TMP2:%.*]] = call i32 @llvm.ushl.sat.i32(i32 [[TMP0]], i32 [[TMP1]]) | 
 | // SIGNED-NEXT:    store i32 [[TMP2]], ptr @ua_sat, align 4 | 
 | // SIGNED-NEXT:    ret void | 
 | // | 
 | // UNSIGNED-LABEL: @satleft_uasuasi( | 
 | // UNSIGNED-NEXT:  entry: | 
 | // UNSIGNED-NEXT:    [[TMP0:%.*]] = load i32, ptr @ua_sat, align 4 | 
 | // UNSIGNED-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // UNSIGNED-NEXT:    [[TMP2:%.*]] = call i32 @llvm.sshl.sat.i32(i32 [[TMP0]], i32 [[TMP1]]) | 
 | // UNSIGNED-NEXT:    store i32 [[TMP2]], ptr @ua_sat, align 4 | 
 | // UNSIGNED-NEXT:    ret void | 
 | // | 
void satleft_uasuasi(void) {
  // _Sat unsigned _Accum << int: ushl.sat.i32 (SIGNED) vs sshl.sat.i32
  // (UNSIGNED padding-bit layout).
  ua_sat = ua_sat << i;
}
 |  | 
 | // SIGNED-LABEL: @satleft_usfsusfsi( | 
 | // SIGNED-NEXT:  entry: | 
 | // SIGNED-NEXT:    [[TMP0:%.*]] = load i8, ptr @usf_sat, align 1 | 
 | // SIGNED-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // SIGNED-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i8 | 
 | // SIGNED-NEXT:    [[TMP3:%.*]] = call i8 @llvm.ushl.sat.i8(i8 [[TMP0]], i8 [[TMP2]]) | 
 | // SIGNED-NEXT:    store i8 [[TMP3]], ptr @usf_sat, align 1 | 
 | // SIGNED-NEXT:    ret void | 
 | // | 
 | // UNSIGNED-LABEL: @satleft_usfsusfsi( | 
 | // UNSIGNED-NEXT:  entry: | 
 | // UNSIGNED-NEXT:    [[TMP0:%.*]] = load i8, ptr @usf_sat, align 1 | 
 | // UNSIGNED-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // UNSIGNED-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i8 | 
 | // UNSIGNED-NEXT:    [[TMP3:%.*]] = call i8 @llvm.sshl.sat.i8(i8 [[TMP0]], i8 [[TMP2]]) | 
 | // UNSIGNED-NEXT:    store i8 [[TMP3]], ptr @usf_sat, align 1 | 
 | // UNSIGNED-NEXT:    ret void | 
 | // | 
void satleft_usfsusfsi(void) {
  // _Sat unsigned short _Fract << int: amount truncated to i8;
  // ushl.sat.i8 (SIGNED) vs sshl.sat.i8 (UNSIGNED padding-bit layout).
  usf_sat = usf_sat << i;
}
 |  | 
 | // SIGNED-LABEL: @satleft_ufsufsi( | 
 | // SIGNED-NEXT:  entry: | 
 | // SIGNED-NEXT:    [[TMP0:%.*]] = load i16, ptr @uf_sat, align 2 | 
 | // SIGNED-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // SIGNED-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 | 
 | // SIGNED-NEXT:    [[TMP3:%.*]] = call i16 @llvm.ushl.sat.i16(i16 [[TMP0]], i16 [[TMP2]]) | 
 | // SIGNED-NEXT:    store i16 [[TMP3]], ptr @uf_sat, align 2 | 
 | // SIGNED-NEXT:    ret void | 
 | // | 
 | // UNSIGNED-LABEL: @satleft_ufsufsi( | 
 | // UNSIGNED-NEXT:  entry: | 
 | // UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, ptr @uf_sat, align 2 | 
 | // UNSIGNED-NEXT:    [[TMP1:%.*]] = load i32, ptr @i, align 4 | 
 | // UNSIGNED-NEXT:    [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16 | 
 | // UNSIGNED-NEXT:    [[TMP3:%.*]] = call i16 @llvm.sshl.sat.i16(i16 [[TMP0]], i16 [[TMP2]]) | 
 | // UNSIGNED-NEXT:    store i16 [[TMP3]], ptr @uf_sat, align 2 | 
 | // UNSIGNED-NEXT:    ret void | 
 | // | 
void satleft_ufsufsi(void) {
  // _Sat unsigned _Fract << int: amount truncated to i16;
  // ushl.sat.i16 (SIGNED) vs sshl.sat.i16 (UNSIGNED padding-bit layout).
  uf_sat = uf_sat << i;
}