# API Reference
# torchei
# torchei.fault_model
# fault_model
class fault_model()
Fault model of a DNN in torchei.
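Below is a minimal sketch of constructing a fault model. The constructor's parameters are not listed on this page, so the model-and-example-input arguments shown here are an assumption; later sketches on this page reuse the resulting fm instance.

```python
# Minimal sketch, assuming fault_model takes a trained network and an
# example input batch; this constructor signature is an assumption, not
# documented on this page.
import torch
import torchvision
import torchei

net = torchvision.models.resnet18(weights="IMAGENET1K_V1").eval()
example_input = torch.randn(1, 3, 224, 224)  # placeholder input batch

fm = torchei.fault_model(net, example_input)  # assumed signature
```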
# change_layer_filter
@log_wrap
def change_layer_filter(layer_filter: List[str]) -> None
Select keys from the model's state_dict according to layer_filter.
layer_filter consists of the following four elements: patterns a key must contain all of, patterns it must contain at least one of, patterns it can't contain, and the least number of dimensions of the layer's weight.
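A hedged sketch of a filter call follows; the element values, and the mixed string/int element types (the annotation above reads List[str]), are illustrative assumptions rather than documented defaults.

```python
# Illustrative only: the element values and types below are assumptions,
# not documented defaults (the annotation above reads List[str]).
fm.change_layer_filter([
    "weight",  # keys must contain all of this
    "",        # keys must contain at least one of this
    "bn",      # keys can't contain this
    2,         # least number of dimensions of the layer's weight
])
```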
# time_decorator
def time_decorator(func)
Return the same function, but record its time cost in self.time.
# weight_ei
def weight_ei(inject_func) -> None
Low-level method to inject weight errors.
# neuron_ei
def neuron_ei(inject_hook: Callable[[torch.nn.Module, tuple], None]) -> None
Low-level method to inject neuron errors.
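The hook type matches PyTorch's (module, input) pre-hook convention, so the sketch below assumes the hook receives a layer and its input tuple and corrupts one activation in place; how torchei actually applies the hook is not described on this page.

```python
# Hedged sketch of a custom neuron-error hook; in-place corruption of the
# first input tensor is an assumption about how the hook is applied.
import torch

def stuck_at_zero_hook(module: torch.nn.Module, inputs: tuple) -> None:
    x = inputs[0]
    # force one randomly chosen activation to zero (a stuck-at fault)
    idx = tuple(torch.randint(0, size, (1,)).item() for size in x.shape)
    x[idx] = 0.0

fm.neuron_ei(stuck_at_zero_hook)  # fm is the fault_model instance from the first sketch
```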
# reliability_calc
@log_wrap
def reliability_calc(iteration: int,
error_inject: Callable[[None], None],
kalman: bool = False,
adaptive: bool = False,
**kwargs) -> Union[List, float]
Optional params:

- group_size: divide the iterations into groups or not; a value > 0 enables grouping and sets the group size
- kalman: use a Kalman filter in the estimation
- adaptive: auto-stop
- verbose_return: return (estimation, group estimation, group index)
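A hedged sketch of a reliability run; the iteration count and error probability are illustrative, and the injector comes from get_mc_attacker, documented further down this page.

```python
# Illustrative values; iteration and p are not recommendations.
injector = fm.get_mc_attacker(p=1e-8)  # no-argument inject function (see below)
estimate = fm.reliability_calc(
    iteration=500,
    error_inject=injector,
    kalman=True,    # Kalman-filtered estimation
    adaptive=True,  # allow auto-stop
)
print(estimate)
```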
# mc_attack
def mc_attack(iteration: int,
p: float,
attack_func: Callable[[float], float] = single_bit_flip,
kalman: bool = False,
attack_type="weight",
**kwargs) -> Union[List, float]
Inject errors using the Monte Carlo method.
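A minimal sketch of a direct Monte Carlo run; the trial count and error probability are illustrative.

```python
# Illustrative call; p is an arbitrary example error probability.
vulnerability = fm.mc_attack(
    iteration=1000,        # Monte Carlo trials
    p=1e-8,
    attack_type="weight",  # documented default
)
print(vulnerability)
```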
# get_mc_attacker
def get_mc_attacker(p: float,
attack_func: Callable[[float], float] = single_bit_flip,
attack_type="weight") -> Callable[[None], None]
Wrapper for injecting errors using the Monte Carlo method; returns an inject function.
# emat_attack
def emat_attack(iteration: int,
p: float,
kalman: bool = False,
**kwargs) -> Union[List, float]
Inject errors using the EMAT method.
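A minimal sketch of an EMAT run, using the same illustrative values as the mc_attack sketch above.

```python
# Illustrative EMAT call with the same example values as the mc_attack sketch.
emat_estimate = fm.emat_attack(iteration=1000, p=1e-8, kalman=True)
print(emat_estimate)
```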
# get_emat_attacker
def get_emat_attacker(p: float) -> Callable[[None], None]
Wrapper for the EMAT method; returns an inject function.
# layer_single_attack
@log_wrap
def layer_single_attack(layer_iter: int,
attack_func: Callable[[float], Any] = None,
layer_id: List = None,
error_rate=True) -> List[float]
Inject a single error into a layer per iteration.
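A hedged sketch of a per-layer campaign; get_emat_func (documented at the end of this page) is described as returning a simulation function for this attack, while the trial count and the meaning of layer_id=None are assumptions.

```python
# Illustrative per-layer single-fault campaign.
per_layer = fm.layer_single_attack(
    layer_iter=100,                  # single-fault trials per layer
    attack_func=fm.get_emat_func(),  # simulated single bit flip (see get_emat_func)
    layer_id=None,                   # assumed to mean "all selected layers"
    error_rate=True,
)
print(per_layer)  # documented to return List[float]
```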
# get_param_size
def get_param_size() -> int
Calculate the total parameter size of the model
# calc_detail_info
@log_wrap
def calc_detail_info() -> None
An auxiliary function for sern_calc that calculates detailed information about the model.
# sern_calc
@log_wrap
def sern_calc(output_class: int = None) -> List
Calculate the model's single-bit-flip (SBF) error rate using the SERN algorithm.
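A one-line sketch; the class count assumes an ImageNet-style classifier and is illustrative.

```python
# 1000 output classes assumes an ImageNet-style classifier.
sern_estimate = fm.sern_calc(output_class=1000)
print(sern_estimate)  # documented to return a List
```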
# unpack_weight
def unpack_weight() -> torch.Tensor
Unpack the model's weights into a single tensor.
# bit_distribution_statistic
def bit_distribution_statistic() -> List
An auxiliary function for emat_attack
to calculate the bit distribution of the model
# register_hook
def register_hook(hook: Callable[..., None], hook_type="forward") -> None
Register a hook function of the specified type on the specified layers.
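A hedged sketch of a custom forward hook; the (module, input, output) signature follows PyTorch's forward-hook convention, which this page does not spell out, and which layers the hook is attached to is assumed to depend on the active layer filter.

```python
# Hedged sketch: log the output range of each hooked layer.
import torch

def range_monitor(module: torch.nn.Module, inputs, output) -> None:
    # (module, input, output) follows PyTorch's forward-hook convention.
    print(type(module).__name__, float(output.min()), float(output.max()))

fm.register_hook(range_monitor, hook_type="forward")
```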
# zscore_protect
@log_wrap
def zscore_protect(layer_type: torch.nn.Module = torch.nn.Conv2d) -> None
Use the z-score to detect bit-flip errors.
# relu6_protection
@log_wrap
def relu6_protection(protect_layers=torch.nn.ReLU) -> None
Warnings:
this will lower the model's precision when no fault is happening
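A brief sketch showing how the two protection helpers above might be enabled before an injection campaign; combining them this way is a usage assumption, not a documented recipe.

```python
# Enable both documented protections, then run an illustrative campaign.
import torch

fm.zscore_protect(layer_type=torch.nn.Conv2d)      # documented default layer type
fm.relu6_protection(protect_layers=torch.nn.ReLU)  # may cost precision when fault-free

protected_estimate = fm.emat_attack(iteration=1000, p=1e-8)  # illustrative values
```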
# get_emat_func
@log_wrap
def get_emat_func() -> Callable[[float], float]
Return a simulation function that simulates a single bit flip, for use with layer_single_attack.
# torchei.utils
Auto Generated by pydoc-markdown