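Below is a minimal NumPy-based Tensor class with automatic differentiation: each tensor records its creators and the operation that produced it, counts how many children will send it gradients, and backward() accumulates those gradients and propagates them through the resulting computation graph. Only addition is supported at this stage.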
import numpy as np

class Tensor(object):
    def __init__(self, data,
                 autograd=False,
                 creators=None,
                 creation_op=None,
                 id=None):
        self.data = np.array(data)
        self.autograd = autograd
        self.grad = None
        # Give every tensor a (pseudo-)unique id so parents can track it.
        if id is None:
            self.id = np.random.randint(0, 100000)
        else:
            self.id = id
        self.creators = creators
        self.creation_op = creation_op
        self.children = {}
        # Register this tensor as a child of each of its creators,
        # counting how many gradients each creator should expect from it.
        if creators is not None:
            for c in creators:
                if self.id not in c.children:
                    c.children[self.id] = 1
                else:
                    c.children[self.id] += 1

    def all_children_grads_accounted_for(self):
        # True once every child has sent its gradient exactly once.
        for id, cnt in self.children.items():
            if cnt != 0:
                return False
        return True

    def backward(self, grad=None, grad_origin=None):
        if self.autograd:
            if grad is None:
                grad = Tensor(np.ones_like(self.data))
            if grad_origin is not None:
                if self.children[grad_origin.id] == 0:
                    raise Exception("cannot backprop more than once")
                else:
                    self.children[grad_origin.id] -= 1
            # Accumulate gradients arriving from multiple children.
            if self.grad is None:
                self.grad = grad
            else:
                self.grad += grad
            # grads must not have grads of their own
            assert grad.autograd == False
            # Only continue backpropping if there's something to
            # backprop into and if all gradients (from children)
            # are accounted for. Override waiting for children if
            # backward() was called on this variable directly.
            if (self.creators is not None and
                    (self.all_children_grads_accounted_for() or
                     grad_origin is None)):
                if self.creation_op == "add":
                    # Addition routes the gradient unchanged to both inputs.
                    self.creators[0].backward(self.grad, self)
                    self.creators[1].backward(self.grad, self)

    def __add__(self, other):
        if self.autograd and other.autograd:
            return Tensor(self.data + other.data,
                          autograd=True,
                          creators=[self, other],
                          creation_op="add")
        return Tensor(self.data + other.data)

    def __repr__(self):
        return str(self.data.__repr__())

    def __str__(self):
        return str(self.data.__str__())

a = Tensor([1, 2, 3, 4, 5], autograd=True)
b = Tensor([2, 2, 2, 2, 2], autograd=True)
c = Tensor([5, 4, 3, 2, 1], autograd=True)
d = a + b
e = b + c
f = d + e
f.backward(Tensor(np.array([1, 1, 1, 1, 1])))
print(b.grad.data == np.array([2, 2, 2, 2, 2]))
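Running this example prints [ True  True  True  True  True]: b feeds into both d and e, so it accumulates a gradient of 1 from each path. As a quick sanity check of the child-counting logic, here is a minimal sketch of my own (not part of the original listing, assuming the Tensor class above is in scope) showing that backpropagating through the same edge twice trips the guard:

# Sketch: demonstrate the "cannot backprop more than once" guard.
# Assumes the Tensor class defined above is already in scope.
x = Tensor([1, 2, 3], autograd=True)
y = Tensor([4, 5, 6], autograd=True)
z = x + y

z.backward(Tensor(np.array([1, 1, 1])))       # first pass consumes the child counts
try:
    z.backward(Tensor(np.array([1, 1, 1])))   # second pass through the same edge
except Exception as err:
    print(err)                                # -> cannot backprop more than once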